obsolete: fix relevant-obsmarkers computation on pruned changeset...
marmoute
r32488:176d1a0c default
@@ -1,1298 +1,1301 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that use changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker formats depend on the version.
67 67 See the comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 phases,
80 80 policy,
81 81 util,
82 82 )
83 83
84 84 parsers = policy.importmod(r'parsers')
85 85
86 86 _pack = struct.pack
87 87 _unpack = struct.unpack
88 88 _calcsize = struct.calcsize
89 89 propertycache = util.propertycache
90 90
91 91 # the obsolete feature is not mature enough to be enabled by default.
92 92 # you have to rely on a third party extension to enable this.
93 93 _enabled = False
94 94
95 95 # Options for obsolescence
96 96 createmarkersopt = 'createmarkers'
97 97 allowunstableopt = 'allowunstable'
98 98 exchangeopt = 'exchange'
99 99
100 100 def isenabled(repo, option):
101 101 """Returns True if the given repository has the given obsolete option
102 102 enabled.
103 103 """
104 104 result = set(repo.ui.configlist('experimental', 'evolution'))
105 105 if 'all' in result:
106 106 return True
107 107
108 108 # For migration purposes, temporarily return true if the config hasn't been
109 109 # set but _enabled is true.
110 110 if len(result) == 0 and _enabled:
111 111 return True
112 112
113 113 # createmarkers must be enabled if other options are enabled
114 114 if ((allowunstableopt in result or exchangeopt in result) and
115 115 createmarkersopt not in result):
116 116 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
117 117 "if other obsolete options are enabled"))
118 118
119 119 return option in result
120 120
121 121 ### obsolescence marker flag
122 122
123 123 ## bumpedfix flag
124 124 #
125 125 # When a changeset A' succeeds a changeset A which became public, we call A'
126 126 # "bumped" because it's a successor of a public changeset
127 127 #
128 128 # o A' (bumped)
129 129 # |`:
130 130 # | o A
131 131 # |/
132 132 # o Z
133 133 #
134 134 # The way to solve this situation is to create a new changeset Ad as a child
135 135 # of A. This changeset has the same content as A'. So the diff from A to A'
136 136 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
137 137 #
138 138 # o Ad
139 139 # |`:
140 140 # | x A'
141 141 # |'|
142 142 # o | A
143 143 # |/
144 144 # o Z
145 145 #
146 146 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
147 147 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
148 148 # This flag means that the successor expresses the changes between the public
149 149 # and bumped versions and fixes the situation, breaking the transitivity of
150 150 # "bumped" here.
151 151 bumpedfix = 1
152 152 usingsha256 = 2
153 153
154 154 ## Parsing and writing of version "0"
155 155 #
156 156 # The header is followed by the markers. Each marker is made of:
157 157 #
158 158 # - 1 uint8 : number of new changesets "N", can be zero.
159 159 #
160 160 # - 1 uint32: metadata size "M" in bytes.
161 161 #
162 162 # - 1 byte: a bit field. It is reserved for flags used in common
163 163 # obsolete marker operations, to avoid repeated decoding of metadata
164 164 # entries.
165 165 #
166 166 # - 20 bytes: obsoleted changeset identifier.
167 167 #
168 168 # - N*20 bytes: new changesets identifiers.
169 169 #
170 170 # - M bytes: metadata as a sequence of nul-terminated strings. Each
171 171 # string contains a key and a value, separated by a colon ':', without
172 172 # additional encoding. Keys cannot contain '\0' or ':' and values
173 173 # cannot contain '\0'.
174 174 _fm0version = 0
175 175 _fm0fixed = '>BIB20s'
176 176 _fm0node = '20s'
177 177 _fm0fsize = _calcsize(_fm0fixed)
178 178 _fm0fnodesize = _calcsize(_fm0node)
179 179
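# Illustrative sketch: round-tripping one version-0 fixed part through the
# layout documented above. The node ids are fabricated placeholders.
_example_pre = b'\x11' * 20
_example_suc = b'\x22' * 20
_example_meta = b'date:0 0'
_example_raw = (_pack(_fm0fixed, 1, len(_example_meta), 0, _example_pre)
                + _example_suc + _example_meta)
_numsuc, _mdsize, _flags, _prec = _unpack(_fm0fixed, _example_raw[:_fm0fsize])
assert _numsuc == 1 and _mdsize == len(_example_meta) and _prec == _example_pre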
180 180 def _fm0readmarkers(data, off):
181 181 # Loop on markers
182 182 l = len(data)
183 183 while off + _fm0fsize <= l:
184 184 # read fixed part
185 185 cur = data[off:off + _fm0fsize]
186 186 off += _fm0fsize
187 187 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
188 188 # read replacement
189 189 sucs = ()
190 190 if numsuc:
191 191 s = (_fm0fnodesize * numsuc)
192 192 cur = data[off:off + s]
193 193 sucs = _unpack(_fm0node * numsuc, cur)
194 194 off += s
195 195 # read metadata
196 196 # (metadata will be decoded on demand)
197 197 metadata = data[off:off + mdsize]
198 198 if len(metadata) != mdsize:
199 199 raise error.Abort(_('parsing obsolete marker: metadata is too '
200 200 'short, %d bytes expected, got %d')
201 201 % (mdsize, len(metadata)))
202 202 off += mdsize
203 203 metadata = _fm0decodemeta(metadata)
204 204 try:
205 205 when, offset = metadata.pop('date', '0 0').split(' ')
206 206 date = float(when), int(offset)
207 207 except ValueError:
208 208 date = (0., 0)
209 209 parents = None
210 210 if 'p2' in metadata:
211 211 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
212 212 elif 'p1' in metadata:
213 213 parents = (metadata.pop('p1', None),)
214 214 elif 'p0' in metadata:
215 215 parents = ()
216 216 if parents is not None:
217 217 try:
218 218 parents = tuple(node.bin(p) for p in parents)
219 219 # if parent content is not a nodeid, drop the data
220 220 for p in parents:
221 221 if len(p) != 20:
222 222 parents = None
223 223 break
224 224 except TypeError:
225 225 # if content cannot be translated to nodeid drop the data.
226 226 parents = None
227 227
228 228 metadata = tuple(sorted(metadata.iteritems()))
229 229
230 230 yield (pre, sucs, flags, metadata, date, parents)
231 231
232 232 def _fm0encodeonemarker(marker):
233 233 pre, sucs, flags, metadata, date, parents = marker
234 234 if flags & usingsha256:
235 235 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
236 236 metadata = dict(metadata)
237 237 time, tz = date
238 238 metadata['date'] = '%r %i' % (time, tz)
239 239 if parents is not None:
240 240 if not parents:
241 241 # mark that we explicitly recorded no parents
242 242 metadata['p0'] = ''
243 243 for i, p in enumerate(parents, 1):
244 244 metadata['p%i' % i] = node.hex(p)
245 245 metadata = _fm0encodemeta(metadata)
246 246 numsuc = len(sucs)
247 247 format = _fm0fixed + (_fm0node * numsuc)
248 248 data = [numsuc, len(metadata), flags, pre]
249 249 data.extend(sucs)
250 250 return _pack(format, *data) + metadata
251 251
252 252 def _fm0encodemeta(meta):
253 253 """Return encoded metadata string to string mapping.
254 254
255 255 Assume no ':' in key and no '\0' in both key and value."""
256 256 for key, value in meta.iteritems():
257 257 if ':' in key or '\0' in key:
258 258 raise ValueError("':' and '\0' are forbidden in metadata key'")
259 259 if '\0' in value:
260 260 raise ValueError("'\0' is forbidden in metadata values")
261 261 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262 262
263 263 def _fm0decodemeta(data):
264 264 """Return string to string dictionary from encoded version."""
265 265 d = {}
266 266 for l in data.split('\0'):
267 267 if l:
268 268 key, value = l.split(':')
269 269 d[key] = value
270 270 return d
271 271
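# Illustrative sketch: the nul-separated "key:value" encoding above
# round-trips a simple mapping (values must not contain ':' for decoding).
_example_meta = {'user': 'alice', 'p1': '00' * 20}
assert _fm0decodemeta(_fm0encodemeta(_example_meta)) == _example_meta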
272 272 ## Parsing and writing of version "1"
273 273 #
274 274 # The header is followed by the markers. Each marker is made of:
275 275 #
276 276 # - uint32: total size of the marker (including this field)
277 277 #
278 278 # - float64: date in seconds since epoch
279 279 #
280 280 # - int16: timezone offset in minutes
281 281 #
282 282 # - uint16: a bit field. It is reserved for flags used in common
283 283 # obsolete marker operations, to avoid repeated decoding of metadata
284 284 # entries.
285 285 #
286 286 # - uint8: number of successors "N", can be zero.
287 287 #
288 288 # - uint8: number of parents "P", can be zero.
289 289 #
290 290 # 0: parents data stored but no parent,
291 291 # 1: one parent stored,
292 292 # 2: two parents stored,
293 293 # 3: no parent data stored
294 294 #
295 295 # - uint8: number of metadata entries M
296 296 #
297 297 # - 20 or 32 bytes: precursor changeset identifier.
298 298 #
299 299 # - N*(20 or 32) bytes: successor changeset identifiers.
300 300 #
301 301 # - P*(20 or 32) bytes: parents of the precursor changeset.
302 302 #
303 303 # - M*(uint8, uint8): size of all metadata entries (key and value)
304 304 #
305 305 # - remaining bytes: the metadata, each (key, value) pair after the other.
306 306 _fm1version = 1
307 307 _fm1fixed = '>IdhHBBB20s'
308 308 _fm1nodesha1 = '20s'
309 309 _fm1nodesha256 = '32s'
310 310 _fm1nodesha1size = _calcsize(_fm1nodesha1)
311 311 _fm1nodesha256size = _calcsize(_fm1nodesha256)
312 312 _fm1fsize = _calcsize(_fm1fixed)
313 313 _fm1parentnone = 3
314 314 _fm1parentshift = 14
315 315 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
316 316 _fm1metapair = 'BB'
317 317 _fm1metapairsize = _calcsize('BB')
318 318
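# Illustrative sketch: packing and unpacking the version-1 fixed part
# documented above; a parent count of 3 (_fm1parentnone) means "no parent
# data stored". The node id is a fabricated placeholder.
_example_fixed = _pack(_fm1fixed, _fm1fsize, 0.0, 0, 0, 0, _fm1parentnone, 0,
                       b'\x00' * 20)
(_tsize, _secs, _tz, _flags, _nsuc,
 _npar, _nmeta, _prec) = _unpack(_fm1fixed, _example_fixed)
assert _tsize == _fm1fsize and _npar == _fm1parentnone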
319 319 def _fm1purereadmarkers(data, off):
320 320 # make some global constants local for performance
321 321 noneflag = _fm1parentnone
322 322 sha2flag = usingsha256
323 323 sha1size = _fm1nodesha1size
324 324 sha2size = _fm1nodesha256size
325 325 sha1fmt = _fm1nodesha1
326 326 sha2fmt = _fm1nodesha256
327 327 metasize = _fm1metapairsize
328 328 metafmt = _fm1metapair
329 329 fsize = _fm1fsize
330 330 unpack = _unpack
331 331
332 332 # Loop on markers
333 333 stop = len(data) - _fm1fsize
334 334 ufixed = struct.Struct(_fm1fixed).unpack
335 335
336 336 while off <= stop:
337 337 # read fixed part
338 338 o1 = off + fsize
339 339 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
340 340
341 341 if flags & sha2flag:
342 342 # FIXME: prec was read as a SHA1, needs to be amended
343 343
344 344 # read 0 or more successors
345 345 if numsuc == 1:
346 346 o2 = o1 + sha2size
347 347 sucs = (data[o1:o2],)
348 348 else:
349 349 o2 = o1 + sha2size * numsuc
350 350 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
351 351
352 352 # read parents
353 353 if numpar == noneflag:
354 354 o3 = o2
355 355 parents = None
356 356 elif numpar == 1:
357 357 o3 = o2 + sha2size
358 358 parents = (data[o2:o3],)
359 359 else:
360 360 o3 = o2 + sha2size * numpar
361 361 parents = unpack(sha2fmt * numpar, data[o2:o3])
362 362 else:
363 363 # read 0 or more successors
364 364 if numsuc == 1:
365 365 o2 = o1 + sha1size
366 366 sucs = (data[o1:o2],)
367 367 else:
368 368 o2 = o1 + sha1size * numsuc
369 369 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
370 370
371 371 # read parents
372 372 if numpar == noneflag:
373 373 o3 = o2
374 374 parents = None
375 375 elif numpar == 1:
376 376 o3 = o2 + sha1size
377 377 parents = (data[o2:o3],)
378 378 else:
379 379 o3 = o2 + sha1size * numpar
380 380 parents = unpack(sha1fmt * numpar, data[o2:o3])
381 381
382 382 # read metadata
383 383 off = o3 + metasize * nummeta
384 384 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
385 385 metadata = []
386 386 for idx in xrange(0, len(metapairsize), 2):
387 387 o1 = off + metapairsize[idx]
388 388 o2 = o1 + metapairsize[idx + 1]
389 389 metadata.append((data[off:o1], data[o1:o2]))
390 390 off = o2
391 391
392 392 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
393 393
394 394 def _fm1encodeonemarker(marker):
395 395 pre, sucs, flags, metadata, date, parents = marker
396 396 # determine node size
397 397 _fm1node = _fm1nodesha1
398 398 if flags & usingsha256:
399 399 _fm1node = _fm1nodesha256
400 400 numsuc = len(sucs)
401 401 numextranodes = numsuc
402 402 if parents is None:
403 403 numpar = _fm1parentnone
404 404 else:
405 405 numpar = len(parents)
406 406 numextranodes += numpar
407 407 formatnodes = _fm1node * numextranodes
408 408 formatmeta = _fm1metapair * len(metadata)
409 409 format = _fm1fixed + formatnodes + formatmeta
410 410 # tz is stored in minutes so we divide by 60
411 411 tz = date[1]//60
412 412 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
413 413 data.extend(sucs)
414 414 if parents is not None:
415 415 data.extend(parents)
416 416 totalsize = _calcsize(format)
417 417 for key, value in metadata:
418 418 lk = len(key)
419 419 lv = len(value)
420 420 data.append(lk)
421 421 data.append(lv)
422 422 totalsize += lk + lv
423 423 data[0] = totalsize
424 424 data = [_pack(format, *data)]
425 425 for key, value in metadata:
426 426 data.append(key)
427 427 data.append(value)
428 428 return ''.join(data)
429 429
430 430 def _fm1readmarkers(data, off):
431 431 native = getattr(parsers, 'fm1readmarkers', None)
432 432 if not native:
433 433 return _fm1purereadmarkers(data, off)
434 434 stop = len(data) - _fm1fsize
435 435 return native(data, off, stop)
436 436
437 437 # mapping to read/write various marker formats
438 438 # <version> -> (decoder, encoder)
439 439 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
440 440 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
441 441
442 442 @util.nogc
443 443 def _readmarkers(data):
444 444 """Read and enumerate markers from raw data"""
445 445 off = 0
446 446 diskversion = _unpack('>B', data[off:off + 1])[0]
447 447 off += 1
448 448 if diskversion not in formats:
449 449 raise error.Abort(_('parsing obsolete marker: unknown version %r')
450 450 % diskversion)
451 451 return diskversion, formats[diskversion][0](data, off)
452 452
453 453 def encodemarkers(markers, addheader=False, version=_fm0version):
454 454 # Kept separate from flushmarkers(), it will be reused for
455 455 # markers exchange.
456 456 encodeone = formats[version][1]
457 457 if addheader:
458 458 yield _pack('>B', version)
459 459 for marker in markers:
460 460 yield encodeone(marker)
461 461
462 462
463 463 class marker(object):
464 464 """Wrap obsolete marker raw data"""
465 465
466 466 def __init__(self, repo, data):
467 467 # the repo argument will be used to create changectxs in a later version
468 468 self._repo = repo
469 469 self._data = data
470 470 self._decodedmeta = None
471 471
472 472 def __hash__(self):
473 473 return hash(self._data)
474 474
475 475 def __eq__(self, other):
476 476 if type(other) != type(self):
477 477 return False
478 478 return self._data == other._data
479 479
480 480 def precnode(self):
481 481 """Precursor changeset node identifier"""
482 482 return self._data[0]
483 483
484 484 def succnodes(self):
485 485 """List of successor changesets node identifiers"""
486 486 return self._data[1]
487 487
488 488 def parentnodes(self):
489 489 """Parents of the precursors (None if not recorded)"""
490 490 return self._data[5]
491 491
492 492 def metadata(self):
493 493 """Decoded metadata dictionary"""
494 494 return dict(self._data[3])
495 495
496 496 def date(self):
497 497 """Creation date as (unixtime, offset)"""
498 498 return self._data[4]
499 499
500 500 def flags(self):
501 501 """The flags field of the marker"""
502 502 return self._data[2]
503 503
504 504 @util.nogc
505 505 def _addsuccessors(successors, markers):
506 506 for mark in markers:
507 507 successors.setdefault(mark[0], set()).add(mark)
508 508
509 509 @util.nogc
510 510 def _addprecursors(precursors, markers):
511 511 for mark in markers:
512 512 for suc in mark[1]:
513 513 precursors.setdefault(suc, set()).add(mark)
514 514
515 515 @util.nogc
516 516 def _addchildren(children, markers):
517 517 for mark in markers:
518 518 parents = mark[5]
519 519 if parents is not None:
520 520 for p in parents:
521 521 children.setdefault(p, set()).add(mark)
522 522
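# Illustrative sketch: how the three helpers above index one fake marker
# tuple (prec, sucs, flags, meta, date, parents) under each mapping.
_m = (b'P', (b'S1', b'S2'), 0, (), (0.0, 0), (b'PP',))
_succ, _prec, _chld = {}, {}, {}
_addsuccessors(_succ, [_m])
_addprecursors(_prec, [_m])
_addchildren(_chld, [_m])
assert _m in _succ[b'P'] and _m in _prec[b'S1'] and _m in _chld[b'PP']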
523 523 def _checkinvalidmarkers(markers):
524 524 """search for marker with invalid data and raise error if needed
525 525
526 526 Exist as a separated function to allow the evolve extension for a more
527 527 subtle handling.
528 528 """
529 529 for mark in markers:
530 530 if node.nullid in mark[1]:
531 531 raise error.Abort(_('bad obsolescence marker detected: '
532 532 'invalid successors nullid'))
533 533
534 534 class obsstore(object):
535 535 """Store obsolete markers
536 536
537 537 Markers can be accessed with several mappings:
538 538 - precursors[x] -> set(markers on precursors edges of x)
539 539 - successors[x] -> set(markers on successors edges of x)
540 540 - children[x] -> set(markers on precursors edges of children(x))
541 541 """
542 542
543 543 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
544 544 # prec: nodeid, precursor changesets
545 545 # succs: tuple of nodeid, successor changesets (0-N length)
546 546 # flag: integer, flag field carrying modifier for the markers (see doc)
547 547 # meta: binary blob, encoded metadata dictionary
548 548 # date: (float, int) tuple, date of marker creation
549 549 # parents: (tuple of nodeid) or None, parents of precursors
550 550 # None is used when no data has been recorded
551 551
552 552 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
553 553 # caches for various obsolescence related computations
554 554 self.caches = {}
555 555 self.svfs = svfs
556 556 self._version = defaultformat
557 557 self._readonly = readonly
558 558
559 559 def __iter__(self):
560 560 return iter(self._all)
561 561
562 562 def __len__(self):
563 563 return len(self._all)
564 564
565 565 def __nonzero__(self):
566 566 if not self._cached('_all'):
567 567 try:
568 568 return self.svfs.stat('obsstore').st_size > 1
569 569 except OSError as inst:
570 570 if inst.errno != errno.ENOENT:
571 571 raise
572 572 # just build an empty _all list if no obsstore exists, which
573 573 # avoids further stat() syscalls
574 574 pass
575 575 return bool(self._all)
576 576
577 577 __bool__ = __nonzero__
578 578
579 579 @property
580 580 def readonly(self):
581 581 """True if marker creation is disabled
582 582
583 583 Remove me in the future when obsolete markers are always on."""
584 584 return self._readonly
585 585
586 586 def create(self, transaction, prec, succs=(), flag=0, parents=None,
587 587 date=None, metadata=None, ui=None):
588 588 """obsolete: add a new obsolete marker
589 589
590 590 * ensure it is hashable
591 591 * check mandatory metadata
592 592 * encode metadata
593 593
594 594 If you are a human writing code that creates markers, you want to use the
595 595 `createmarkers` function in this module instead.
596 596
597 597 return True if a new marker has been added, False if the marker
598 598 already existed (no-op).
599 599 """
600 600 if metadata is None:
601 601 metadata = {}
602 602 if date is None:
603 603 if 'date' in metadata:
604 604 # as a courtesy for out-of-tree extensions
605 605 date = util.parsedate(metadata.pop('date'))
606 606 elif ui is not None:
607 607 date = ui.configdate('devel', 'default-date')
608 608 if date is None:
609 609 date = util.makedate()
610 610 else:
611 611 date = util.makedate()
612 612 if len(prec) != 20:
613 613 raise ValueError(prec)
614 614 for succ in succs:
615 615 if len(succ) != 20:
616 616 raise ValueError(succ)
617 617 if prec in succs:
618 618 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
619 619
620 620 metadata = tuple(sorted(metadata.iteritems()))
621 621
622 622 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
623 623 return bool(self.add(transaction, [marker]))
624 624
625 625 def add(self, transaction, markers):
626 626 """Add new markers to the store
627 627
628 628 Take care of filtering out duplicates.
629 629 Return the number of new markers."""
630 630 if self._readonly:
631 631 raise error.Abort(_('creating obsolete markers is not enabled on '
632 632 'this repo'))
633 633 known = set(self._all)
634 634 new = []
635 635 for m in markers:
636 636 if m not in known:
637 637 known.add(m)
638 638 new.append(m)
639 639 if new:
640 640 f = self.svfs('obsstore', 'ab')
641 641 try:
642 642 offset = f.tell()
643 643 transaction.add('obsstore', offset)
644 644 # offset == 0: new file - add the version header
645 645 for bytes in encodemarkers(new, offset == 0, self._version):
646 646 f.write(bytes)
647 647 finally:
648 648 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
649 649 # call 'filecacheentry.refresh()' here
650 650 f.close()
651 651 self._addmarkers(new)
652 652 # new markers *may* have changed several sets. invalidate the caches.
653 653 self.caches.clear()
654 654 # records the number of new markers for the transaction hooks
655 655 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
656 656 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
657 657 return len(new)
658 658
659 659 def mergemarkers(self, transaction, data):
660 660 """merge a binary stream of markers inside the obsstore
661 661
662 662 Returns the number of new markers added."""
663 663 version, markers = _readmarkers(data)
664 664 return self.add(transaction, markers)
665 665
666 666 @propertycache
667 667 def _all(self):
668 668 data = self.svfs.tryread('obsstore')
669 669 if not data:
670 670 return []
671 671 self._version, markers = _readmarkers(data)
672 672 markers = list(markers)
673 673 _checkinvalidmarkers(markers)
674 674 return markers
675 675
676 676 @propertycache
677 677 def successors(self):
678 678 successors = {}
679 679 _addsuccessors(successors, self._all)
680 680 return successors
681 681
682 682 @propertycache
683 683 def precursors(self):
684 684 precursors = {}
685 685 _addprecursors(precursors, self._all)
686 686 return precursors
687 687
688 688 @propertycache
689 689 def children(self):
690 690 children = {}
691 691 _addchildren(children, self._all)
692 692 return children
693 693
694 694 def _cached(self, attr):
695 695 return attr in self.__dict__
696 696
697 697 def _addmarkers(self, markers):
698 698 markers = list(markers) # to allow repeated iteration
699 699 self._all.extend(markers)
700 700 if self._cached('successors'):
701 701 _addsuccessors(self.successors, markers)
702 702 if self._cached('precursors'):
703 703 _addprecursors(self.precursors, markers)
704 704 if self._cached('children'):
705 705 _addchildren(self.children, markers)
706 706 _checkinvalidmarkers(markers)
707 707
708 708 def relevantmarkers(self, nodes):
709 709 """return a set of all obsolescence markers relevant to a set of nodes.
710 710
711 711 "relevant" to a set of nodes mean:
712 712
713 713 - markers that use this changeset as a successor
714 714 - prune markers of this changeset or of its direct children
715 715 - recursive application of the two rules on precursors of these markers
716 716
717 717 It is a set so you cannot rely on order."""
718 718
719 719 pendingnodes = set(nodes)
720 720 seenmarkers = set()
721 721 seennodes = set(pendingnodes)
722 722 precursorsmarkers = self.precursors
723 succsmarkers = self.successors
723 724 children = self.children
724 725 while pendingnodes:
725 726 direct = set()
726 727 for current in pendingnodes:
727 728 direct.update(precursorsmarkers.get(current, ()))
728 729 pruned = [m for m in children.get(current, ()) if not m[1]]
729 730 direct.update(pruned)
731 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
732 direct.update(pruned)
730 733 direct -= seenmarkers
731 734 pendingnodes = set([m[0] for m in direct])
732 735 seenmarkers |= direct
733 736 pendingnodes -= seennodes
734 737 seennodes |= pendingnodes
735 738 return seenmarkers
736 739
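# Illustrative sketch of the case fixed above: a prune marker for a node
# lives in successors[node] (the node is its precursor) with an empty
# successor list, so walking only precursors[] and children[] used to miss
# it when the pruned node itself was queried. Node id is a placeholder.
_pruned_node = b'\x0b' * 20
_prune_marker = (_pruned_node, (), 0, (), (0.0, 0), None)
_succsmarkers = {_pruned_node: set([_prune_marker])}
_found = [m for m in _succsmarkers.get(_pruned_node, ()) if not m[1]]
assert _found == [_prune_marker]  # picked up by the new succsmarkers lookup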
737 740 def commonversion(versions):
738 741 """Return the newest version listed in both versions and our local formats.
739 742
740 743 Returns None if no common version exists.
741 744 """
742 745 versions.sort(reverse=True)
743 746 # search for the highest version known on both sides
744 747 for v in versions:
745 748 if v in formats:
746 749 return v
747 750 return None
748 751
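# Illustrative sketch: with local formats {0, 1} (the keys of `formats`
# above), the highest version shared with the remote side wins.
assert commonversion([2, 1]) == 1
assert commonversion([0]) == 0
assert commonversion([7, 5]) is None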
749 752 # arbitrarily picked to fit into the 8K limit from HTTP servers
750 753 # you have to take into account:
751 754 # - the version header
752 755 # - the base85 encoding
753 756 _maxpayload = 5300
754 757
755 758 def _pushkeyescape(markers):
756 759 """encode markers into a dict suitable for pushkey exchange
757 760
758 761 - binary data is base85 encoded
759 762 - split into chunks smaller than 5300 bytes"""
760 763 keys = {}
761 764 parts = []
762 765 currentlen = _maxpayload * 2 # ensure we create a new part
763 766 for marker in markers:
764 767 nextdata = _fm0encodeonemarker(marker)
765 768 if (len(nextdata) + currentlen > _maxpayload):
766 769 currentpart = []
767 770 currentlen = 0
768 771 parts.append(currentpart)
769 772 currentpart.append(nextdata)
770 773 currentlen += len(nextdata)
771 774 for idx, part in enumerate(reversed(parts)):
772 775 data = ''.join([_pack('>B', _fm0version)] + part)
773 776 keys['dump%i' % idx] = util.b85encode(data)
774 777 return keys
775 778
776 779 def listmarkers(repo):
777 780 """List markers over pushkey"""
778 781 if not repo.obsstore:
779 782 return {}
780 783 return _pushkeyescape(sorted(repo.obsstore))
781 784
782 785 def pushmarker(repo, key, old, new):
783 786 """Push markers over pushkey"""
784 787 if not key.startswith('dump'):
785 788 repo.ui.warn(_('unknown key: %r') % key)
786 789 return 0
787 790 if old:
788 791 repo.ui.warn(_('unexpected old value for %r') % key)
789 792 return 0
790 793 data = util.b85decode(new)
791 794 lock = repo.lock()
792 795 try:
793 796 tr = repo.transaction('pushkey: obsolete markers')
794 797 try:
795 798 repo.obsstore.mergemarkers(tr, data)
796 799 repo.invalidatevolatilesets()
797 800 tr.close()
798 801 return 1
799 802 finally:
800 803 tr.release()
801 804 finally:
802 805 lock.release()
803 806
804 807 def getmarkers(repo, nodes=None):
805 808 """returns markers known in a repository
806 809
807 810 If <nodes> is specified, only markers "relevant" to those nodes are
808 811 returned"""
809 812 if nodes is None:
810 813 rawmarkers = repo.obsstore
811 814 else:
812 815 rawmarkers = repo.obsstore.relevantmarkers(nodes)
813 816
814 817 for markerdata in rawmarkers:
815 818 yield marker(repo, markerdata)
816 819
817 820 def relevantmarkers(repo, node):
818 821 """all obsolete markers relevant to some revision"""
819 822 for markerdata in repo.obsstore.relevantmarkers(node):
820 823 yield marker(repo, markerdata)
821 824
822 825
823 826 def precursormarkers(ctx):
824 827 """obsolete marker marking this changeset as a successors"""
825 828 for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
826 829 yield marker(ctx.repo(), data)
827 830
828 831 def successormarkers(ctx):
829 832 """obsolete marker making this changeset obsolete"""
830 833 for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
831 834 yield marker(ctx.repo(), data)
832 835
833 836 def allsuccessors(obsstore, nodes, ignoreflags=0):
834 837 """Yield node for every successor of <nodes>.
835 838
836 839 Some successors may be unknown locally.
837 840
838 841 This is a linear yield unsuited to detecting split changesets. It includes
839 842 initial nodes too."""
840 843 remaining = set(nodes)
841 844 seen = set(remaining)
842 845 while remaining:
843 846 current = remaining.pop()
844 847 yield current
845 848 for mark in obsstore.successors.get(current, ()):
846 849 # ignore marker flagged with specified flag
847 850 if mark[2] & ignoreflags:
848 851 continue
849 852 for suc in mark[1]:
850 853 if suc not in seen:
851 854 seen.add(suc)
852 855 remaining.add(suc)
853 856
854 857 def allprecursors(obsstore, nodes, ignoreflags=0):
855 858 """Yield node for every precursors of <nodes>.
856 859
857 860 Some precursors may be unknown locally.
858 861
859 862 This is a linear yield unsuited to detecting folded changesets. It includes
860 863 initial nodes too."""
861 864
862 865 remaining = set(nodes)
863 866 seen = set(remaining)
864 867 while remaining:
865 868 current = remaining.pop()
866 869 yield current
867 870 for mark in obsstore.precursors.get(current, ()):
868 871 # ignore marker flagged with specified flag
869 872 if mark[2] & ignoreflags:
870 873 continue
871 874 suc = mark[0]
872 875 if suc not in seen:
873 876 seen.add(suc)
874 877 remaining.add(suc)
875 878
876 879 def foreground(repo, nodes):
877 880 """return all nodes in the "foreground" of other node
878 881
879 882 The foreground of a revision is anything reachable using parent -> children
880 883 or precursor -> successor relation. It is very similar to "descendant" but
881 884 augmented with obsolescence information.
882 885
883 886 Beware that obsolescence cycles may result in complex situations.
884 887 """
885 888 repo = repo.unfiltered()
886 889 foreground = set(repo.set('%ln::', nodes))
887 890 if repo.obsstore:
888 891 # We only need this complicated logic if there is obsolescence
889 892 # XXX will probably deserve an optimised revset.
890 893 nm = repo.changelog.nodemap
891 894 plen = -1
892 895 # compute the whole set of successors or descendants
893 896 while len(foreground) != plen:
894 897 plen = len(foreground)
895 898 succs = set(c.node() for c in foreground)
896 899 mutable = [c.node() for c in foreground if c.mutable()]
897 900 succs.update(allsuccessors(repo.obsstore, mutable))
898 901 known = (n for n in succs if n in nm)
899 902 foreground = set(repo.set('%ln::', known))
900 903 return set(c.node() for c in foreground)
901 904
902 905
903 906 def successorssets(repo, initialnode, cache=None):
904 907 """Return set of all latest successors of initial nodes
905 908
906 909 The successors set of a changeset A is the group of revisions that succeed
907 910 A. It succeeds A as a consistent whole, each revision being only a partial
908 911 replacement. The successors set contains non-obsolete changesets only.
909 912
910 913 This function returns the full list of successor sets which is why it
911 914 returns a list of tuples and not just a single tuple. Each tuple is a valid
912 915 successors set. Note that (A,) may be a valid successors set for changeset A
913 916 (see below).
914 917
915 918 In most cases, a changeset A will have a single element (e.g. the changeset
916 919 A is replaced by A') in its successors set. Though, it is also common for a
917 920 changeset A to have no elements in its successor set (e.g. the changeset
918 921 has been pruned). Therefore, the returned list of successors sets will be
919 922 [(A',)] or [], respectively.
920 923
921 924 When a changeset A is split into A' and B', however, it will result in a
922 925 successors set containing more than a single element, i.e. [(A',B')].
923 926 Divergent changesets will result in multiple successors sets, i.e. [(A',),
924 927 (A'')].
925 928
926 929 If a changeset A is not obsolete, then it will conceptually have no
927 930 successors set. To distinguish this from a pruned changeset, the successor
928 931 set will contain itself only, i.e. [(A,)].
929 932
930 933 Finally, successors unknown locally are considered to be pruned (obsoleted
931 934 without any successors).
932 935
933 936 The optional `cache` parameter is a dictionary that may contain precomputed
934 937 successors sets. It is meant to reuse the computation of a previous call to
935 938 `successorssets` when multiple calls are made at the same time. The cache
936 939 dictionary is updated in place. The caller is responsible for its life
937 940 span. Code that makes multiple calls to `successorssets` *must* use this
938 941 cache mechanism or suffer terrible performance.
939 942 """
940 943
941 944 succmarkers = repo.obsstore.successors
942 945
943 946 # Stack of nodes we search successors sets for
944 947 toproceed = [initialnode]
945 948 # set version of above list for fast loop detection
946 949 # element added to "toproceed" must be added here
947 950 stackedset = set(toproceed)
948 951 if cache is None:
949 952 cache = {}
950 953
951 954 # This while loop is the flattened version of a recursive search for
952 955 # successors sets
953 956 #
954 957 # def successorssets(x):
955 958 # successors = directsuccessors(x)
956 959 # ss = [[]]
957 960 # for succ in directsuccessors(x):
958 961 # # product as in itertools cartesian product
959 962 # ss = product(ss, successorssets(succ))
960 963 # return ss
961 964 #
962 965 # But we can not use plain recursive calls here:
963 966 # - that would blow the python call stack
964 967 # - obsolescence markers may have cycles, we need to handle them.
965 968 #
966 969 # The `toproceed` list acts as our call stack. Every node we search
967 970 # successors sets for is stacked there.
968 971 #
969 972 # The `stackedset` is a set version of this stack used to check if a node
970 973 # is already stacked. This check is used to detect cycles and prevent
971 974 # infinite loops.
972 975 #
973 976 # successors sets of all nodes are stored in the `cache` dictionary.
974 977 #
975 978 # After this while loop ends we use the cache to return the successors sets
976 979 # for the node requested by the caller.
977 980 while toproceed:
978 981 # Every iteration tries to compute the successors sets of the topmost
979 982 # node of the stack: CURRENT.
980 983 #
981 984 # There are four possible outcomes:
982 985 #
983 986 # 1) We already know the successors sets of CURRENT:
984 987 # -> mission accomplished, pop it from the stack.
985 988 # 2) Node is not obsolete:
986 989 # -> the node is its own successors sets. Add it to the cache.
987 990 # 3) We do not know successors set of direct successors of CURRENT:
988 991 # -> We add those successors to the stack.
989 992 # 4) We know successors sets of all direct successors of CURRENT:
990 993 # -> We can compute CURRENT successors set and add it to the
991 994 # cache.
992 995 #
993 996 current = toproceed[-1]
994 997 if current in cache:
995 998 # case (1): We already know the successors sets
996 999 stackedset.remove(toproceed.pop())
997 1000 elif current not in succmarkers:
998 1001 # case (2): The node is not obsolete.
999 1002 if current in repo:
1000 1003 # We have a valid last successors.
1001 1004 cache[current] = [(current,)]
1002 1005 else:
1003 1006 # Final obsolete version is unknown locally.
1004 1007 # Do not count that as a valid successor.
1005 1008 cache[current] = []
1006 1009 else:
1007 1010 # cases (3) and (4)
1008 1011 #
1009 1012 # We proceed in two phases. Phase 1 aims to distinguish case (3)
1010 1013 # from case (4):
1011 1014 #
1012 1015 # For each direct successors of CURRENT, we check whether its
1013 1016 # successors sets are known. If they are not, we stack the
1014 1017 # unknown node and proceed to the next iteration of the while
1015 1018 # loop. (case 3)
1016 1019 #
1017 1020 # During this step, we may detect obsolescence cycles: a node
1018 1021 # with unknown successors sets but already in the call stack.
1019 1022 # In such a situation, we arbitrarily set the successors sets of
1020 1023 # the node to nothing (node pruned) to break the cycle.
1021 1024 #
1022 1025 # If no break was encountered we proceed to phase 2.
1023 1026 #
1024 1027 # Phase 2 computes successors sets of CURRENT (case 4); see details
1025 1028 # in phase 2 itself.
1026 1029 #
1027 1030 # Note the two levels of iteration in each phase.
1028 1031 # - The first one handles obsolescence markers using CURRENT as
1029 1032 # precursor (successors markers of CURRENT).
1030 1033 #
1031 1034 # Having multiple entries here means divergence.
1032 1035 #
1033 1036 # - The second one handles successors defined in each marker.
1034 1037 #
1035 1038 # Having none means a pruned node, multiple successors mean a split, and
1036 1039 # a single successor is a standard replacement.
1037 1040 #
1038 1041 for mark in sorted(succmarkers[current]):
1039 1042 for suc in mark[1]:
1040 1043 if suc not in cache:
1041 1044 if suc in stackedset:
1042 1045 # cycle breaking
1043 1046 cache[suc] = []
1044 1047 else:
1045 1048 # case (3) If we have not computed successors sets
1046 1049 # of one of those successors we add it to the
1047 1050 # `toproceed` stack and stop all work for this
1048 1051 # iteration.
1049 1052 toproceed.append(suc)
1050 1053 stackedset.add(suc)
1051 1054 break
1052 1055 else:
1053 1056 continue
1054 1057 break
1055 1058 else:
1056 1059 # case (4): we know all successors sets of all direct
1057 1060 # successors
1058 1061 #
1059 1062 # Successors set contributed by each marker depends on the
1060 1063 # successors sets of all its "successors" node.
1061 1064 #
1062 1065 # Each different marker is a divergence in the obsolescence
1063 1066 # history. It contributes successors sets distinct from other
1064 1067 # markers.
1065 1068 #
1066 1069 # Within a marker, a successor may have divergent successors
1067 1070 # sets. In such a case, the marker will contribute multiple
1068 1071 # divergent successors sets. If multiple successors have
1069 1072 # divergent successors sets, a Cartesian product is used.
1070 1073 #
1071 1074 # At the end we post-process successors sets to remove
1072 1075 # duplicated entry and successors set that are strict subset of
1073 1076 # another one.
1074 1077 succssets = []
1075 1078 for mark in sorted(succmarkers[current]):
1076 1079 # successors sets contributed by this marker
1077 1080 markss = [[]]
1078 1081 for suc in mark[1]:
1080 1083 # cartesian product with previous successors
1080 1083 productresult = []
1081 1084 for prefix in markss:
1082 1085 for suffix in cache[suc]:
1083 1086 newss = list(prefix)
1084 1087 for part in suffix:
1085 1088 # do not duplicate entries in the successors set;
1086 1089 # the first entry wins.
1087 1090 if part not in newss:
1088 1091 newss.append(part)
1089 1092 productresult.append(newss)
1090 1093 markss = productresult
1091 1094 succssets.extend(markss)
1092 1095 # remove duplicated and subset
1093 1096 seen = []
1094 1097 final = []
1095 1098 candidate = sorted(((set(s), s) for s in succssets if s),
1096 1099 key=lambda x: len(x[1]), reverse=True)
1097 1100 for setversion, listversion in candidate:
1098 1101 for seenset in seen:
1099 1102 if setversion.issubset(seenset):
1100 1103 break
1101 1104 else:
1102 1105 final.append(listversion)
1103 1106 seen.append(setversion)
1104 1107 final.reverse() # put small successors set first
1105 1108 cache[current] = final
1106 1109 return cache[initialnode]
1107 1110
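# Illustrative sketch of the per-marker cartesian product performed in case
# (4) above: one split marker whose successors 'B' and 'C' already have
# known successors sets, 'C' being divergent.
_cache = {'B': [('B1',)], 'C': [('C1',), ('C2',)]}
_markss = [[]]
for _suc in ('B', 'C'):
    _markss = [_prefix + [_p for _p in _suffix if _p not in _prefix]
               for _prefix in _markss for _suffix in _cache[_suc]]
assert _markss == [['B1', 'C1'], ['B1', 'C2']]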
1108 1111 # mapping of 'set-name' -> <function to compute this set>
1109 1112 cachefuncs = {}
1110 1113 def cachefor(name):
1111 1114 """Decorator to register a function as computing the cache for a set"""
1112 1115 def decorator(func):
1113 1116 assert name not in cachefuncs
1114 1117 cachefuncs[name] = func
1115 1118 return func
1116 1119 return decorator
1117 1120
1118 1121 def getrevs(repo, name):
1119 1122 """Return the set of revision that belong to the <name> set
1120 1123
1121 1124 Such access may compute the set and cache it for future use"""
1122 1125 repo = repo.unfiltered()
1123 1126 if not repo.obsstore:
1124 1127 return frozenset()
1125 1128 if name not in repo.obsstore.caches:
1126 1129 repo.obsstore.caches[name] = cachefuncs[name](repo)
1127 1130 return repo.obsstore.caches[name]
1128 1131
1129 1132 # To keep things simple we need to invalidate the obsolescence caches when:
1130 1133 #
1131 1134 # - a new changeset is added
1132 1135 # - the public phase is changed
1133 1136 # - obsolescence markers are added
1134 1137 # - strip is used on a repo
1135 1138 def clearobscaches(repo):
1136 1139 """Remove all obsolescence related cache from a repo
1137 1140
1138 1141 This remove all cache in obsstore is the obsstore already exist on the
1139 1142 repo.
1140 1143
1141 1144 (We could be smarter here given the exact event that trigger the cache
1142 1145 clearing)"""
1143 1146 # only clear caches if there is obsstore data in this repo
1144 1147 if 'obsstore' in repo._filecache:
1145 1148 repo.obsstore.caches.clear()
1146 1149
1147 1150 @cachefor('obsolete')
1148 1151 def _computeobsoleteset(repo):
1149 1152 """the set of obsolete revisions"""
1150 1153 obs = set()
1151 1154 getnode = repo.changelog.node
1152 1155 notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
1153 1156 for r in notpublic:
1154 1157 if getnode(r) in repo.obsstore.successors:
1155 1158 obs.add(r)
1156 1159 return obs
1157 1160
1158 1161 @cachefor('unstable')
1159 1162 def _computeunstableset(repo):
1160 1163 """the set of non obsolete revisions with obsolete parents"""
1161 1164 revs = [(ctx.rev(), ctx) for ctx in
1162 1165 repo.set('(not public()) and (not obsolete())')]
1163 1166 revs.sort(key=lambda x:x[0])
1164 1167 unstable = set()
1165 1168 for rev, ctx in revs:
1166 1169 # A rev is unstable if one of its parents is obsolete or unstable
1167 1170 # this works since we traverse following growing rev order
1168 1171 if any((x.obsolete() or (x.rev() in unstable))
1169 1172 for x in ctx.parents()):
1170 1173 unstable.add(rev)
1171 1174 return unstable
1172 1175
1173 1176 @cachefor('suspended')
1174 1177 def _computesuspendedset(repo):
1175 1178 """the set of obsolete parents with non obsolete descendants"""
1176 1179 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
1177 1180 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
1178 1181
1179 1182 @cachefor('extinct')
1180 1183 def _computeextinctset(repo):
1181 1184 """the set of obsolete parents without non obsolete descendants"""
1182 1185 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
1183 1186
1184 1187
1185 1188 @cachefor('bumped')
1186 1189 def _computebumpedset(repo):
1187 1190 """the set of revs trying to obsolete public revisions"""
1188 1191 bumped = set()
1189 1192 # util function (avoid attribute lookup in the loop)
1190 1193 phase = repo._phasecache.phase # would be faster to grab the full list
1191 1194 public = phases.public
1192 1195 cl = repo.changelog
1193 1196 torev = cl.nodemap.get
1194 1197 for ctx in repo.set('(not public()) and (not obsolete())'):
1195 1198 rev = ctx.rev()
1196 1199 # We only evaluate mutable, non-obsolete revisions
1197 1200 node = ctx.node()
1198 1201 # (future) A cache of precursors may be worth it if splits are very common
1199 1202 for pnode in allprecursors(repo.obsstore, [node],
1200 1203 ignoreflags=bumpedfix):
1201 1204 prev = torev(pnode) # unfiltered! but so is phasecache
1202 1205 if (prev is not None) and (phase(repo, prev) <= public):
1203 1206 # we have a public precursor
1204 1207 bumped.add(rev)
1205 1208 break # Next draft!
1206 1209 return bumped
1207 1210
1208 1211 @cachefor('divergent')
1209 1212 def _computedivergentset(repo):
1210 1213 """the set of rev that compete to be the final successors of some revision.
1211 1214 """
1212 1215 divergent = set()
1213 1216 obsstore = repo.obsstore
1214 1217 newermap = {}
1215 1218 for ctx in repo.set('(not public()) - obsolete()'):
1216 1219 mark = obsstore.precursors.get(ctx.node(), ())
1217 1220 toprocess = set(mark)
1218 1221 seen = set()
1219 1222 while toprocess:
1220 1223 prec = toprocess.pop()[0]
1221 1224 if prec in seen:
1222 1225 continue # emergency cycle hanging prevention
1223 1226 seen.add(prec)
1224 1227 if prec not in newermap:
1225 1228 successorssets(repo, prec, newermap)
1226 1229 newer = [n for n in newermap[prec] if n]
1227 1230 if len(newer) > 1:
1228 1231 divergent.add(ctx.rev())
1229 1232 break
1230 1233 toprocess.update(obsstore.precursors.get(prec, ()))
1231 1234 return divergent
1232 1235
1233 1236
1234 1237 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
1235 1238 operation=None):
1236 1239 """Add obsolete markers between changesets in a repo
1237 1240
1238 1241 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1239 1242 tuples. `old` and `new` are changectx objects. metadata is an optional
1240 1243 dictionary containing metadata for this marker only. It is merged with
1241 1244 the global metadata specified through the `metadata` argument of this function.
1242 1245
1243 1246 Trying to obsolete a public changeset will raise an exception.
1244 1247
1245 1248 Current user and date are used except if specified otherwise in the
1246 1249 metadata attribute.
1247 1250
1248 1251 This function operates within a transaction of its own, but does
1249 1252 not take any lock on the repo.
1250 1253 """
1251 1254 # prepare metadata
1252 1255 if metadata is None:
1253 1256 metadata = {}
1254 1257 if 'user' not in metadata:
1255 1258 metadata['user'] = repo.ui.username()
1256 1259 useoperation = repo.ui.configbool('experimental',
1257 1260 'evolution.track-operation',
1258 1261 False)
1259 1262 if useoperation and operation:
1260 1263 metadata['operation'] = operation
1261 1264 tr = repo.transaction('add-obsolescence-marker')
1262 1265 try:
1263 1266 markerargs = []
1264 1267 for rel in relations:
1265 1268 prec = rel[0]
1266 1269 sucs = rel[1]
1267 1270 localmetadata = metadata.copy()
1268 1271 if 2 < len(rel):
1269 1272 localmetadata.update(rel[2])
1270 1273
1271 1274 if not prec.mutable():
1272 1275 raise error.Abort(_("cannot obsolete public changeset: %s")
1273 1276 % prec,
1274 1277 hint="see 'hg help phases' for details")
1275 1278 nprec = prec.node()
1276 1279 nsucs = tuple(s.node() for s in sucs)
1277 1280 npare = None
1278 1281 if not nsucs:
1279 1282 npare = tuple(p.node() for p in prec.parents())
1280 1283 if nprec in nsucs:
1281 1284 raise error.Abort(_("changeset %s cannot obsolete itself")
1282 1285 % prec)
1283 1286
1284 1287 # Creating the marker causes the hidden cache to become invalid,
1285 1288 # which causes recomputation when we ask for prec.parents() above.
1286 1289 # Resulting in n^2 behavior. So let's prepare all of the args
1287 1290 # first, then create the markers.
1288 1291 markerargs.append((nprec, nsucs, npare, localmetadata))
1289 1292
1290 1293 for args in markerargs:
1291 1294 nprec, nsucs, npare, localmetadata = args
1292 1295 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1293 1296 date=date, metadata=localmetadata,
1294 1297 ui=repo.ui)
1295 1298 repo.filteredrevcache.clear()
1296 1299 tr.close()
1297 1300 finally:
1298 1301 tr.release()
@@ -1,102 +1,198 b''
1 1 ==================================================
2 2 Test obsmarkers interaction with bundle and strip
3 3 ==================================================
4 4
5 5 In practice, this file does not yet contain any tests for bundle and strip.
6 6 But there will be some soon (tm).
7 7
8 8 For now this test checks the logic computing markers relevant to a set of
9 9 revisions. That logic will be used by "hg bundle" to select the markers to
10 10 include, and by strip to find the markers to back up.
11 11
12 12 Setup a repository with various cases
13 13 ====================================
14 14
15 15 Config setup
16 16 ------------
17 17
18 18 $ cat >> $HGRCPATH <<EOF
19 19 > [ui]
20 20 > # simpler log output
21 21 > logtemplate = "{node|short}: {desc}\n"
22 22 >
23 23 > [experimental]
24 24 > # enable evolution
25 25 > evolution = all
26 26 >
27 27 > # include obsmarkers in bundle
28 28 > evolution.bundle-obsmarker = yes
29 29 >
30 30 > [extensions]
31 31 > # needed for some tests
32 32 > strip =
33 33 > [defaults]
34 34 > # we'll query many hidden changesets
35 35 > debugobsolete = --hidden
36 36 > EOF
37 37
38 38 $ mkcommit() {
39 39 > echo "$1" > "$1"
40 40 > hg add "$1"
41 41 > hg ci -m "$1"
42 42 > }
43 43
44 44 $ getid() {
45 45 > hg log --hidden --template '{node}\n' --rev "$1"
46 46 > }
47 47
48 48 $ mktestrepo () {
49 49 > [ -n "$1" ] || exit 1
50 50 > cd $TESTTMP
51 51 > hg init $1
52 52 > cd $1
53 53 > mkcommit ROOT
54 54 > }
55 55
56 56 root setup
57 57 ----------
58 58
59 59 simple chain
60 60 ============
61 61
62 62 . A0
63 63 . ⇠ø⇠◔ A1
64 64 . |/
65 65 . ●
66 66
67 67 setup
68 68 -----
69 69
70 70 $ mktestrepo simple-chain
71 71 $ mkcommit 'C-A0'
72 72 $ hg up 'desc("ROOT")'
73 73 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
74 74 $ mkcommit 'C-A1'
75 75 created new head
76 76 $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'`
77 77 $ hg debugobsolete `getid 'desc("C-A0")'` a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1
78 78 $ hg debugobsolete a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1 `getid 'desc("C-A1")'`
79 79
80 80 $ hg up 'desc("ROOT")'
81 81 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
82 82 $ hg log --hidden -G
83 83 o cf2c22470d67: C-A1
84 84 |
85 85 | x 84fcb0dfe17b: C-A0
86 86 |/
87 87 @ ea207398892e: ROOT
88 88
89 89 $ hg debugobsolete
90 90 a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
91 91 84fcb0dfe17b256ebae52e05572993b9194c018a a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
92 92 a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1 cf2c22470d67233004e934a31184ac2b35389914 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
93 93
94 94 Actual testing
95 95 --------------
96 96
97 97 $ hg debugobsolete --rev 'desc("C-A0")'
98 98 a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
99 99 $ hg debugobsolete --rev 'desc("C-A1")'
100 100 84fcb0dfe17b256ebae52e05572993b9194c018a a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
101 101 a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
102 102 a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1 cf2c22470d67233004e934a31184ac2b35389914 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
103
104 chain with prune children
105 =========================
106
107 . ⇠⊗ B0
108 . |
109 . ⇠ø⇠◔ A1
110 . |
111 . ●
112
113 setup
114 -----
115
116 $ mktestrepo prune
117 $ mkcommit 'C-A0'
118 $ mkcommit 'C-B0'
119 $ hg up 'desc("ROOT")'
120 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
121 $ mkcommit 'C-A1'
122 created new head
123 $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'`
124 $ hg debugobsolete `getid 'desc("C-A0")'` `getid 'desc("C-A1")'`
125 $ hg debugobsolete --record-parents `getid 'desc("C-B0")'`
126 $ hg up 'desc("ROOT")'
127 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
128 $ hg log --hidden -G
129 o cf2c22470d67: C-A1
130 |
131 | x 29f93b1df87b: C-B0
132 | |
133 | x 84fcb0dfe17b: C-A0
134 |/
135 @ ea207398892e: ROOT
136
137 $ hg debugobsolete
138 a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
139 84fcb0dfe17b256ebae52e05572993b9194c018a cf2c22470d67233004e934a31184ac2b35389914 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
140 29f93b1df87baee1824e014080d8adf145f81783 0 {84fcb0dfe17b256ebae52e05572993b9194c018a} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
141
142 Actual testing
143 --------------
144
145 $ hg debugobsolete --rev 'desc("C-A0")'
146 29f93b1df87baee1824e014080d8adf145f81783 0 {84fcb0dfe17b256ebae52e05572993b9194c018a} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
147 a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
148 $ hg debugobsolete --rev 'desc("C-B0")'
149 29f93b1df87baee1824e014080d8adf145f81783 0 {84fcb0dfe17b256ebae52e05572993b9194c018a} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
150 $ hg debugobsolete --rev 'desc("C-A1")'
151 29f93b1df87baee1824e014080d8adf145f81783 0 {84fcb0dfe17b256ebae52e05572993b9194c018a} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
152 84fcb0dfe17b256ebae52e05572993b9194c018a cf2c22470d67233004e934a31184ac2b35389914 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
153 a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
154
155 chain with precursors also pruned
156 =================================
157
158 . A0 (also pruned)
159 . ⇠ø⇠◔ A1
160 . |
161 . ●
162
163 setup
164 -----
165
166 $ mktestrepo prune-inline
167 $ mkcommit 'C-A0'
168 $ hg up 'desc("ROOT")'
169 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
170 $ mkcommit 'C-A1'
171 created new head
172 $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'`
173 $ hg debugobsolete --record-parents `getid 'desc("C-A0")'`
174 $ hg debugobsolete `getid 'desc("C-A0")'` `getid 'desc("C-A1")'`
175 $ hg up 'desc("ROOT")'
176 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
177 $ hg log --hidden -G
178 o cf2c22470d67: C-A1
179 |
180 | x 84fcb0dfe17b: C-A0
181 |/
182 @ ea207398892e: ROOT
183
184 $ hg debugobsolete
185 a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
186 84fcb0dfe17b256ebae52e05572993b9194c018a 0 {ea207398892eb49e06441f10dda2a731f0450f20} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
187 84fcb0dfe17b256ebae52e05572993b9194c018a cf2c22470d67233004e934a31184ac2b35389914 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
188
189 Actual testing
190 --------------
191
192 $ hg debugobsolete --rev 'desc("C-A0")'
193 84fcb0dfe17b256ebae52e05572993b9194c018a 0 {ea207398892eb49e06441f10dda2a731f0450f20} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
194 a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
195 $ hg debugobsolete --rev 'desc("C-A1")'
196 84fcb0dfe17b256ebae52e05572993b9194c018a 0 {ea207398892eb49e06441f10dda2a731f0450f20} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
197 84fcb0dfe17b256ebae52e05572993b9194c018a cf2c22470d67233004e934a31184ac2b35389914 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
198 a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 84fcb0dfe17b256ebae52e05572993b9194c018a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}