##// END OF EJS Templates
obsstore: rename precursors into predecessors...
Boris Feld -
r33699:d5acd967 default
parent child Browse files
Show More
@@ -1,1041 +1,1049 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and news changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successors are call "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depend of the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84
85 85 parsers = policy.importmod(r'parsers')
86 86
87 87 _pack = struct.pack
88 88 _unpack = struct.unpack
89 89 _calcsize = struct.calcsize
90 90 propertycache = util.propertycache
91 91
92 92 # the obsolete feature is not mature enough to be enabled by default.
93 93 # you have to rely on third party extension extension to enable this.
94 94 _enabled = False
95 95
96 96 # Options for obsolescence
97 97 createmarkersopt = 'createmarkers'
98 98 allowunstableopt = 'allowunstable'
99 99 exchangeopt = 'exchange'
100 100
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    `option` is one of the module-level option names (e.g. 'createmarkers').
    Raises error.Abort if dependent options are enabled without
    'createmarkers'.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't been
    # set but _enabled is true.
    if not result and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
121 121
122 122 ### obsolescence marker flag
123 123
124 124 ## bumpedfix flag
125 125 #
126 126 # When a changeset A' succeed to a changeset A which became public, we call A'
127 127 # "bumped" because it's a successors of a public changesets
128 128 #
129 129 # o A' (bumped)
130 130 # |`:
131 131 # | o A
132 132 # |/
133 133 # o Z
134 134 #
135 135 # The way to solve this situation is to create a new changeset Ad as children
136 136 # of A. This changeset have the same content than A'. So the diff from A to A'
137 137 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
138 138 #
139 139 # o Ad
140 140 # |`:
141 141 # | x A'
142 142 # |'|
143 143 # o | A
144 144 # |/
145 145 # o Z
146 146 #
147 147 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
148 148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
149 149 # This flag mean that the successors express the changes between the public and
150 150 # bumped version and fix the situation, breaking the transitivity of
151 151 # "bumped" here.
152 152 bumpedfix = 1
153 153 usingsha256 = 2
154 154
155 155 ## Parsing and writing of version "0"
156 156 #
157 157 # The header is followed by the markers. Each marker is made of:
158 158 #
159 159 # - 1 uint8 : number of new changesets "N", can be zero.
160 160 #
161 161 # - 1 uint32: metadata size "M" in bytes.
162 162 #
163 163 # - 1 byte: a bit field. It is reserved for flags used in common
164 164 # obsolete marker operations, to avoid repeated decoding of metadata
165 165 # entries.
166 166 #
167 167 # - 20 bytes: obsoleted changeset identifier.
168 168 #
169 169 # - N*20 bytes: new changesets identifiers.
170 170 #
171 171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 172 # string contains a key and a value, separated by a colon ':', without
173 173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 174 # cannot contain '\0'.
175 175 _fm0version = 0
176 176 _fm0fixed = '>BIB20s'
177 177 _fm0node = '20s'
178 178 _fm0fsize = _calcsize(_fm0fixed)
179 179 _fm0fnodesize = _calcsize(_fm0node)
180 180
def _fm0readmarkers(data, off, stop):
    """Generate marker tuples decoded from version-0 binary data.

    Yields (precursor, successors, flags, metadata, date, parents) tuples
    for each marker found between offsets `off` and `stop` in `data`.
    In format 0 the date and parents are stored inside the metadata dict
    and are popped out of it here.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            # truncated record: refuse to silently parse garbage
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            # malformed date metadata: fall back to the epoch
            date = (0., 0)
        # parents are encoded as 'p1'/'p2' keys; 'p0' alone marks an
        # explicitly-recorded empty parent list (see _fm0encodeonemarker)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        # remaining metadata entries, sorted so markers hash/compare stably
        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
231 231
def _fm0encodeonemarker(marker):
    """Serialize a single marker tuple into the version-0 binary format.

    Date and parents have no dedicated fields in format 0, so they are
    folded into the metadata dictionary before encoding.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    meta = dict(metadata)
    when, tz = date
    meta['date'] = '%r %i' % (when, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            meta['p0'] = ''
        for idx, parent in enumerate(parents, 1):
            meta['p%i' % idx] = node.hex(parent)
    encodedmeta = _fm0encodemeta(meta)
    nbsuc = len(sucs)
    fmt = _fm0fixed + (_fm0node * nbsuc)
    fields = [nbsuc, len(encodedmeta), flags, pre] + list(sucs)
    return _pack(fmt, *fields) + encodedmeta
251 251
def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    # validate every entry before emitting anything
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key'")
        if '\0' in value:
            raise ValueError("':' is forbidden in metadata value'")
    pairs = ['%s:%s' % (key, meta[key]) for key in sorted(meta)]
    return '\0'.join(pairs)
262 262
263 263 def _fm0decodemeta(data):
264 264 """Return string to string dictionary from encoded version."""
265 265 d = {}
266 266 for l in data.split('\0'):
267 267 if l:
268 268 key, value = l.split(':')
269 269 d[key] = value
270 270 return d
271 271
272 272 ## Parsing and writing of version "1"
273 273 #
274 274 # The header is followed by the markers. Each marker is made of:
275 275 #
276 276 # - uint32: total size of the marker (including this field)
277 277 #
278 278 # - float64: date in seconds since epoch
279 279 #
280 280 # - int16: timezone offset in minutes
281 281 #
282 282 # - uint16: a bit field. It is reserved for flags used in common
283 283 # obsolete marker operations, to avoid repeated decoding of metadata
284 284 # entries.
285 285 #
286 286 # - uint8: number of successors "N", can be zero.
287 287 #
288 288 # - uint8: number of parents "P", can be zero.
289 289 #
290 290 # 0: parents data stored but no parent,
291 291 # 1: one parent stored,
292 292 # 2: two parents stored,
293 293 # 3: no parent data stored
294 294 #
295 295 # - uint8: number of metadata entries M
296 296 #
297 297 # - 20 or 32 bytes: precursor changeset identifier.
298 298 #
299 299 # - N*(20 or 32) bytes: successors changesets identifiers.
300 300 #
301 301 # - P*(20 or 32) bytes: parents of the precursors changesets.
302 302 #
303 303 # - M*(uint8, uint8): size of all metadata entries (key and value)
304 304 #
305 305 # - remaining bytes: the metadata, each (key, value) pair after the other.
306 306 _fm1version = 1
307 307 _fm1fixed = '>IdhHBBB20s'
308 308 _fm1nodesha1 = '20s'
309 309 _fm1nodesha256 = '32s'
310 310 _fm1nodesha1size = _calcsize(_fm1nodesha1)
311 311 _fm1nodesha256size = _calcsize(_fm1nodesha256)
312 312 _fm1fsize = _calcsize(_fm1fixed)
313 313 _fm1parentnone = 3
314 314 _fm1parentshift = 14
315 315 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
316 316 _fm1metapair = 'BB'
317 317 _fm1metapairsize = _calcsize(_fm1metapair)
318 318
def _fm1purereadmarkers(data, off, stop):
    """Pure-Python generator decoding version-1 markers from raw bytes.

    Yields (precursor, successors, flags, metadata, date, parents) tuples.
    Used as the fallback when the C parser is unavailable (see
    _fm1readmarkers), hence the heavy local-binding micro-optimizations.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                # no parent data recorded at all
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (keysize, valuesize) pair table, then
        # the concatenated key/value payloads sliced out by those sizes
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes (see _fm1encodeonemarker); convert back
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
392 392
def _fm1encodeonemarker(marker):
    """Serialize a single marker tuple into the version-1 binary format.

    The leading uint32 (total record size) is only known after the
    metadata lengths are accumulated, so it is back-patched into data[0]
    just before packing.
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        # sentinel value meaning "no parent data stored"
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is the total-size placeholder, patched below
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # metadata payloads follow the fixed/packed part verbatim
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
428 428
def _fm1readmarkers(data, off, stop):
    """Decode version-1 markers, preferring the C parser when available."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, stop)
    return _fm1purereadmarkers(data, off, stop)
434 434
435 435 # mapping to read/write various marker formats
436 436 # <version> -> (decoder, encoder)
437 437 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
438 438 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
439 439
def _readmarkerversion(data):
    """Extract the format version byte from the start of raw obsstore data."""
    (version,) = _unpack('>B', data[0:1])
    return version
442 442
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data

    Returns (version, iterator-of-markers). `off`/`stop` default to the
    byte right after the version header and the end of `data`.
    """
    version = _readmarkerversion(data)
    if version not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % version
        raise error.UnknownVersion(msg, version=version)
    if not off:
        off = 1  # skip the single version byte at the start
    if stop is None:
        stop = len(data)
    return version, formats[version][0](data, off, stop)
455 455
def encodeheader(version=_fm0version):
    """Return the binary obsstore header: a single version byte."""
    return _pack('>B', version)
458 458
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Lazily generate the binary form of `markers` (header first if asked).

    Kept separate from flushmarkers(); it will be reused for markers
    exchange.
    """
    # resolve the encoder up front so an unknown version fails immediately
    encode = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for mark in markers:
        yield encode(mark)
467 467
@util.nogc
def _addsuccessors(successors, markers):
    """Index each marker under its precursor node in `successors`."""
    for mark in markers:
        prec = mark[0]
        if prec not in successors:
            successors[prec] = set()
        successors[prec].add(mark)
472 472
def _addprecursors(*args, **kwargs):
    """Deprecated alias kept for the 4.4 cycle; use _addpredecessors."""
    msg = ("'obsolete._addprecursors' is deprecated, "
           "use 'obsolete._addpredecessors'")
    util.nouideprecwarn(msg, '4.4')
    return _addpredecessors(*args, **kwargs)
479 479
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index each marker under every one of its successor nodes."""
    for mark in markers:
        for successor in mark[1]:
            predecessors.setdefault(successor, set()).add(mark)
485 485
@util.nogc
def _addchildren(children, markers):
    """Index each marker under every recorded parent of its precursor."""
    for mark in markers:
        parents = mark[5]
        if parents is None:
            # no parent data recorded for this marker
            continue
        for parent in parents:
            children.setdefault(parent, set()).add(mark)
493 493
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    # a marker whose successors include nullid is corrupt data
    if any(node.nullid in mark[1] for mark in markers):
        raise error.Abort(_('bad obsolescence marker detected: '
                            'invalid successors nullid'))
504 504
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x)

    'precursors' is kept as a deprecated alias of 'predecessors' for the
    4.4 cycle.
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        # iterate over every marker currently known (loads the store)
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # When markers have not been loaded yet, a stat() of the file is
        # enough to answer emptiness without parsing anything: any real
        # content is larger than the 1-byte version header.
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    # Python 3 name for the truthiness hook
    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # nodeids are 20-byte binary hashes in this format
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # sorted tuple so equal metadata always hashes/compares the same
        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            # skip markers already on disk and duplicates within this batch
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw on-disk bytes; empty string when the file does not exist
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        # version byte from disk when present, class default otherwise
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # the full list of markers, parsed lazily on first access
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # precursor-node -> set of markers rewriting it
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @property
    def precursors(self):
        # deprecated alias of 'predecessors', kept for the 4.4 cycle
        msg = ("'obsstore.precursors' is deprecated, "
               "use 'obsstore.predecessors'")
        util.nouideprecwarn(msg, '4.4')

        return self.predecessors

    @propertycache
    def predecessors(self):
        # successor-node -> set of markers producing it
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        # parent-node -> set of markers whose precursor is a child of it
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # whether a propertycache attribute has been computed already
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        """Incrementally fold new markers into the in-memory state.

        Only indexes already computed are updated, so cold caches stay
        cold and will be rebuilt from _all on demand.
        """
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers (empty successors) of direct children
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # walk backward: follow the precursors of the new markers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
726 734
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    storeargs = {}
    if defaultformat is not None:
        storeargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **storeargs)
    if store and readonly:
        # markers exist on disk but the feature is off: warn loudly
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
742 750
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both side
    return next((v for v in versions if v in formats), None)
754 762
755 763 # arbitrary picked to fit into 8K limit from HTTP server
756 764 # you have to take in account:
757 765 # - the version header
758 766 # - the base85 encoding
759 767 _maxpayload = 5300
760 768
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    chunklen = _maxpayload * 2  # oversized so the first marker opens a part
    for marker in markers:
        encoded = _fm0encodeonemarker(marker)
        if len(encoded) + chunklen > _maxpayload:
            # current part is full: start a new one
            chunk = []
            chunklen = 0
            parts.append(chunk)
        chunk.append(encoded)
        chunklen += len(encoded)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
781 789
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
787 795
def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    # pushkey names for markers are always 'dumpN' (see _pushkeyescape)
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    markerdata = util.b85decode(new)
    lock = repo.lock()
    try:
        txn = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(txn, markerdata)
            repo.invalidatevolatilesets()
            txn.close()
            return True
        finally:
            txn.release()
    finally:
        lock.release()
809 817
810 818 # keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Deprecated shim kept for the 4.3 cycle; see obsutil.allprecursors."""
    msg = 'obsolete.allprecursors moved to obsutil.allprecursors'
    util.nouideprecwarn(msg, '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)
815 823
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Deprecated shim kept for the 4.3 cycle; see obsutil.allsuccessors."""
    msg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
    util.nouideprecwarn(msg, '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
820 828
def marker(repo, data):
    """Deprecated shim kept for the 4.3 cycle; see obsutil.marker."""
    msg = 'obsolete.marker moved to obsutil.marker'
    repo.ui.deprecwarn(msg, '4.3')
    return obsutil.marker(repo, data)
825 833
def getmarkers(repo, nodes=None, exclusive=False):
    """Deprecated shim kept for the 4.3 cycle; see obsutil.getmarkers."""
    msg = 'obsolete.getmarkers moved to obsutil.getmarkers'
    repo.ui.deprecwarn(msg, '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
830 838
def exclusivemarkers(repo, nodes):
    """Deprecated shim kept for the 4.3 cycle; see obsutil.exclusivemarkers."""
    msg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
    repo.ui.deprecwarn(msg, '4.3')
    return obsutil.exclusivemarkers(repo, nodes)
835 843
def foreground(repo, nodes):
    """Deprecated shim kept for the 4.3 cycle; see obsutil.foreground."""
    msg = 'obsolete.foreground moved to obsutil.foreground'
    repo.ui.deprecwarn(msg, '4.3')
    return obsutil.foreground(repo, nodes)
840 848
def successorssets(repo, initialnode, cache=None):
    """Deprecated shim kept for the 4.3 cycle; see obsutil.successorssets."""
    msg = 'obsolete.successorssets moved to obsutil.successorssets'
    repo.ui.deprecwarn(msg, '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)
845 853
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def register(func):
        if name in cachefuncs:
            # two computations for the same set is always a programming bug
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return register
857 865
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    store = repo.obsstore
    if not store:
        return frozenset()
    caches = store.caches
    if name not in caches:
        # compute lazily and memoize on the obsstore
        caches[name] = cachefuncs[name](repo)
    return caches[name]
868 876
869 877 # To be simple we need to invalidate obsolescence cache when:
870 878 #
871 879 # - new changeset is added:
872 880 # - public phase is changed
873 881 # - obsolescence marker are added
874 882 # - strip is used a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This remove all cache in obsstore is the obsstore already exist on the
    repo.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # only clear cache is there is obsstore data in this repo
    if 'obsstore' not in repo._filecache:
        return
    repo.obsstore.caches.clear()
886 894
887 895 def _mutablerevs(repo):
888 896 """the set of mutable revision in the repository"""
889 897 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
890 898
891 899 @cachefor('obsolete')
892 900 def _computeobsoleteset(repo):
893 901 """the set of obsolete revisions"""
894 902 getnode = repo.changelog.node
895 903 notpublic = _mutablerevs(repo)
896 904 isobs = repo.obsstore.successors.__contains__
897 905 obs = set(r for r in notpublic if isobs(getnode(r)))
898 906 return obs
899 907
900 908 @cachefor('unstable')
901 909 def _computeunstableset(repo):
902 910 """the set of non obsolete revisions with obsolete parents"""
903 911 pfunc = repo.changelog.parentrevs
904 912 mutable = _mutablerevs(repo)
905 913 obsolete = getrevs(repo, 'obsolete')
906 914 others = mutable - obsolete
907 915 unstable = set()
908 916 for r in sorted(others):
909 917 # A rev is unstable if one of its parent is obsolete or unstable
910 918 # this works since we traverse following growing rev order
911 919 for p in pfunc(r):
912 920 if p in obsolete or p in unstable:
913 921 unstable.add(r)
914 922 break
915 923 return unstable
916 924
917 925 @cachefor('suspended')
918 926 def _computesuspendedset(repo):
919 927 """the set of obsolete parents with non obsolete descendants"""
920 928 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
921 929 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
922 930
923 931 @cachefor('extinct')
924 932 def _computeextinctset(repo):
925 933 """the set of obsolete parents without non obsolete descendants"""
926 934 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
927 935
928 936
929 937 @cachefor('bumped')
930 938 def _computebumpedset(repo):
931 939 """the set of revs trying to obsolete public revisions"""
932 940 bumped = set()
933 941 # util function (avoid attribute lookup in the loop)
934 942 phase = repo._phasecache.phase # would be faster to grab the full list
935 943 public = phases.public
936 944 cl = repo.changelog
937 945 torev = cl.nodemap.get
938 946 for ctx in repo.set('(not public()) and (not obsolete())'):
939 947 rev = ctx.rev()
940 948 # We only evaluate mutable, non-obsolete revision
941 949 node = ctx.node()
942 950 # (future) A cache of precursors may worth if split is very common
943 951 for pnode in obsutil.allprecursors(repo.obsstore, [node],
944 952 ignoreflags=bumpedfix):
945 953 prev = torev(pnode) # unfiltered! but so is phasecache
946 954 if (prev is not None) and (phase(repo, prev) <= public):
947 955 # we have a public precursor
948 956 bumped.add(rev)
949 957 break # Next draft!
950 958 return bumped
951 959
952 960 @cachefor('divergent')
953 961 def _computedivergentset(repo):
954 962 """the set of rev that compete to be the final successors of some revision.
955 963 """
956 964 divergent = set()
957 965 obsstore = repo.obsstore
958 966 newermap = {}
959 967 for ctx in repo.set('(not public()) - obsolete()'):
960 mark = obsstore.precursors.get(ctx.node(), ())
968 mark = obsstore.predecessors.get(ctx.node(), ())
961 969 toprocess = set(mark)
962 970 seen = set()
963 971 while toprocess:
964 972 prec = toprocess.pop()[0]
965 973 if prec in seen:
966 974 continue # emergency cycle hanging prevention
967 975 seen.add(prec)
968 976 if prec not in newermap:
969 977 obsutil.successorssets(repo, prec, cache=newermap)
970 978 newer = [n for n in newermap[prec] if n]
971 979 if len(newer) > 1:
972 980 divergent.add(ctx.rev())
973 981 break
974 toprocess.update(obsstore.precursors.get(prec, ()))
982 toprocess.update(obsstore.predecessors.get(prec, ()))
975 983 return divergent
976 984
977 985
978 986 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
979 987 operation=None):
980 988 """Add obsolete markers between changesets in a repo
981 989
982 990 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
983 991 tuple. `old` and `news` are changectx. metadata is an optional dictionary
984 992 containing metadata for this marker only. It is merged with the global
985 993 metadata specified through the `metadata` argument of this function,
986 994
987 995 Trying to obsolete a public changeset will raise an exception.
988 996
989 997 Current user and date are used except if specified otherwise in the
990 998 metadata attribute.
991 999
992 1000 This function operates within a transaction of its own, but does
993 1001 not take any lock on the repo.
994 1002 """
995 1003 # prepare metadata
996 1004 if metadata is None:
997 1005 metadata = {}
998 1006 if 'user' not in metadata:
999 1007 metadata['user'] = repo.ui.username()
1000 1008 useoperation = repo.ui.configbool('experimental',
1001 1009 'evolution.track-operation')
1002 1010 if useoperation and operation:
1003 1011 metadata['operation'] = operation
1004 1012 tr = repo.transaction('add-obsolescence-marker')
1005 1013 try:
1006 1014 markerargs = []
1007 1015 for rel in relations:
1008 1016 prec = rel[0]
1009 1017 sucs = rel[1]
1010 1018 localmetadata = metadata.copy()
1011 1019 if 2 < len(rel):
1012 1020 localmetadata.update(rel[2])
1013 1021
1014 1022 if not prec.mutable():
1015 1023 raise error.Abort(_("cannot obsolete public changeset: %s")
1016 1024 % prec,
1017 1025 hint="see 'hg help phases' for details")
1018 1026 nprec = prec.node()
1019 1027 nsucs = tuple(s.node() for s in sucs)
1020 1028 npare = None
1021 1029 if not nsucs:
1022 1030 npare = tuple(p.node() for p in prec.parents())
1023 1031 if nprec in nsucs:
1024 1032 raise error.Abort(_("changeset %s cannot obsolete itself")
1025 1033 % prec)
1026 1034
1027 1035 # Creating the marker causes the hidden cache to become invalid,
1028 1036 # which causes recomputation when we ask for prec.parents() above.
1029 1037 # Resulting in n^2 behavior. So let's prepare all of the args
1030 1038 # first, then create the markers.
1031 1039 markerargs.append((nprec, nsucs, npare, localmetadata))
1032 1040
1033 1041 for args in markerargs:
1034 1042 nprec, nsucs, npare, localmetadata = args
1035 1043 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1036 1044 date=date, metadata=localmetadata,
1037 1045 ui=repo.ui)
1038 1046 repo.filteredrevcache.clear()
1039 1047 tr.close()
1040 1048 finally:
1041 1049 tr.release()
@@ -1,544 +1,544 b''
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from . import (
11 11 phases,
12 12 util
13 13 )
14 14
15 15 class marker(object):
16 16 """Wrap obsolete marker raw data"""
17 17
18 18 def __init__(self, repo, data):
19 19 # the repo argument will be used to create changectx in later version
20 20 self._repo = repo
21 21 self._data = data
22 22 self._decodedmeta = None
23 23
24 24 def __hash__(self):
25 25 return hash(self._data)
26 26
27 27 def __eq__(self, other):
28 28 if type(other) != type(self):
29 29 return False
30 30 return self._data == other._data
31 31
32 32 def precnode(self):
33 33 msg = ("'marker.precnode' is deprecated, "
34 34 "use 'marker.prednode'")
35 35 util.nouideprecwarn(msg, '4.4')
36 36 return self.prednode()
37 37
38 38 def prednode(self):
39 39 """Predecessor changeset node identifier"""
40 40 return self._data[0]
41 41
42 42 def succnodes(self):
43 43 """List of successor changesets node identifiers"""
44 44 return self._data[1]
45 45
46 46 def parentnodes(self):
47 47 """Parents of the predecessors (None if not recorded)"""
48 48 return self._data[5]
49 49
50 50 def metadata(self):
51 51 """Decoded metadata dictionary"""
52 52 return dict(self._data[3])
53 53
54 54 def date(self):
55 55 """Creation date as (unixtime, offset)"""
56 56 return self._data[4]
57 57
58 58 def flags(self):
59 59 """The flags field of the marker"""
60 60 return self._data[2]
61 61
62 62 def getmarkers(repo, nodes=None, exclusive=False):
63 63 """returns markers known in a repository
64 64
65 65 If <nodes> is specified, only markers "relevant" to those nodes are are
66 66 returned"""
67 67 if nodes is None:
68 68 rawmarkers = repo.obsstore
69 69 elif exclusive:
70 70 rawmarkers = exclusivemarkers(repo, nodes)
71 71 else:
72 72 rawmarkers = repo.obsstore.relevantmarkers(nodes)
73 73
74 74 for markerdata in rawmarkers:
75 75 yield marker(repo, markerdata)
76 76
77 77 def closestpredecessors(repo, nodeid):
78 78 """yield the list of next predecessors pointing on visible changectx nodes
79 79
80 80 This function respect the repoview filtering, filtered revision will be
81 81 considered missing.
82 82 """
83 83
84 precursors = repo.obsstore.precursors
84 precursors = repo.obsstore.predecessors
85 85 stack = [nodeid]
86 86 seen = set(stack)
87 87
88 88 while stack:
89 89 current = stack.pop()
90 90 currentpreccs = precursors.get(current, ())
91 91
92 92 for prec in currentpreccs:
93 93 precnodeid = prec[0]
94 94
95 95 # Basic cycle protection
96 96 if precnodeid in seen:
97 97 continue
98 98 seen.add(precnodeid)
99 99
100 100 if precnodeid in repo:
101 101 yield precnodeid
102 102 else:
103 103 stack.append(precnodeid)
104 104
105 105 def allprecursors(obsstore, nodes, ignoreflags=0):
106 106 """Yield node for every precursors of <nodes>.
107 107
108 108 Some precursors may be unknown locally.
109 109
110 110 This is a linear yield unsuited to detecting folded changesets. It includes
111 111 initial nodes too."""
112 112
113 113 remaining = set(nodes)
114 114 seen = set(remaining)
115 115 while remaining:
116 116 current = remaining.pop()
117 117 yield current
118 for mark in obsstore.precursors.get(current, ()):
118 for mark in obsstore.predecessors.get(current, ()):
119 119 # ignore marker flagged with specified flag
120 120 if mark[2] & ignoreflags:
121 121 continue
122 122 suc = mark[0]
123 123 if suc not in seen:
124 124 seen.add(suc)
125 125 remaining.add(suc)
126 126
127 127 def allsuccessors(obsstore, nodes, ignoreflags=0):
128 128 """Yield node for every successor of <nodes>.
129 129
130 130 Some successors may be unknown locally.
131 131
132 132 This is a linear yield unsuited to detecting split changesets. It includes
133 133 initial nodes too."""
134 134 remaining = set(nodes)
135 135 seen = set(remaining)
136 136 while remaining:
137 137 current = remaining.pop()
138 138 yield current
139 139 for mark in obsstore.successors.get(current, ()):
140 140 # ignore marker flagged with specified flag
141 141 if mark[2] & ignoreflags:
142 142 continue
143 143 for suc in mark[1]:
144 144 if suc not in seen:
145 145 seen.add(suc)
146 146 remaining.add(suc)
147 147
148 148 def _filterprunes(markers):
149 149 """return a set with no prune markers"""
150 150 return set(m for m in markers if m[1])
151 151
152 152 def exclusivemarkers(repo, nodes):
153 153 """set of markers relevant to "nodes" but no other locally-known nodes
154 154
155 155 This function compute the set of markers "exclusive" to a locally-known
156 156 node. This means we walk the markers starting from <nodes> until we reach a
157 157 locally-known precursors outside of <nodes>. Element of <nodes> with
158 158 locally-known successors outside of <nodes> are ignored (since their
159 159 precursors markers are also relevant to these successors).
160 160
161 161 For example:
162 162
163 163 # (A0 rewritten as A1)
164 164 #
165 165 # A0 <-1- A1 # Marker "1" is exclusive to A1
166 166
167 167 or
168 168
169 169 # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
170 170 #
171 171 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
172 172
173 173 or
174 174
175 175 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
176 176 #
177 177 # <-2- A1 # Marker "2" is exclusive to A0,A1
178 178 # /
179 179 # <-1- A0
180 180 # \
181 181 # <-3- A2 # Marker "3" is exclusive to A0,A2
182 182 #
183 183 # in addition:
184 184 #
185 185 # Markers "2,3" are exclusive to A1,A2
186 186 # Markers "1,2,3" are exclusive to A0,A1,A2
187 187
188 188 See test/test-obsolete-bundle-strip.t for more examples.
189 189
190 190 An example usage is strip. When stripping a changeset, we also want to
191 191 strip the markers exclusive to this changeset. Otherwise we would have
192 192 "dangling" obsolescence markers from its precursors: Obsolescence markers
193 193 marking a node as obsolete without any successors available locally.
194 194
195 195 As for relevant markers, the prune markers for children will be followed.
196 196 Of course, they will only be followed if the pruned children is
197 197 locally-known. Since the prune markers are relevant to the pruned node.
198 198 However, while prune markers are considered relevant to the parent of the
199 199 pruned changesets, prune markers for locally-known changeset (with no
200 200 successors) are considered exclusive to the pruned nodes. This allows
201 201 to strip the prune markers (with the rest of the exclusive chain) alongside
202 202 the pruned changesets.
203 203 """
204 204 # running on a filtered repository would be dangerous as markers could be
205 205 # reported as exclusive when they are relevant for other filtered nodes.
206 206 unfi = repo.unfiltered()
207 207
208 208 # shortcut to various useful item
209 209 nm = unfi.changelog.nodemap
210 precursorsmarkers = unfi.obsstore.precursors
210 precursorsmarkers = unfi.obsstore.predecessors
211 211 successormarkers = unfi.obsstore.successors
212 212 childrenmarkers = unfi.obsstore.children
213 213
214 214 # exclusive markers (return of the function)
215 215 exclmarkers = set()
216 216 # we need fast membership testing
217 217 nodes = set(nodes)
218 218 # looking for head in the obshistory
219 219 #
220 220 # XXX we are ignoring all issues in regard with cycle for now.
221 221 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
222 222 stack.sort()
223 223 # nodes already stacked
224 224 seennodes = set(stack)
225 225 while stack:
226 226 current = stack.pop()
227 227 # fetch precursors markers
228 228 markers = list(precursorsmarkers.get(current, ()))
229 229 # extend the list with prune markers
230 230 for mark in successormarkers.get(current, ()):
231 231 if not mark[1]:
232 232 markers.append(mark)
233 233 # and markers from children (looking for prune)
234 234 for mark in childrenmarkers.get(current, ()):
235 235 if not mark[1]:
236 236 markers.append(mark)
237 237 # traverse the markers
238 238 for mark in markers:
239 239 if mark in exclmarkers:
240 240 # markers already selected
241 241 continue
242 242
243 243 # If the markers is about the current node, select it
244 244 #
245 245 # (this delay the addition of markers from children)
246 246 if mark[1] or mark[0] == current:
247 247 exclmarkers.add(mark)
248 248
249 249 # should we keep traversing through the precursors?
250 250 prec = mark[0]
251 251
252 252 # nodes in the stack or already processed
253 253 if prec in seennodes:
254 254 continue
255 255
256 256 # is this a locally known node ?
257 257 known = prec in nm
258 258 # if locally-known and not in the <nodes> set the traversal
259 259 # stop here.
260 260 if known and prec not in nodes:
261 261 continue
262 262
263 263 # do not keep going if there are unselected markers pointing to this
264 264 # nodes. If we end up traversing these unselected markers later the
265 265 # node will be taken care of at that point.
266 266 precmarkers = _filterprunes(successormarkers.get(prec))
267 267 if precmarkers.issubset(exclmarkers):
268 268 seennodes.add(prec)
269 269 stack.append(prec)
270 270
271 271 return exclmarkers
272 272
273 273 def foreground(repo, nodes):
274 274 """return all nodes in the "foreground" of other node
275 275
276 276 The foreground of a revision is anything reachable using parent -> children
277 277 or precursor -> successor relation. It is very similar to "descendant" but
278 278 augmented with obsolescence information.
279 279
280 280 Beware that possible obsolescence cycle may result if complex situation.
281 281 """
282 282 repo = repo.unfiltered()
283 283 foreground = set(repo.set('%ln::', nodes))
284 284 if repo.obsstore:
285 285 # We only need this complicated logic if there is obsolescence
286 286 # XXX will probably deserve an optimised revset.
287 287 nm = repo.changelog.nodemap
288 288 plen = -1
289 289 # compute the whole set of successors or descendants
290 290 while len(foreground) != plen:
291 291 plen = len(foreground)
292 292 succs = set(c.node() for c in foreground)
293 293 mutable = [c.node() for c in foreground if c.mutable()]
294 294 succs.update(allsuccessors(repo.obsstore, mutable))
295 295 known = (n for n in succs if n in nm)
296 296 foreground = set(repo.set('%ln::', known))
297 297 return set(c.node() for c in foreground)
298 298
299 299 def getobsoleted(repo, tr):
300 300 """return the set of pre-existing revisions obsoleted by a transaction"""
301 301 torev = repo.unfiltered().changelog.nodemap.get
302 302 phase = repo._phasecache.phase
303 303 succsmarkers = repo.obsstore.successors.get
304 304 public = phases.public
305 305 addedmarkers = tr.changes.get('obsmarkers')
306 306 addedrevs = tr.changes.get('revs')
307 307 seenrevs = set(addedrevs)
308 308 obsoleted = set()
309 309 for mark in addedmarkers:
310 310 node = mark[0]
311 311 rev = torev(node)
312 312 if rev is None or rev in seenrevs:
313 313 continue
314 314 seenrevs.add(rev)
315 315 if phase(repo, rev) == public:
316 316 continue
317 317 if set(succsmarkers(node) or []).issubset(addedmarkers):
318 318 obsoleted.add(rev)
319 319 return obsoleted
320 320
321 321 def successorssets(repo, initialnode, closest=False, cache=None):
322 322 """Return set of all latest successors of initial nodes
323 323
324 324 The successors set of a changeset A are the group of revisions that succeed
325 325 A. It succeeds A as a consistent whole, each revision being only a partial
326 326 replacement. By default, the successors set contains non-obsolete
327 327 changesets only, walking the obsolescence graph until reaching a leaf. If
328 328 'closest' is set to True, closest successors-sets are return (the
329 329 obsolescence walk stops on known changesets).
330 330
331 331 This function returns the full list of successor sets which is why it
332 332 returns a list of tuples and not just a single tuple. Each tuple is a valid
333 333 successors set. Note that (A,) may be a valid successors set for changeset A
334 334 (see below).
335 335
336 336 In most cases, a changeset A will have a single element (e.g. the changeset
337 337 A is replaced by A') in its successors set. Though, it is also common for a
338 338 changeset A to have no elements in its successor set (e.g. the changeset
339 339 has been pruned). Therefore, the returned list of successors sets will be
340 340 [(A',)] or [], respectively.
341 341
342 342 When a changeset A is split into A' and B', however, it will result in a
343 343 successors set containing more than a single element, i.e. [(A',B')].
344 344 Divergent changesets will result in multiple successors sets, i.e. [(A',),
345 345 (A'')].
346 346
347 347 If a changeset A is not obsolete, then it will conceptually have no
348 348 successors set. To distinguish this from a pruned changeset, the successor
349 349 set will contain itself only, i.e. [(A,)].
350 350
351 351 Finally, final successors unknown locally are considered to be pruned
352 352 (pruned: obsoleted without any successors). (Final: successors not affected
353 353 by markers).
354 354
355 355 The 'closest' mode respect the repoview filtering. For example, without
356 356 filter it will stop at the first locally known changeset, with 'visible'
357 357 filter it will stop on visible changesets).
358 358
359 359 The optional `cache` parameter is a dictionary that may contains
360 360 precomputed successors sets. It is meant to reuse the computation of a
361 361 previous call to `successorssets` when multiple calls are made at the same
362 362 time. The cache dictionary is updated in place. The caller is responsible
363 363 for its life span. Code that makes multiple calls to `successorssets`
364 364 *should* use this cache mechanism or risk a performance hit.
365 365
366 366 Since results are different depending on the 'closest' mode, the same cache
367 367 cannot be reused for both modes.
368 368 """
369 369
370 370 succmarkers = repo.obsstore.successors
371 371
372 372 # Stack of nodes we search successors sets for
373 373 toproceed = [initialnode]
374 374 # set version of above list for fast loop detection
375 375 # element added to "toproceed" must be added here
376 376 stackedset = set(toproceed)
377 377 if cache is None:
378 378 cache = {}
379 379
380 380 # This while loop is the flattened version of a recursive search for
381 381 # successors sets
382 382 #
383 383 # def successorssets(x):
384 384 # successors = directsuccessors(x)
385 385 # ss = [[]]
386 386 # for succ in directsuccessors(x):
387 387 # # product as in itertools cartesian product
388 388 # ss = product(ss, successorssets(succ))
389 389 # return ss
390 390 #
391 391 # But we can not use plain recursive calls here:
392 392 # - that would blow the python call stack
393 393 # - obsolescence markers may have cycles, we need to handle them.
394 394 #
395 395 # The `toproceed` list act as our call stack. Every node we search
396 396 # successors set for are stacked there.
397 397 #
398 398 # The `stackedset` is set version of this stack used to check if a node is
399 399 # already stacked. This check is used to detect cycles and prevent infinite
400 400 # loop.
401 401 #
402 402 # successors set of all nodes are stored in the `cache` dictionary.
403 403 #
404 404 # After this while loop ends we use the cache to return the successors sets
405 405 # for the node requested by the caller.
406 406 while toproceed:
407 407 # Every iteration tries to compute the successors sets of the topmost
408 408 # node of the stack: CURRENT.
409 409 #
410 410 # There are four possible outcomes:
411 411 #
412 412 # 1) We already know the successors sets of CURRENT:
413 413 # -> mission accomplished, pop it from the stack.
414 414 # 2) Stop the walk:
415 415 # default case: Node is not obsolete
416 416 # closest case: Node is known at this repo filter level
417 417 # -> the node is its own successors sets. Add it to the cache.
418 418 # 3) We do not know successors set of direct successors of CURRENT:
419 419 # -> We add those successors to the stack.
420 420 # 4) We know successors sets of all direct successors of CURRENT:
421 421 # -> We can compute CURRENT successors set and add it to the
422 422 # cache.
423 423 #
424 424 current = toproceed[-1]
425 425
426 426 # case 2 condition is a bit hairy because of closest,
427 427 # we compute it on its own
428 428 case2condition = ((current not in succmarkers)
429 429 or (closest and current != initialnode
430 430 and current in repo))
431 431
432 432 if current in cache:
433 433 # case (1): We already know the successors sets
434 434 stackedset.remove(toproceed.pop())
435 435 elif case2condition:
436 436 # case (2): end of walk.
437 437 if current in repo:
438 438 # We have a valid successors.
439 439 cache[current] = [(current,)]
440 440 else:
441 441 # Final obsolete version is unknown locally.
442 442 # Do not count that as a valid successors
443 443 cache[current] = []
444 444 else:
445 445 # cases (3) and (4)
446 446 #
447 447 # We proceed in two phases. Phase 1 aims to distinguish case (3)
448 448 # from case (4):
449 449 #
450 450 # For each direct successors of CURRENT, we check whether its
451 451 # successors sets are known. If they are not, we stack the
452 452 # unknown node and proceed to the next iteration of the while
453 453 # loop. (case 3)
454 454 #
455 455 # During this step, we may detect obsolescence cycles: a node
456 456 # with unknown successors sets but already in the call stack.
457 457 # In such a situation, we arbitrary set the successors sets of
458 458 # the node to nothing (node pruned) to break the cycle.
459 459 #
460 460 # If no break was encountered we proceed to phase 2.
461 461 #
462 462 # Phase 2 computes successors sets of CURRENT (case 4); see details
463 463 # in phase 2 itself.
464 464 #
465 465 # Note the two levels of iteration in each phase.
466 466 # - The first one handles obsolescence markers using CURRENT as
467 467 # precursor (successors markers of CURRENT).
468 468 #
469 469 # Having multiple entry here means divergence.
470 470 #
471 471 # - The second one handles successors defined in each marker.
472 472 #
473 473 # Having none means pruned node, multiple successors means split,
474 474 # single successors are standard replacement.
475 475 #
476 476 for mark in sorted(succmarkers[current]):
477 477 for suc in mark[1]:
478 478 if suc not in cache:
479 479 if suc in stackedset:
480 480 # cycle breaking
481 481 cache[suc] = []
482 482 else:
483 483 # case (3) If we have not computed successors sets
484 484 # of one of those successors we add it to the
485 485 # `toproceed` stack and stop all work for this
486 486 # iteration.
487 487 toproceed.append(suc)
488 488 stackedset.add(suc)
489 489 break
490 490 else:
491 491 continue
492 492 break
493 493 else:
494 494 # case (4): we know all successors sets of all direct
495 495 # successors
496 496 #
497 497 # Successors set contributed by each marker depends on the
498 498 # successors sets of all its "successors" node.
499 499 #
500 500 # Each different marker is a divergence in the obsolescence
501 501 # history. It contributes successors sets distinct from other
502 502 # markers.
503 503 #
504 504 # Within a marker, a successor may have divergent successors
505 505 # sets. In such a case, the marker will contribute multiple
506 506 # divergent successors sets. If multiple successors have
507 507 # divergent successors sets, a Cartesian product is used.
508 508 #
509 509 # At the end we post-process successors sets to remove
510 510 # duplicated entry and successors set that are strict subset of
511 511 # another one.
512 512 succssets = []
513 513 for mark in sorted(succmarkers[current]):
514 514 # successors sets contributed by this marker
515 515 markss = [[]]
516 516 for suc in mark[1]:
517 517 # cardinal product with previous successors
518 518 productresult = []
519 519 for prefix in markss:
520 520 for suffix in cache[suc]:
521 521 newss = list(prefix)
522 522 for part in suffix:
523 523 # do not duplicated entry in successors set
524 524 # first entry wins.
525 525 if part not in newss:
526 526 newss.append(part)
527 527 productresult.append(newss)
528 528 markss = productresult
529 529 succssets.extend(markss)
530 530 # remove duplicated and subset
531 531 seen = []
532 532 final = []
533 533 candidate = sorted(((set(s), s) for s in succssets if s),
534 534 key=lambda x: len(x[1]), reverse=True)
535 535 for setversion, listversion in candidate:
536 536 for seenset in seen:
537 537 if setversion.issubset(seenset):
538 538 break
539 539 else:
540 540 final.append(listversion)
541 541 seen.append(setversion)
542 542 final.reverse() # put small successors set first
543 543 cache[current] = final
544 544 return cache[initialnode]
General Comments 0
You need to be logged in to leave comments. Login now