##// END OF EJS Templates
effectflag: store an empty effect flag for the moment...
Boris Feld -
r34414:014d467f default
parent child Browse files
Show More
@@ -1,1083 +1,1095
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and news changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successors are call "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depend of the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84
85 85 parsers = policy.importmod(r'parsers')
86 86
87 87 _pack = struct.pack
88 88 _unpack = struct.unpack
89 89 _calcsize = struct.calcsize
90 90 propertycache = util.propertycache
91 91
92 92 # the obsolete feature is not mature enough to be enabled by default.
93 93 # you have to rely on third party extension extension to enable this.
94 94 _enabled = False
95 95
96 96 # Options for obsolescence
97 97 createmarkersopt = 'createmarkers'
98 98 allowunstableopt = 'allowunstable'
99 99 exchangeopt = 'exchange'
100 100
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    ``option`` is one of the module-level option constants
    (``createmarkersopt``, ``allowunstableopt``, ``exchangeopt``).

    Raises error.Abort when a dependent option is enabled without
    'createmarkers'.
    """
    result = set(repo.ui.configlist('experimental', 'stabilization'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't been
    # set but _enabled is true.
    if not result and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
121 121
122 122 ### obsolescence marker flag
123 123
124 124 ## bumpedfix flag
125 125 #
126 126 # When a changeset A' succeed to a changeset A which became public, we call A'
127 127 # "bumped" because it's a successors of a public changesets
128 128 #
129 129 # o A' (bumped)
130 130 # |`:
131 131 # | o A
132 132 # |/
133 133 # o Z
134 134 #
135 135 # The way to solve this situation is to create a new changeset Ad as children
136 136 # of A. This changeset have the same content than A'. So the diff from A to A'
137 137 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
138 138 #
139 139 # o Ad
140 140 # |`:
141 141 # | x A'
142 142 # |'|
143 143 # o | A
144 144 # |/
145 145 # o Z
146 146 #
147 147 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
148 148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
149 149 # This flag mean that the successors express the changes between the public and
150 150 # bumped version and fix the situation, breaking the transitivity of
151 151 # "bumped" here.
152 152 bumpedfix = 1
153 153 usingsha256 = 2
154 154
155 155 ## Parsing and writing of version "0"
156 156 #
157 157 # The header is followed by the markers. Each marker is made of:
158 158 #
159 159 # - 1 uint8 : number of new changesets "N", can be zero.
160 160 #
161 161 # - 1 uint32: metadata size "M" in bytes.
162 162 #
163 163 # - 1 byte: a bit field. It is reserved for flags used in common
164 164 # obsolete marker operations, to avoid repeated decoding of metadata
165 165 # entries.
166 166 #
167 167 # - 20 bytes: obsoleted changeset identifier.
168 168 #
169 169 # - N*20 bytes: new changesets identifiers.
170 170 #
171 171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 172 # string contains a key and a value, separated by a colon ':', without
173 173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 174 # cannot contain '\0'.
175 175 _fm0version = 0
176 176 _fm0fixed = '>BIB20s'
177 177 _fm0node = '20s'
178 178 _fm0fsize = _calcsize(_fm0fixed)
179 179 _fm0fnodesize = _calcsize(_fm0node)
180 180
def _fm0readmarkers(data, off, stop):
    """Decode version 0 markers from ``data[off:stop]``.

    Yields (prec, sucs, flags, metadata, date, parents) tuples, one per
    marker found in the byte range.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            # fm0 stores the date inside the metadata blob as "secs tz"
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            # 'p0' means "no parents" was explicitly recorded
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
231 231
def _fm0encodeonemarker(marker):
    """Return the version 0 binary encoding of one marker tuple.

    ``marker`` is a (prec, sucs, flags, metadata, date, parents) tuple.
    Raises error.Abort when the marker requires sha256 nodes, which the
    old format cannot represent.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    # fm0 has no dedicated date field: serialize it into the metadata
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
251 251
252 252 def _fm0encodemeta(meta):
253 253 """Return encoded metadata string to string mapping.
254 254
255 255 Assume no ':' in key and no '\0' in both key and value."""
256 256 for key, value in meta.iteritems():
257 257 if ':' in key or '\0' in key:
258 258 raise ValueError("':' and '\0' are forbidden in metadata key'")
259 259 if '\0' in value:
260 260 raise ValueError("':' is forbidden in metadata value'")
261 261 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262 262
263 263 def _fm0decodemeta(data):
264 264 """Return string to string dictionary from encoded version."""
265 265 d = {}
266 266 for l in data.split('\0'):
267 267 if l:
268 268 key, value = l.split(':')
269 269 d[key] = value
270 270 return d
271 271
272 272 ## Parsing and writing of version "1"
273 273 #
274 274 # The header is followed by the markers. Each marker is made of:
275 275 #
276 276 # - uint32: total size of the marker (including this field)
277 277 #
278 278 # - float64: date in seconds since epoch
279 279 #
280 280 # - int16: timezone offset in minutes
281 281 #
282 282 # - uint16: a bit field. It is reserved for flags used in common
283 283 # obsolete marker operations, to avoid repeated decoding of metadata
284 284 # entries.
285 285 #
286 286 # - uint8: number of successors "N", can be zero.
287 287 #
288 288 # - uint8: number of parents "P", can be zero.
289 289 #
290 290 # 0: parents data stored but no parent,
291 291 # 1: one parent stored,
292 292 # 2: two parents stored,
293 293 # 3: no parent data stored
294 294 #
295 295 # - uint8: number of metadata entries M
296 296 #
297 297 # - 20 or 32 bytes: predecessor changeset identifier.
298 298 #
299 299 # - N*(20 or 32) bytes: successors changesets identifiers.
300 300 #
301 301 # - P*(20 or 32) bytes: parents of the predecessors changesets.
302 302 #
303 303 # - M*(uint8, uint8): size of all metadata entries (key and value)
304 304 #
305 305 # - remaining bytes: the metadata, each (key, value) pair after the other.
306 306 _fm1version = 1
307 307 _fm1fixed = '>IdhHBBB20s'
308 308 _fm1nodesha1 = '20s'
309 309 _fm1nodesha256 = '32s'
310 310 _fm1nodesha1size = _calcsize(_fm1nodesha1)
311 311 _fm1nodesha256size = _calcsize(_fm1nodesha256)
312 312 _fm1fsize = _calcsize(_fm1fixed)
313 313 _fm1parentnone = 3
314 314 _fm1parentshift = 14
315 315 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
316 316 _fm1metapair = 'BB'
317 317 _fm1metapairsize = _calcsize(_fm1metapair)
318 318
def _fm1purereadmarkers(data, off, stop):
    """Pure-python decoder for version 1 markers.

    Yields (prec, sucs, flags, metadata, date, parents) tuples read from
    ``data[off:stop]``.  Used as a fallback when the C parser module does
    not provide ``fm1readmarkers``.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the size table, then the key/value bytes
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz was stored in minutes; the marker tuple carries seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
392 392
def _fm1encodeonemarker(marker):
    """Return the version 1 binary encoding of one marker tuple.

    ``marker`` is a (prec, sucs, flags, metadata, date, parents) tuple.
    Raises error.ProgrammingError when a metadata key or value exceeds
    255 bytes (their sizes are stored in single unsigned bytes).
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is a placeholder for the total size, patched below
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # metadata bytes follow the fixed-format part, key then value
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
436 436
def _fm1readmarkers(data, off, stop):
    """Decode fm1 markers, preferring the C implementation when available."""
    fastpath = getattr(parsers, 'fm1readmarkers', None)
    if fastpath:
        return fastpath(data, off, stop)
    return _fm1purereadmarkers(data, off, stop)
442 442
443 443 # mapping to read/write various marker formats
444 444 # <version> -> (decoder, encoder)
445 445 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
446 446 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
447 447
def _readmarkerversion(data):
    """Return the format version stored in the first byte of ``data``."""
    (version,) = _unpack('>B', data[0:1])
    return version
450 450
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data

    Returns a (version, iterator-of-markers) pair.
    Raises error.UnknownVersion when the header byte names a format this
    code cannot decode.
    """
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    # dispatch to the decoder registered for this format version
    return diskversion, formats[diskversion][0](data, off, stop)
463 463
def encodeheader(version=_fm0version):
    """Return the one-byte obsstore file header for ``version``."""
    header = _pack('>B', version)
    return header
466 466
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Generate the binary encoding of ``markers`` in format ``version``.

    When ``addheader`` is true, the one-byte version header is yielded
    first (used when writing a brand new obsstore file).
    """
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)
475 475
@util.nogc
def _addsuccessors(successors, markers):
    """Index ``markers`` into ``successors``, keyed by predecessor node."""
    for m in markers:
        key = m[0]
        bucket = successors.get(key)
        if bucket is None:
            bucket = successors[key] = set()
        bucket.add(m)
480 480
def _addprecursors(*args, **kwargs):
    """Deprecated alias kept for compatibility; use _addpredecessors."""
    util.nouideprecwarn("'obsolete._addprecursors' is deprecated, "
                        "use 'obsolete._addpredecessors'", '4.4')
    return _addpredecessors(*args, **kwargs)
487 487
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index ``markers`` into ``predecessors`` under each successor node."""
    for m in markers:
        for successor in m[1]:
            bucket = predecessors.get(successor)
            if bucket is None:
                bucket = predecessors[successor] = set()
            bucket.add(m)
493 493
@util.nogc
def _addchildren(children, markers):
    """Index ``markers`` under each recorded parent of their predecessor."""
    for m in markers:
        recordedparents = m[5]
        if recordedparents is None:
            # no parent information was recorded for this marker
            continue
        for parent in recordedparents:
            children.setdefault(parent, set()).add(m)
501 501
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.

    Raises error.Abort when a marker lists the null node as a successor.
    """
    for mark in markers:
        # nullid can never be a successor: that would "rewrite" a changeset
        # into nothingness while pretending a successor exists
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))
512 512
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, predecessors changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        # iterate over every marker known to the store
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # Truthiness means "at least one marker exists". When markers have
        # not been parsed yet, a cheap stat() avoids decoding the file.
        if not self._cached('_all'):
            try:
                # > 1 because a 1-byte file holds only the version header
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # nodes must be full 20-byte binary ids
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        # filter out markers already stored or duplicated in this batch
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw bytes of the on-disk obsstore ('' when the file is missing)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        # format version from the file header, or the configured default
        # when the store does not exist yet
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of every marker, decoded lazily on first access
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # predecessor node -> set of markers rewriting it
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @property
    def precursors(self):
        # deprecated alias kept for the 4.4 cycle
        msg = ("'obsstore.precursors' is deprecated, "
               "use 'obsstore.predecessors'")
        util.nouideprecwarn(msg, '4.4')

        return self.predecessors

    @propertycache
    def predecessors(self):
        # successor node -> set of markers that produced it
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        # recorded parent node -> set of markers whose predecessor is a child
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the named propertycache has already been computed
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        # incrementally extend raw data and every already-computed index
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers (empty successor list) of direct children
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # walk to the predecessors of the markers found this round
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
742 742
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        # markers are present although the feature is disabled: warn so the
        # user knows the existing data will not be maintained
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
758 758
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.

    Note: sorts ``versions`` in place (newest first), as callers expect.
    """
    versions.sort(reverse=True)
    # first match in descending order is the newest mutually-known version
    return next((candidate for candidate in versions if candidate in formats),
                None)
770 770
771 771 # arbitrary picked to fit into 8K limit from HTTP server
772 772 # you have to take in account:
773 773 # - the version header
774 774 # - the base85 encoding
775 775 _maxpayload = 5300
776 776
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes

    Returns a dict mapping 'dump0', 'dump1', ... to base85 payloads.
    """
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            # current chunk would exceed the payload limit: start a new one
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        # each chunk is a complete fm0 stream with its own version header
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
797 797
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        # no markers at all: nothing to publish
        return {}
    return _pushkeyescape(sorted(store))
803 803
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    Returns True on success, False when the key is unknown or an old
    value was unexpectedly supplied (markers are append-only).
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            # new markers may change volatile sets (obsolete/orphan/...)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()
825 825
# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    # deprecation shim: warn, then delegate to the obsutil implementation
    movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)
831 831
def allsuccessors(obsstore, nodes, ignoreflags=0):
    # deprecation shim: warn, then delegate to the obsutil implementation
    movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
836 836
def marker(repo, data):
    # deprecation shim: warn, then delegate to the obsutil implementation
    movemsg = 'obsolete.marker moved to obsutil.marker'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.marker(repo, data)
841 841
def getmarkers(repo, nodes=None, exclusive=False):
    # deprecation shim: warn, then delegate to the obsutil implementation
    movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
846 846
def exclusivemarkers(repo, nodes):
    # deprecation shim: warn, then delegate to the obsutil implementation
    movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.exclusivemarkers(repo, nodes)
851 851
def foreground(repo, nodes):
    # deprecation shim: warn, then delegate to the obsutil implementation
    movemsg = 'obsolete.foreground moved to obsutil.foreground'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.foreground(repo, nodes)
856 856
def successorssets(repo, initialnode, cache=None):
    # deprecation shim: warn, then delegate to the obsutil implementation
    movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)
861 861
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set

    Raises error.ProgrammingError when two functions claim the same set
    name.
    """
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator
873 873
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        # no markers at all: every volatile set is empty
        return frozenset()
    if name not in repo.obsstore.caches:
        # compute on first access via the function registered with cachefor()
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
884 884
885 885 # To be simple we need to invalidate obsolescence cache when:
886 886 #
887 887 # - new changeset is added:
888 888 # - public phase is changed
889 889 # - obsolescence marker are added
890 890 # - strip is used a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This removes all caches in obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo; checking
    # _filecache avoids instantiating the obsstore just to clear nothing
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
902 902
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    mutablephases = (phases.draft, phases.secret)
    return repo._phasecache.getrevset(repo, mutablephases)
906 906
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    # a revision is obsolete when it is mutable and has at least one
    # successors marker
    getnode = repo.changelog.node
    isobs = repo.obsstore.successors.__contains__
    return set(r for r in _mutablerevs(repo) if isobs(getnode(r)))
915 915
@cachefor('unstable')
def _computeunstableset(repo):
    # Deprecated alias for the 'orphan' volatile set.
    repo.ui.deprecwarn("'unstable' volatile set is deprecated, "
                       "use 'orphan'", '4.4')
    return _computeorphanset(repo)
923 923
@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    parentrevs = repo.changelog.parentrevs
    obsolete = getrevs(repo, 'obsolete')
    candidates = _mutablerevs(repo) - obsolete
    orphans = set()
    # ascending revision order guarantees parents are classified before
    # their children, so a single pass is enough
    for rev in sorted(candidates):
        for parent in parentrevs(rev):
            if parent in obsolete or parent in orphans:
                orphans.add(rev)
                break
    return orphans
940 940
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    ancestors = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return set(r for r in getrevs(repo, 'obsolete') if r in ancestors)
946 946
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
951 951
@cachefor('bumped')
def _computebumpedset(repo):
    # Deprecated alias for the 'phasedivergent' volatile set.
    repo.ui.deprecwarn("'bumped' volatile set is deprecated, "
                       "use 'phasedivergent'", '4.4')
    return _computephasedivergentset(repo)
959 959
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revision
        node = ctx.node()
        # (future) A cache of predecessors may be worth it if split is very
        # common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped
982 982
@cachefor('divergent')
def _computedivergentset(repo):
    # Deprecated alias for the 'contentdivergent' volatile set.
    repo.ui.deprecwarn("'divergent' volatile set is deprecated, "
                       "use 'contentdivergent'", '4.4')
    return _computecontentdivergentset(repo)
990 990
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk the whole chain of predecessors of this mutable revision,
        # looking for one with more than one non-empty successors set
        mark = obsstore.predecessors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                # fills newermap[prec] as a side effect (shared cache)
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                # multiple non-empty successors sets: content-divergence
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent
1015 1015
1016 1016
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'stabilization.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling (experimental; see obsutil.geteffectflag)
    saveeffectflag = repo.ui.configbool('experimental',
                                        'effect-flags',
                                        False)

    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            # optional per-relation metadata overrides the global one
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # prune marker: record parents so the pruned node's location
                # is preserved
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Effect flag can be different by relation
            if saveeffectflag:
                # The effect flag is saved in a versioned field name for future
                # evolution
                effectflag = obsutil.geteffectflag(rel)
                localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        # new markers may hide revisions: drop the filtered-revision caches
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
@@ -1,657 +1,670
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from . import (
11 11 phases,
12 12 util
13 13 )
14 14
class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # two markers are equal when they wrap the same raw tuple
        return type(self) == type(other) and self._data == other._data

    def precnode(self):
        """(DEPRECATED) use prednode()"""
        util.nouideprecwarn("'marker.precnode' is deprecated, "
                            "use 'marker.prednode'", '4.4')
        return self.prednode()

    def prednode(self):
        """Predecessor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the predecessors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
61 61
def getmarkers(repo, nodes=None, exclusive=False):
    """Yield `marker` objects known in a repository.

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned (or only the exclusive ones when <exclusive> is set).
    """
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        if exclusive:
            rawmarkers = exclusivemarkers(repo, nodes)
        else:
            rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
76 76
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respect the repoview filtering, filtered revision will be
    considered missing.
    """
    predecessors = repo.obsstore.predecessors
    pending = [nodeid]
    seen = {nodeid}

    while pending:
        node = pending.pop()
        for mark in predecessors.get(node, ()):
            candidate = mark[0]

            # basic cycle protection
            if candidate in seen:
                continue
            seen.add(candidate)

            if candidate in repo:
                # visible in this repo view: closest predecessor found
                yield candidate
            else:
                # filtered or unknown locally: keep walking backwards
                pending.append(candidate)
104 104
def allprecursors(*args, **kwargs):
    """ (DEPRECATED)
    """
    # deprecated alias kept for backward compatibility
    util.nouideprecwarn("'obsutil.allprecursors' is deprecated, "
                        "use 'obsutil.allpredecessors'", '4.4')
    return allpredecessors(*args, **kwargs)
113 113
def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursors of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    seen = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.predecessors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            prec = mark[0]
            if prec not in seen:
                seen.add(prec)
                pending.add(prec)
135 135
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    pending = set(nodes)
    seen = set(pending)
    while pending:
        node = pending.pop()
        yield node
        for mark in obsstore.successors.get(node, ()):
            # skip markers carrying any of the ignored flags
            if mark[2] & ignoreflags:
                continue
            for succ in mark[1]:
                if succ not in seen:
                    seen.add(succ)
                    pending.add(succ)
156 156
def _filterprunes(markers):
    """return a set with no prune markers"""
    # a prune marker has an empty successors tuple (index 1)
    return {m for m in markers if m[1]}
160 160
def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach a
    locally-known precursors outside of <nodes>. Element of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

    # (A0 rewritten as A1)
    #
    # A0 <-1- A1 # Marker "1" is exclusive to A1

    or

    # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
    #
    # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1

    or

    # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
    #
    # <-2- A1 # Marker "2" is exclusive to A0,A1
    # /
    # <-1- A0
    # \
    # <-3- A2 # Marker "3" is exclusive to A0,A2
    #
    # in addition:
    #
    # Markers "2,3" are exclusive to A1,A2
    # Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: Obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned children is
    locally-known. Since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changeset (with no
    successors) are considered exclusive to the pruned nodes. This allows
    to strip the prune markers (with the rest of the exclusive chain) alongside
    the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful item
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for head in the obshistory
    #
    # XXX we are ignoring all issues in regard with cycle for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # markers already selected
                continue

            # If the markers is about the current node, select it
            #
            # (this delay the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally known node ?
            known = prec in nm
            # if locally-known and not in the <nodes> set the traversal
            # stop here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to this
            # nodes. If we end up traversing these unselected markers later the
            # node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
281 281
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other node

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may produce unexpected results in complex
    situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants: iterate until a
        # fixed point is reached (no new node added during the last round)
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            # only locally-known nodes can be turned into changectx
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
307 307
# logic around storing and using effect flags
EFFECTFLAGFIELD = "ef1"

def geteffectflag(relation):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.

    `relation` is a (predecessor, (successor, ...)[, metadata]) tuple as
    passed to `createmarkers`.

    For now no category of change is detected, so the returned flag is
    always 0; detection of individual effects (description, parents,
    content, ...) will be added incrementally in the versioned "ef1" field.
    """
    # placeholder: always store an empty effect flag for the moment
    effects = 0

    return effects
320
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    # revisions added by the transaction itself are not "pre-existing"
    seenrevs = set(addedrevs)
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        if rev is None or rev in seenrevs:
            # unknown locally, or created by this very transaction
            continue
        seenrevs.add(rev)
        if phase(repo, rev) == public:
            # public changesets cannot become obsolete
            continue
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            # every marker obsoleting this node comes from this transaction,
            # so the node was visible before and is newly obsoleted
            obsoleted.add(rev)
    return obsoleted
329 342
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # set of obs-markers that led to this successors set
        self.markers = set()

    def copy(self):
        # shallow copy of the nodes, with an independent markers set
        new = _succs(self)
        new.markers = self.markers.copy()
        return new

    @util.propertycache
    def _set(self):
        # immutable
        return set(self)

    def canmerge(self, other):
        # True when every node of this set also belongs to `other`
        return self._set.issubset(other._set)
349 362
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are return (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respect the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contains
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending of the 'closest' mode, the same cache
    cannot be reused for both mode.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #      -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #      -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #      -> We can compute CURRENT successors set and add it to the
        #         cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
578 591
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate
    Returns a list of dict, one dict per successors set
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
        else:
            # successorsset returns an empty set() when ctx or one of its
            # successors is pruned.
            # In this case, walk the obs-markers tree again starting with ctx
            # and find the relevant pruning obs-markers, the ones without
            # successors.
            # Having these markers allow us to compute some information about
            # its fate, like who pruned this changeset and when.

            # XXX we do not catch all prune markers (eg rewritten then pruned)
            # (fix me later)
            foundany = False
            for mark in succsmap.get(ctx.node(), ()):
                if not mark[1]:
                    foundany = True
                    sset = _succs()
                    sset.markers.add(mark)
                    fullsuccessorsets.append(sset)
            if not foundany:
                # no direct prune marker found; report an empty fate
                fullsuccessorsets.append(_succs())

    values = []
    for sset in fullsuccessorsets:
        values.append({'successors': sset, 'markers': sset.markers})

    return values
625 638
def successorsetverb(successorset):
    """Return the verb summarizing the successorset."""
    # empty -> pruned, one successor -> rewritten, several -> split
    if not successorset:
        return 'pruned'
    if len(successorset) == 1:
        return 'rewritten'
    return 'split'
636 649
def markersdates(markers):
    """Return the creation date of every marker in *markers*."""
    # the date lives at index 4 of the raw marker tuple
    return [mark[4] for mark in markers]
641 654
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    users = set()
    for mark in markers:
        # metadata is stored as a tuple of (key, value) pairs at index 3
        user = dict(mark[3]).get('user')
        if user:
            users.add(user)
    return sorted(users)
649 662
def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    operations = set()
    for mark in markers:
        # metadata is stored as a tuple of (key, value) pairs at index 3
        operation = dict(mark[3]).get('operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
General Comments 0
You need to be logged in to leave comments. Login now