obsolete: explicitly pass relation items to effectflag computation...
Boris Feld
r39956:bae6f141 default
@@ -1,1033 +1,1033 @@
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides the old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that use changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. The marker format depends on the
67 67 version. See the comment associated with each format for details.
68 68
69 69 """
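To make the marker shapes above concrete, here is a minimal standalone sketch (plain Python; the 20-byte identifiers are fabricated for illustration and not tied to any repository):

```python
# Fake 20-byte node identifiers, for illustration only.
A, A1, A2, B, C = (ch * 20 for ch in (b'a', b'b', b'c', b'd', b'e'))

amend = [(A, (A1,))]                   # A rewritten as A'
fold = [(A, (C,)), (B, (C,))]          # A and B folded into C
prune = [(A, ())]                      # A pruned: empty successor list
split = [(A, (B, C))]                  # a single marker: A split into B and C
divergence = [(A, (A1,)), (A, (A2,))]  # two independent rewrites of A
```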
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 encoding,
78 78 error,
79 79 node,
80 80 obsutil,
81 81 phases,
82 82 policy,
83 83 pycompat,
84 84 util,
85 85 )
86 86 from .utils import dateutil
87 87
88 88 parsers = policy.importmod(r'parsers')
89 89
90 90 _pack = struct.pack
91 91 _unpack = struct.unpack
92 92 _calcsize = struct.calcsize
93 93 propertycache = util.propertycache
94 94
95 95 # the obsolete feature is not mature enough to be enabled by default.
96 96 # you have to rely on a third party extension to enable this.
97 97 _enabled = False
98 98
99 99 # Options for obsolescence
100 100 createmarkersopt = 'createmarkers'
101 101 allowunstableopt = 'allowunstable'
102 102 exchangeopt = 'exchange'
103 103
104 104 def _getoptionvalue(repo, option):
105 105 """Returns True if the given repository has the given obsolete option
106 106 enabled.
107 107 """
108 108 configkey = 'evolution.%s' % option
109 109 newconfig = repo.ui.configbool('experimental', configkey)
110 110
111 111 # Return the value only if defined
112 112 if newconfig is not None:
113 113 return newconfig
114 114
115 115 # Fallback on generic option
116 116 try:
117 117 return repo.ui.configbool('experimental', 'evolution')
118 118 except (error.ConfigError, AttributeError):
119 119 # Fall back on the old-fashioned config
120 120 # inconsistent config: experimental.evolution
121 121 result = set(repo.ui.configlist('experimental', 'evolution'))
122 122
123 123 if 'all' in result:
124 124 return True
125 125
126 126 # For migration purposes, temporarily return true if the config hasn't
127 127 # been set but _enabled is true.
128 128 if len(result) == 0 and _enabled:
129 129 return True
130 130
131 131 # Temporary hack for next check
132 132 newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
133 133 if newconfig:
134 134 result.add('createmarkers')
135 135
136 136 return option in result
137 137
138 138 def getoptions(repo):
139 139 """Returns dicts showing state of obsolescence features."""
140 140
141 141 createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
142 142 unstablevalue = _getoptionvalue(repo, allowunstableopt)
143 143 exchangevalue = _getoptionvalue(repo, exchangeopt)
144 144
145 145 # createmarkers must be enabled if other options are enabled
146 146 if ((unstablevalue or exchangevalue) and not createmarkersvalue):
147 147 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
148 148 "if other obsolete options are enabled"))
149 149
150 150 return {
151 151 createmarkersopt: createmarkersvalue,
152 152 allowunstableopt: unstablevalue,
153 153 exchangeopt: exchangevalue,
154 154 }
155 155
156 156 def isenabled(repo, option):
157 157 """Returns True if the given repository has the given obsolete option
158 158 enabled.
159 159 """
160 160 return getoptions(repo)[option]
161 161
162 162 # Creating aliases for marker flags because the evolve extension looks for
163 163 # bumpedfix in obsolete.py
164 164 bumpedfix = obsutil.bumpedfix
165 165 usingsha256 = obsutil.usingsha256
166 166
167 167 ## Parsing and writing of version "0"
168 168 #
169 169 # The header is followed by the markers. Each marker is made of:
170 170 #
171 171 # - 1 uint8 : number of new changesets "N", can be zero.
172 172 #
173 173 # - 1 uint32: metadata size "M" in bytes.
174 174 #
175 175 # - 1 byte: a bit field. It is reserved for flags used in common
176 176 # obsolete marker operations, to avoid repeated decoding of metadata
177 177 # entries.
178 178 #
179 179 # - 20 bytes: obsoleted changeset identifier.
180 180 #
181 181 # - N*20 bytes: new changeset identifiers.
182 182 #
183 183 # - M bytes: metadata as a sequence of nul-terminated strings. Each
184 184 # string contains a key and a value, separated by a colon ':', without
185 185 # additional encoding. Keys cannot contain '\0' or ':' and values
186 186 # cannot contain '\0'.
187 187 _fm0version = 0
188 188 _fm0fixed = '>BIB20s'
189 189 _fm0node = '20s'
190 190 _fm0fsize = _calcsize(_fm0fixed)
191 191 _fm0fnodesize = _calcsize(_fm0node)
192 192
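As a sanity check on this layout, a standalone sketch that packs one version-0 fixed part plus a single successor and reads it back with the same struct format (node ids are hypothetical):

```python
import struct

fm0fixed = '>BIB20s'   # numsuc (uint8), mdsize (uint32), flags (uint8), prec
prec = b'\x01' * 20    # hypothetical obsoleted node id
succ = b'\x02' * 20    # hypothetical successor node id
meta = b'date:0 0'     # metadata blob ('key:value' pairs, see below)

record = struct.pack(fm0fixed + '20s', 1, len(meta), 0, prec, succ) + meta
numsuc, mdsize, flags, pre = struct.unpack_from(fm0fixed, record)
assert (numsuc, mdsize, flags, pre) == (1, len(meta), 0, prec)
```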
193 193 def _fm0readmarkers(data, off, stop):
194 194 # Loop on markers
195 195 while off < stop:
196 196 # read fixed part
197 197 cur = data[off:off + _fm0fsize]
198 198 off += _fm0fsize
199 199 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
200 200 # read replacement
201 201 sucs = ()
202 202 if numsuc:
203 203 s = (_fm0fnodesize * numsuc)
204 204 cur = data[off:off + s]
205 205 sucs = _unpack(_fm0node * numsuc, cur)
206 206 off += s
207 207 # read metadata
208 208 # (metadata will be decoded on demand)
209 209 metadata = data[off:off + mdsize]
210 210 if len(metadata) != mdsize:
211 211 raise error.Abort(_('parsing obsolete marker: metadata is too '
212 212 'short, %d bytes expected, got %d')
213 213 % (mdsize, len(metadata)))
214 214 off += mdsize
215 215 metadata = _fm0decodemeta(metadata)
216 216 try:
217 217 when, offset = metadata.pop('date', '0 0').split(' ')
218 218 date = float(when), int(offset)
219 219 except ValueError:
220 220 date = (0., 0)
221 221 parents = None
222 222 if 'p2' in metadata:
223 223 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
224 224 elif 'p1' in metadata:
225 225 parents = (metadata.pop('p1', None),)
226 226 elif 'p0' in metadata:
227 227 parents = ()
228 228 if parents is not None:
229 229 try:
230 230 parents = tuple(node.bin(p) for p in parents)
231 231 # if parent content is not a nodeid, drop the data
232 232 for p in parents:
233 233 if len(p) != 20:
234 234 parents = None
235 235 break
236 236 except TypeError:
237 237 # if content cannot be translated to nodeid drop the data.
238 238 parents = None
239 239
240 240 metadata = tuple(sorted(metadata.iteritems()))
241 241
242 242 yield (pre, sucs, flags, metadata, date, parents)
243 243
244 244 def _fm0encodeonemarker(marker):
245 245 pre, sucs, flags, metadata, date, parents = marker
246 246 if flags & usingsha256:
247 247 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
248 248 metadata = dict(metadata)
249 249 time, tz = date
250 250 metadata['date'] = '%r %i' % (time, tz)
251 251 if parents is not None:
252 252 if not parents:
253 253 # mark that we explicitly recorded no parents
254 254 metadata['p0'] = ''
255 255 for i, p in enumerate(parents, 1):
256 256 metadata['p%i' % i] = node.hex(p)
257 257 metadata = _fm0encodemeta(metadata)
258 258 numsuc = len(sucs)
259 259 format = _fm0fixed + (_fm0node * numsuc)
260 260 data = [numsuc, len(metadata), flags, pre]
261 261 data.extend(sucs)
262 262 return _pack(format, *data) + metadata
263 263
264 264 def _fm0encodemeta(meta):
265 265 """Return encoded metadata string to string mapping.
266 266
267 267 Assumes no ':' in keys and no '\0' in either keys or values."""
268 268 for key, value in meta.iteritems():
269 269 if ':' in key or '\0' in key:
270 270 raise ValueError("':' and '\0' are forbidden in metadata keys")
271 271 if '\0' in value:
272 272 raise ValueError("'\0' is forbidden in metadata values")
273 273 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
274 274
275 275 def _fm0decodemeta(data):
276 276 """Return string to string dictionary from encoded version."""
277 277 d = {}
278 278 for l in data.split('\0'):
279 279 if l:
280 280 key, value = l.split(':')
281 281 d[key] = value
282 282 return d
283 283
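A standalone round trip through the same encoding (the decoder here is a sketch that is slightly more permissive than `_fm0decodemeta` above, splitting on the first ':' only):

```python
def encodemeta(meta):
    # '\0'-separated 'key:value' pairs, sorted by key
    return b'\0'.join(b'%s:%s' % (k, meta[k]) for k in sorted(meta))

def decodemeta(data):
    return dict(item.split(b':', 1) for item in data.split(b'\0') if item)

blob = encodemeta({b'user': b'alice', b'date': b'0 0'})
assert blob == b'date:0 0\x00user:alice'
assert decodemeta(blob) == {b'date': b'0 0', b'user': b'alice'}
```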
284 284 ## Parsing and writing of version "1"
285 285 #
286 286 # The header is followed by the markers. Each marker is made of:
287 287 #
288 288 # - uint32: total size of the marker (including this field)
289 289 #
290 290 # - float64: date in seconds since epoch
291 291 #
292 292 # - int16: timezone offset in minutes
293 293 #
294 294 # - uint16: a bit field. It is reserved for flags used in common
295 295 # obsolete marker operations, to avoid repeated decoding of metadata
296 296 # entries.
297 297 #
298 298 # - uint8: number of successors "N", can be zero.
299 299 #
300 300 # - uint8: number of parents "P", can be zero.
301 301 #
302 302 # 0: parents data stored but no parent,
303 303 # 1: one parent stored,
304 304 # 2: two parents stored,
305 305 # 3: no parent data stored
306 306 #
307 307 # - uint8: number of metadata entries M
308 308 #
309 309 # - 20 or 32 bytes: predecessor changeset identifier.
310 310 #
311 311 # - N*(20 or 32) bytes: successor changeset identifiers.
312 312 #
313 313 # - P*(20 or 32) bytes: parents of the predecessor changeset.
314 314 #
315 315 # - M*(uint8, uint8): size of all metadata entries (key and value)
316 316 #
317 317 # - remaining bytes: the metadata, each (key, value) pair after the other.
318 318 _fm1version = 1
319 319 _fm1fixed = '>IdhHBBB20s'
320 320 _fm1nodesha1 = '20s'
321 321 _fm1nodesha256 = '32s'
322 322 _fm1nodesha1size = _calcsize(_fm1nodesha1)
323 323 _fm1nodesha256size = _calcsize(_fm1nodesha256)
324 324 _fm1fsize = _calcsize(_fm1fixed)
325 325 _fm1parentnone = 3
326 326 _fm1parentshift = 14
327 327 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
328 328 _fm1metapair = 'BB'
329 329 _fm1metapairsize = _calcsize(_fm1metapair)
330 330
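A sketch that packs and unpacks the version-1 fixed part for a marker with no successors and no recorded parents; note how `numpar == 3` encodes "no parent data stored":

```python
import struct

fm1fixed = struct.Struct('>IdhHBBB20s')   # 39 bytes for the SHA-1 variant
prec = b'\x03' * 20                       # hypothetical predecessor id
packed = fm1fixed.pack(fm1fixed.size,     # total size (no extra payload here)
                       0.0, 0,            # date: epoch seconds, tz minutes
                       0,                 # flags
                       0,                 # numsuc: a prune, no successors
                       3,                 # numpar == _fm1parentnone
                       0,                 # nummeta
                       prec)
tsize, secs, tz, flags, numsuc, numpar, nummeta, pre = fm1fixed.unpack(packed)
assert (tsize, numsuc, numpar, pre) == (39, 0, 3, prec)
```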
331 331 def _fm1purereadmarkers(data, off, stop):
332 332 # make some global constants local for performance
333 333 noneflag = _fm1parentnone
334 334 sha2flag = usingsha256
335 335 sha1size = _fm1nodesha1size
336 336 sha2size = _fm1nodesha256size
337 337 sha1fmt = _fm1nodesha1
338 338 sha2fmt = _fm1nodesha256
339 339 metasize = _fm1metapairsize
340 340 metafmt = _fm1metapair
341 341 fsize = _fm1fsize
342 342 unpack = _unpack
343 343
344 344 # Loop on markers
345 345 ufixed = struct.Struct(_fm1fixed).unpack
346 346
347 347 while off < stop:
348 348 # read fixed part
349 349 o1 = off + fsize
350 350 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
351 351
352 352 if flags & sha2flag:
353 353 # FIXME: prec was read as a SHA1, needs to be amended
354 354
355 355 # read 0 or more successors
356 356 if numsuc == 1:
357 357 o2 = o1 + sha2size
358 358 sucs = (data[o1:o2],)
359 359 else:
360 360 o2 = o1 + sha2size * numsuc
361 361 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
362 362
363 363 # read parents
364 364 if numpar == noneflag:
365 365 o3 = o2
366 366 parents = None
367 367 elif numpar == 1:
368 368 o3 = o2 + sha2size
369 369 parents = (data[o2:o3],)
370 370 else:
371 371 o3 = o2 + sha2size * numpar
372 372 parents = unpack(sha2fmt * numpar, data[o2:o3])
373 373 else:
374 374 # read 0 or more successors
375 375 if numsuc == 1:
376 376 o2 = o1 + sha1size
377 377 sucs = (data[o1:o2],)
378 378 else:
379 379 o2 = o1 + sha1size * numsuc
380 380 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
381 381
382 382 # read parents
383 383 if numpar == noneflag:
384 384 o3 = o2
385 385 parents = None
386 386 elif numpar == 1:
387 387 o3 = o2 + sha1size
388 388 parents = (data[o2:o3],)
389 389 else:
390 390 o3 = o2 + sha1size * numpar
391 391 parents = unpack(sha1fmt * numpar, data[o2:o3])
392 392
393 393 # read metadata
394 394 off = o3 + metasize * nummeta
395 395 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
396 396 metadata = []
397 397 for idx in pycompat.xrange(0, len(metapairsize), 2):
398 398 o1 = off + metapairsize[idx]
399 399 o2 = o1 + metapairsize[idx + 1]
400 400 metadata.append((data[off:o1], data[o1:o2]))
401 401 off = o2
402 402
403 403 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
404 404
405 405 def _fm1encodeonemarker(marker):
406 406 pre, sucs, flags, metadata, date, parents = marker
407 407 # determine node size
408 408 _fm1node = _fm1nodesha1
409 409 if flags & usingsha256:
410 410 _fm1node = _fm1nodesha256
411 411 numsuc = len(sucs)
412 412 numextranodes = numsuc
413 413 if parents is None:
414 414 numpar = _fm1parentnone
415 415 else:
416 416 numpar = len(parents)
417 417 numextranodes += numpar
418 418 formatnodes = _fm1node * numextranodes
419 419 formatmeta = _fm1metapair * len(metadata)
420 420 format = _fm1fixed + formatnodes + formatmeta
421 421 # tz is stored in minutes so we divide by 60
422 422 tz = date[1]//60
423 423 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
424 424 data.extend(sucs)
425 425 if parents is not None:
426 426 data.extend(parents)
427 427 totalsize = _calcsize(format)
428 428 for key, value in metadata:
429 429 lk = len(key)
430 430 lv = len(value)
431 431 if lk > 255:
432 432 msg = ('obsstore metadata key cannot be longer than 255 bytes'
433 433 ' (key "%s" is %u bytes)') % (key, lk)
434 434 raise error.ProgrammingError(msg)
435 435 if lv > 255:
436 436 msg = ('obsstore metadata value cannot be longer than 255 bytes'
437 437 ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
438 438 raise error.ProgrammingError(msg)
439 439 data.append(lk)
440 440 data.append(lv)
441 441 totalsize += lk + lv
442 442 data[0] = totalsize
443 443 data = [_pack(format, *data)]
444 444 for key, value in metadata:
445 445 data.append(key)
446 446 data.append(value)
447 447 return ''.join(data)
448 448
449 449 def _fm1readmarkers(data, off, stop):
450 450 native = getattr(parsers, 'fm1readmarkers', None)
451 451 if not native:
452 452 return _fm1purereadmarkers(data, off, stop)
453 453 return native(data, off, stop)
454 454
455 455 # mapping to read/write various marker formats
456 456 # <version> -> (decoder, encoder)
457 457 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
458 458 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
459 459
460 460 def _readmarkerversion(data):
461 461 return _unpack('>B', data[0:1])[0]
462 462
463 463 @util.nogc
464 464 def _readmarkers(data, off=None, stop=None):
465 465 """Read and enumerate markers from raw data"""
466 466 diskversion = _readmarkerversion(data)
467 467 if not off:
468 468 off = 1 # skip 1 byte version number
469 469 if stop is None:
470 470 stop = len(data)
471 471 if diskversion not in formats:
472 472 msg = _('parsing obsolete marker: unknown version %r') % diskversion
473 473 raise error.UnknownVersion(msg, version=diskversion)
474 474 return diskversion, formats[diskversion][0](data, off, stop)
475 475
476 476 def encodeheader(version=_fm0version):
477 477 return _pack('>B', version)
478 478
479 479 def encodemarkers(markers, addheader=False, version=_fm0version):
480 480 # Kept separate from flushmarkers(), it will be reused for
481 481 # marker exchange.
482 482 encodeone = formats[version][1]
483 483 if addheader:
484 484 yield encodeheader(version)
485 485 for marker in markers:
486 486 yield encodeone(marker)
487 487
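Since the encoders and parsers are symmetric, a marker should survive a round trip through `encodemarkers` and `_readmarkers`. A hypothetical sketch, assuming the in-memory tuple shape documented on the `obsstore` class below:

```python
marker = (b'\x01' * 20,             # prec: hypothetical predecessor id
          (b'\x02' * 20,),          # succs: one successor
          0,                        # flags
          ((b'user', b'alice'),),   # metadata as sorted (key, value) pairs
          (0.0, 0),                 # date
          None)                     # parents: not recorded

stream = b''.join(encodemarkers([marker], addheader=True,
                                version=_fm1version))
version, decoded = _readmarkers(stream)
assert version == _fm1version and list(decoded) == [marker]
```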
488 488 @util.nogc
489 489 def _addsuccessors(successors, markers):
490 490 for mark in markers:
491 491 successors.setdefault(mark[0], set()).add(mark)
492 492
493 493 @util.nogc
494 494 def _addpredecessors(predecessors, markers):
495 495 for mark in markers:
496 496 for suc in mark[1]:
497 497 predecessors.setdefault(suc, set()).add(mark)
498 498
499 499 @util.nogc
500 500 def _addchildren(children, markers):
501 501 for mark in markers:
502 502 parents = mark[5]
503 503 if parents is not None:
504 504 for p in parents:
505 505 children.setdefault(p, set()).add(mark)
506 506
507 507 def _checkinvalidmarkers(markers):
508 508 """search for marker with invalid data and raise error if needed
509 509
510 510 Exist as a separated function to allow the evolve extension for a more
511 511 subtle handling.
512 512 """
513 513 for mark in markers:
514 514 if node.nullid in mark[1]:
515 515 raise error.Abort(_('bad obsolescence marker detected: '
516 516 'invalid successors nullid'))
517 517
518 518 class obsstore(object):
519 519 """Store obsolete markers
520 520
521 521 Markers can be accessed with three mappings:
522 522 - predecessors[x] -> set(markers on predecessors edges of x)
523 523 - successors[x] -> set(markers on successors edges of x)
524 524 - children[x] -> set(markers on predecessors edges of children(x))
525 525 """
526 526
527 527 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
528 528 # prec: nodeid, the predecessor changeset
529 529 # succs: tuple of nodeid, successor changesets (0-N length)
530 530 # flag: integer, flag field carrying modifier for the markers (see doc)
531 531 # meta: binary blob in UTF-8, encoded metadata dictionary
532 532 # date: (float, int) tuple, date of marker creation
533 533 # parents: (tuple of nodeid) or None, parents of predecessors
534 534 # None is used when no data has been recorded
535 535
536 536 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
537 537 # caches for various obsolescence related data
538 538 self.caches = {}
539 539 self.svfs = svfs
540 540 self._defaultformat = defaultformat
541 541 self._readonly = readonly
542 542
543 543 def __iter__(self):
544 544 return iter(self._all)
545 545
546 546 def __len__(self):
547 547 return len(self._all)
548 548
549 549 def __nonzero__(self):
550 550 if not self._cached(r'_all'):
551 551 try:
552 552 return self.svfs.stat('obsstore').st_size > 1
553 553 except OSError as inst:
554 554 if inst.errno != errno.ENOENT:
555 555 raise
556 556 # just build an empty _all list if no obsstore exists, which
557 557 # avoids further stat() syscalls
558 558 return bool(self._all)
559 559
560 560 __bool__ = __nonzero__
561 561
562 562 @property
563 563 def readonly(self):
564 564 """True if marker creation is disabled
565 565
566 566 Remove me in the future when obsolete markers are always on."""
567 567 return self._readonly
568 568
569 569 def create(self, transaction, prec, succs=(), flag=0, parents=None,
570 570 date=None, metadata=None, ui=None):
571 571 """obsolete: add a new obsolete marker
572 572
573 573 * ensuring it is hashable
574 574 * check mandatory metadata
575 575 * encode metadata
576 576
577 577 If you are a human writing code creating marker you want to use the
578 578 `createmarkers` function in this module instead.
579 579
580 580 return True if a new marker have been added, False if the markers
581 581 already existed (no op).
582 582 """
583 583 if metadata is None:
584 584 metadata = {}
585 585 if date is None:
586 586 if 'date' in metadata:
587 587 # as a courtesy for out-of-tree extensions
588 588 date = dateutil.parsedate(metadata.pop('date'))
589 589 elif ui is not None:
590 590 date = ui.configdate('devel', 'default-date')
591 591 if date is None:
592 592 date = dateutil.makedate()
593 593 else:
594 594 date = dateutil.makedate()
595 595 if len(prec) != 20:
596 596 raise ValueError(prec)
597 597 for succ in succs:
598 598 if len(succ) != 20:
599 599 raise ValueError(succ)
600 600 if prec in succs:
601 601 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
602 602
603 603 metadata = tuple(sorted(metadata.iteritems()))
604 604 for k, v in metadata:
605 605 try:
606 606 # might be better to reject non-ASCII keys
607 607 k.decode('utf-8')
608 608 v.decode('utf-8')
609 609 except UnicodeDecodeError:
610 610 raise error.ProgrammingError(
611 611 'obsstore metadata must be valid UTF-8 sequence '
612 612 '(key = %r, value = %r)'
613 613 % (pycompat.bytestr(k), pycompat.bytestr(v)))
614 614
615 615 marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
616 616 return bool(self.add(transaction, [marker]))
617 617
618 618 def add(self, transaction, markers):
619 619 """Add new markers to the store
620 620
621 621 Takes care of filtering out duplicates.
622 622 Returns the number of new markers."""
623 623 if self._readonly:
624 624 raise error.Abort(_('creating obsolete markers is not enabled on '
625 625 'this repo'))
626 626 known = set()
627 627 getsuccessors = self.successors.get
628 628 new = []
629 629 for m in markers:
630 630 if m not in getsuccessors(m[0], ()) and m not in known:
631 631 known.add(m)
632 632 new.append(m)
633 633 if new:
634 634 f = self.svfs('obsstore', 'ab')
635 635 try:
636 636 offset = f.tell()
637 637 transaction.add('obsstore', offset)
638 638 # offset == 0: new file - add the version header
639 639 data = b''.join(encodemarkers(new, offset == 0, self._version))
640 640 f.write(data)
641 641 finally:
642 642 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
643 643 # call 'filecacheentry.refresh()' here
644 644 f.close()
645 645 addedmarkers = transaction.changes.get('obsmarkers')
646 646 if addedmarkers is not None:
647 647 addedmarkers.update(new)
648 648 self._addmarkers(new, data)
649 649 # new markers *may* have changed several sets. invalidate the caches.
650 650 self.caches.clear()
651 651 # records the number of new markers for the transaction hooks
652 652 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
653 653 transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
654 654 return len(new)
655 655
656 656 def mergemarkers(self, transaction, data):
657 657 """merge a binary stream of markers inside the obsstore
658 658
659 659 Returns the number of new markers added."""
660 660 version, markers = _readmarkers(data)
661 661 return self.add(transaction, markers)
662 662
663 663 @propertycache
664 664 def _data(self):
665 665 return self.svfs.tryread('obsstore')
666 666
667 667 @propertycache
668 668 def _version(self):
669 669 if len(self._data) >= 1:
670 670 return _readmarkerversion(self._data)
671 671 else:
672 672 return self._defaultformat
673 673
674 674 @propertycache
675 675 def _all(self):
676 676 data = self._data
677 677 if not data:
678 678 return []
679 679 self._version, markers = _readmarkers(data)
680 680 markers = list(markers)
681 681 _checkinvalidmarkers(markers)
682 682 return markers
683 683
684 684 @propertycache
685 685 def successors(self):
686 686 successors = {}
687 687 _addsuccessors(successors, self._all)
688 688 return successors
689 689
690 690 @propertycache
691 691 def predecessors(self):
692 692 predecessors = {}
693 693 _addpredecessors(predecessors, self._all)
694 694 return predecessors
695 695
696 696 @propertycache
697 697 def children(self):
698 698 children = {}
699 699 _addchildren(children, self._all)
700 700 return children
701 701
702 702 def _cached(self, attr):
703 703 return attr in self.__dict__
704 704
705 705 def _addmarkers(self, markers, rawdata):
706 706 markers = list(markers) # to allow repeated iteration
707 707 self._data = self._data + rawdata
708 708 self._all.extend(markers)
709 709 if self._cached(r'successors'):
710 710 _addsuccessors(self.successors, markers)
711 711 if self._cached(r'predecessors'):
712 712 _addpredecessors(self.predecessors, markers)
713 713 if self._cached(r'children'):
714 714 _addchildren(self.children, markers)
715 715 _checkinvalidmarkers(markers)
716 716
717 717 def relevantmarkers(self, nodes):
718 718 """return a set of all obsolescence markers relevant to a set of nodes.
719 719
720 720 "relevant" to a set of nodes mean:
721 721
722 722 - marker that use this changeset as successor
723 723 - prune marker of direct children on this changeset
724 724 - recursive application of the two rules on predecessors of these
725 725 markers
726 726
727 727 It is a set so you cannot rely on order."""
728 728
729 729 pendingnodes = set(nodes)
730 730 seenmarkers = set()
731 731 seennodes = set(pendingnodes)
732 732 precursorsmarkers = self.predecessors
733 733 succsmarkers = self.successors
734 734 children = self.children
735 735 while pendingnodes:
736 736 direct = set()
737 737 for current in pendingnodes:
738 738 direct.update(precursorsmarkers.get(current, ()))
739 739 pruned = [m for m in children.get(current, ()) if not m[1]]
740 740 direct.update(pruned)
741 741 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
742 742 direct.update(pruned)
743 743 direct -= seenmarkers
744 744 pendingnodes = set([m[0] for m in direct])
745 745 seenmarkers |= direct
746 746 pendingnodes -= seennodes
747 747 seennodes |= pendingnodes
748 748 return seenmarkers
749 749
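A hypothetical usage sketch (assuming `repo` is a repository object and `node` a binary changeset id); this is the entry point exchange code uses to decide which markers travel with a set of nodes:

```python
# Hypothetical usage: gather every marker worth shipping alongside `node`.
markers = repo.obsstore.relevantmarkers([node])
# Each element is a raw tuple: (prec, succs, flags, metadata, date, parents).
prunes = [m for m in markers if not m[1]]   # the prune markers among them
```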
750 750 def makestore(ui, repo):
751 751 """Create an obsstore instance from a repo."""
752 752 # read default format for new obsstore.
753 753 # developer config: format.obsstore-version
754 754 defaultformat = ui.configint('format', 'obsstore-version')
755 755 # rely on obsstore class default when possible.
756 756 kwargs = {}
757 757 if defaultformat is not None:
758 758 kwargs[r'defaultformat'] = defaultformat
759 759 readonly = not isenabled(repo, createmarkersopt)
760 760 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
761 761 if store and readonly:
762 762 ui.warn(_('obsolete feature not enabled but %i markers found!\n')
763 763 % len(list(store)))
764 764 return store
765 765
766 766 def commonversion(versions):
767 767 """Return the newest version listed in both versions and our local formats.
768 768
769 769 Returns None if no common version exists.
770 770 """
771 771 versions.sort(reverse=True)
772 772 # search for the highest version known on both sides
773 773 for v in versions:
774 774 if v in formats:
775 775 return v
776 776 return None
777 777
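For example, against a peer that advertises a hypothetical future format 2 in addition to 1 and 0, negotiation settles on version 1; with no overlap it yields None:

```python
assert commonversion([2, 1, 0]) == 1   # 2 is unknown locally, so 1 wins
assert commonversion([7, 5]) is None   # no common version at all
```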
778 778 # arbitrarily picked to fit into the 8K limit of HTTP servers
779 779 # you have to take into account:
780 780 # - the version header
781 781 # - the base85 encoding
782 782 _maxpayload = 5300
783 783
784 784 def _pushkeyescape(markers):
785 785 """encode markers into a dict suitable for pushkey exchange
786 786
787 787 - binary data is base85 encoded
788 788 - split in chunks smaller than 5300 bytes"""
789 789 keys = {}
790 790 parts = []
791 791 currentlen = _maxpayload * 2 # ensure we create a new part
792 792 for marker in markers:
793 793 nextdata = _fm0encodeonemarker(marker)
794 794 if (len(nextdata) + currentlen > _maxpayload):
795 795 currentpart = []
796 796 currentlen = 0
797 797 parts.append(currentpart)
798 798 currentpart.append(nextdata)
799 799 currentlen += len(nextdata)
800 800 for idx, part in enumerate(reversed(parts)):
801 801 data = ''.join([_pack('>B', _fm0version)] + part)
802 802 keys['dump%i' % idx] = util.b85encode(data)
803 803 return keys
804 804
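The chunking in `_pushkeyescape` is greedy: a marker joins the current part unless it would push the part past `_maxpayload` (an oversized single marker still gets a part of its own). The same policy as a standalone sketch over plain byte strings:

```python
def chunk(blobs, maxpayload=5300):
    parts, size = [], maxpayload * 2      # oversized: first blob opens a part
    for blob in blobs:
        if size + len(blob) > maxpayload:
            current, size = [], 0
            parts.append(current)
        current.append(blob)
        size += len(blob)
    return parts

parts = chunk([b'x' * 4000, b'y' * 2000, b'z' * 100])
assert [sum(map(len, p)) for p in parts] == [4000, 2100]
```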
805 805 def listmarkers(repo):
806 806 """List markers over pushkey"""
807 807 if not repo.obsstore:
808 808 return {}
809 809 return _pushkeyescape(sorted(repo.obsstore))
810 810
811 811 def pushmarker(repo, key, old, new):
812 812 """Push markers over pushkey"""
813 813 if not key.startswith('dump'):
814 814 repo.ui.warn(_('unknown key: %r') % key)
815 815 return False
816 816 if old:
817 817 repo.ui.warn(_('unexpected old value for %r') % key)
818 818 return False
819 819 data = util.b85decode(new)
820 820 with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
821 821 repo.obsstore.mergemarkers(tr, data)
822 822 repo.invalidatevolatilesets()
823 823 return True
824 824
825 825 # mapping of 'set-name' -> <function to compute this set>
826 826 cachefuncs = {}
827 827 def cachefor(name):
828 828 """Decorator to register a function as computing the cache for a set"""
829 829 def decorator(func):
830 830 if name in cachefuncs:
831 831 msg = "duplicated registration for volatileset '%s' (existing: %r)"
832 832 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
833 833 cachefuncs[name] = func
834 834 return func
835 835 return decorator
836 836
837 837 def getrevs(repo, name):
838 838 """Return the set of revision that belong to the <name> set
839 839
840 840 Such access may compute the set and cache it for future use"""
841 841 repo = repo.unfiltered()
842 842 if not repo.obsstore:
843 843 return frozenset()
844 844 if name not in repo.obsstore.caches:
845 845 repo.obsstore.caches[name] = cachefuncs[name](repo)
846 846 return repo.obsstore.caches[name]
847 847
848 848 # To be simple we need to invalidate the obsolescence caches when:
849 849 #
850 850 # - a new changeset is added
851 851 # - the public phase is changed
852 852 # - obsolescence markers are added
853 853 # - strip is used on a repo
854 854 def clearobscaches(repo):
855 855 """Remove all obsolescence related cache from a repo
856 856
857 857 This remove all cache in obsstore is the obsstore already exist on the
858 858 repo.
859 859
860 860 (We could be smarter here given the exact event that trigger the cache
861 861 clearing)"""
862 862 # only clear caches if there is obsstore data in this repo
863 863 if 'obsstore' in repo._filecache:
864 864 repo.obsstore.caches.clear()
865 865
866 866 def _mutablerevs(repo):
867 867 """the set of mutable revision in the repository"""
868 868 return repo._phasecache.getrevset(repo, phases.mutablephases)
869 869
870 870 @cachefor('obsolete')
871 871 def _computeobsoleteset(repo):
872 872 """the set of obsolete revisions"""
873 873 getnode = repo.changelog.node
874 874 notpublic = _mutablerevs(repo)
875 875 isobs = repo.obsstore.successors.__contains__
876 876 obs = set(r for r in notpublic if isobs(getnode(r)))
877 877 return obs
878 878
879 879 @cachefor('orphan')
880 880 def _computeorphanset(repo):
881 881 """the set of non obsolete revisions with obsolete parents"""
882 882 pfunc = repo.changelog.parentrevs
883 883 mutable = _mutablerevs(repo)
884 884 obsolete = getrevs(repo, 'obsolete')
885 885 others = mutable - obsolete
886 886 unstable = set()
887 887 for r in sorted(others):
888 888 # A rev is unstable if one of its parents is obsolete or unstable
889 889 # this works since we traverse in increasing rev order
890 890 for p in pfunc(r):
891 891 if p in obsolete or p in unstable:
892 892 unstable.add(r)
893 893 break
894 894 return unstable
895 895
896 896 @cachefor('suspended')
897 897 def _computesuspendedset(repo):
898 898 """the set of obsolete parents with non obsolete descendants"""
899 899 suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
900 900 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
901 901
902 902 @cachefor('extinct')
903 903 def _computeextinctset(repo):
904 904 """the set of obsolete parents without non obsolete descendants"""
905 905 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
906 906
907 907 @cachefor('phasedivergent')
908 908 def _computephasedivergentset(repo):
909 909 """the set of revs trying to obsolete public revisions"""
910 910 bumped = set()
911 911 # util function (avoid attribute lookup in the loop)
912 912 phase = repo._phasecache.phase # would be faster to grab the full list
913 913 public = phases.public
914 914 cl = repo.changelog
915 915 torev = cl.nodemap.get
916 916 tonode = cl.node
917 917 for rev in repo.revs('(not public()) and (not obsolete())'):
918 918 # We only evaluate mutable, non-obsolete revisions
919 919 node = tonode(rev)
920 920 # (future) A cache of predecessors may be worth it if split is very common
921 921 for pnode in obsutil.allpredecessors(repo.obsstore, [node],
922 922 ignoreflags=bumpedfix):
923 923 prev = torev(pnode) # unfiltered! but so is phasecache
924 924 if (prev is not None) and (phase(repo, prev) <= public):
925 925 # we have a public predecessor
926 926 bumped.add(rev)
927 927 break # Next draft!
928 928 return bumped
929 929
930 930 @cachefor('contentdivergent')
931 931 def _computecontentdivergentset(repo):
932 932 """the set of rev that compete to be the final successors of some revision.
933 933 """
934 934 divergent = set()
935 935 obsstore = repo.obsstore
936 936 newermap = {}
937 937 tonode = repo.changelog.node
938 938 for rev in repo.revs('(not public()) - obsolete()'):
939 939 node = tonode(rev)
940 940 mark = obsstore.predecessors.get(node, ())
941 941 toprocess = set(mark)
942 942 seen = set()
943 943 while toprocess:
944 944 prec = toprocess.pop()[0]
945 945 if prec in seen:
946 946 continue # emergency cycle hanging prevention
947 947 seen.add(prec)
948 948 if prec not in newermap:
949 949 obsutil.successorssets(repo, prec, cache=newermap)
950 950 newer = [n for n in newermap[prec] if n]
951 951 if len(newer) > 1:
952 952 divergent.add(rev)
953 953 break
954 954 toprocess.update(obsstore.predecessors.get(prec, ()))
955 955 return divergent
956 956
957 957
958 958 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
959 959 operation=None):
960 960 """Add obsolete markers between changesets in a repo
961 961
962 962 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
963 963 tuples. `old` and `news` are changectx objects. metadata is an optional dictionary
964 964 containing metadata for this marker only. It is merged with the global
965 965 metadata specified through the `metadata` argument of this function.
966 966 Any string values in metadata must be UTF-8 bytes.
967 967
968 968 Trying to obsolete a public changeset will raise an exception.
969 969
970 970 Current user and date are used except if specified otherwise in the
971 971 metadata attribute.
972 972
973 973 This function operates within a transaction of its own, but does
974 974 not take any lock on the repo.
975 975 """
976 976 # prepare metadata
977 977 if metadata is None:
978 978 metadata = {}
979 979 if 'user' not in metadata:
980 980 luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username()
981 981 metadata['user'] = encoding.fromlocal(luser)
982 982
983 983 # Operation metadata handling
984 984 useoperation = repo.ui.configbool('experimental',
985 985 'evolution.track-operation')
986 986 if useoperation and operation:
987 987 metadata['operation'] = operation
988 988
989 989 # Effect flag metadata handling
990 990 saveeffectflag = repo.ui.configbool('experimental',
991 991 'evolution.effect-flags')
992 992
993 993 with repo.transaction('add-obsolescence-marker') as tr:
994 994 markerargs = []
995 995 for rel in relations:
996 996 prec = rel[0]
997 997 sucs = rel[1]
998 998 localmetadata = metadata.copy()
999 999 if 2 < len(rel):
1000 1000 localmetadata.update(rel[2])
1001 1001
1002 1002 if not prec.mutable():
1003 1003 raise error.Abort(_("cannot obsolete public changeset: %s")
1004 1004 % prec,
1005 1005 hint="see 'hg help phases' for details")
1006 1006 nprec = prec.node()
1007 1007 nsucs = tuple(s.node() for s in sucs)
1008 1008 npare = None
1009 1009 if not nsucs:
1010 1010 npare = tuple(p.node() for p in prec.parents())
1011 1011 if nprec in nsucs:
1012 1012 raise error.Abort(_("changeset %s cannot obsolete itself")
1013 1013 % prec)
1014 1014
1015 1015 # The effect flag can differ from one relation to another
1016 1016 if saveeffectflag:
1017 1017 # The effect flag is saved in a versioned field name for future
1018 1018 # evolution
1019 effectflag = obsutil.geteffectflag(rel)
1019 effectflag = obsutil.geteffectflag(prec, sucs)
1020 1020 localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag
1021 1021
1022 1022 # Creating the marker causes the hidden cache to become invalid,
1023 1023 # which causes recomputation when we ask for prec.parents() above.
1024 1024 # Resulting in n^2 behavior. So let's prepare all of the args
1025 1025 # first, then create the markers.
1026 1026 markerargs.append((nprec, nsucs, npare, localmetadata))
1027 1027
1028 1028 for args in markerargs:
1029 1029 nprec, nsucs, npare, localmetadata = args
1030 1030 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1031 1031 date=date, metadata=localmetadata,
1032 1032 ui=repo.ui)
1033 1033 repo.filteredrevcache.clear()
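A hypothetical caller, e.g. an amend-like command that has just rewritten `oldctx` into `newctx` (both assumed to be changectx objects). The optional third relation item carries per-marker metadata; all keys and values must be UTF-8 bytes:

```python
# Hypothetical usage; `repo`, `oldctx` and `newctx` are assumptions.
createmarkers(repo,
              [(oldctx, (newctx,), {b'note': b'fixed a typo'})],
              operation=b'amend')
```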
@@ -1,982 +1,980 @@
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 diffutil,
15 15 encoding,
16 16 node as nodemod,
17 17 phases,
18 18 util,
19 19 )
20 20 from .utils import (
21 21 dateutil,
22 22 )
23 23
24 24 ### obsolescence marker flag
25 25
26 26 ## bumpedfix flag
27 27 #
28 28 # When a changeset A' succeeds a changeset A which became public, we call A'
29 29 # "bumped" because it's a successor of a public changeset
30 30 #
31 31 # o A' (bumped)
32 32 # |`:
33 33 # | o A
34 34 # |/
35 35 # o Z
36 36 #
37 37 # The way to solve this situation is to create a new changeset Ad as a child
38 38 # of A. This changeset has the same content as A'. So the diff from A to A'
39 39 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
40 40 #
41 41 # o Ad
42 42 # |`:
43 43 # | x A'
44 44 # |'|
45 45 # o | A
46 46 # |/
47 47 # o Z
48 48 #
49 49 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
50 50 # as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
51 51 # This flag means that the successor expresses the changes between the public
52 52 # and bumped versions and fixes the situation, breaking the transitivity of
53 53 # "bumped" here.
54 54 bumpedfix = 1
55 55 usingsha256 = 2
56 56
57 57 class marker(object):
58 58 """Wrap obsolete marker raw data"""
59 59
60 60 def __init__(self, repo, data):
61 61 # the repo argument will be used to create changectx in a later version
62 62 self._repo = repo
63 63 self._data = data
64 64 self._decodedmeta = None
65 65
66 66 def __hash__(self):
67 67 return hash(self._data)
68 68
69 69 def __eq__(self, other):
70 70 if type(other) != type(self):
71 71 return False
72 72 return self._data == other._data
73 73
74 74 def prednode(self):
75 75 """Predecessor changeset node identifier"""
76 76 return self._data[0]
77 77
78 78 def succnodes(self):
79 79 """List of successor changesets node identifiers"""
80 80 return self._data[1]
81 81
82 82 def parentnodes(self):
83 83 """Parents of the predecessors (None if not recorded)"""
84 84 return self._data[5]
85 85
86 86 def metadata(self):
87 87 """Decoded metadata dictionary"""
88 88 return dict(self._data[3])
89 89
90 90 def date(self):
91 91 """Creation date as (unixtime, offset)"""
92 92 return self._data[4]
93 93
94 94 def flags(self):
95 95 """The flags field of the marker"""
96 96 return self._data[2]
97 97
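A hypothetical sketch of consuming markers through this wrapper (via `getmarkers`, defined just below) instead of indexing raw tuples, e.g. to list the prunes recorded against a repository:

```python
# Hypothetical usage: collect prune markers (no successors) with their author.
prunes = []
for m in getmarkers(repo):               # `repo` is an assumption
    if not m.succnodes():                # a prune: nothing succeeds prednode
        user = m.metadata().get(b'user', b'<unknown>')
        prunes.append((nodemod.hex(m.prednode()), user))
```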
98 98 def getmarkers(repo, nodes=None, exclusive=False):
99 99 """returns markers known in a repository
100 100
101 101 If <nodes> is specified, only markers "relevant" to those nodes are
102 102 returned"""
103 103 if nodes is None:
104 104 rawmarkers = repo.obsstore
105 105 elif exclusive:
106 106 rawmarkers = exclusivemarkers(repo, nodes)
107 107 else:
108 108 rawmarkers = repo.obsstore.relevantmarkers(nodes)
109 109
110 110 for markerdata in rawmarkers:
111 111 yield marker(repo, markerdata)
112 112
113 113 def closestpredecessors(repo, nodeid):
114 114 """yield the list of next predecessors pointing on visible changectx nodes
115 115
116 116 This function respect the repoview filtering, filtered revision will be
117 117 considered missing.
118 118 """
119 119
120 120 precursors = repo.obsstore.predecessors
121 121 stack = [nodeid]
122 122 seen = set(stack)
123 123
124 124 while stack:
125 125 current = stack.pop()
126 126 currentpreccs = precursors.get(current, ())
127 127
128 128 for prec in currentpreccs:
129 129 precnodeid = prec[0]
130 130
131 131 # Basic cycle protection
132 132 if precnodeid in seen:
133 133 continue
134 134 seen.add(precnodeid)
135 135
136 136 if precnodeid in repo:
137 137 yield precnodeid
138 138 else:
139 139 stack.append(precnodeid)
140 140
141 141 def allpredecessors(obsstore, nodes, ignoreflags=0):
142 142 """Yield node for every precursors of <nodes>.
143 143
144 144 Some precursors may be unknown locally.
145 145
146 146 This is a linear yield unsuited to detecting folded changesets. It includes
147 147 initial nodes too."""
148 148
149 149 remaining = set(nodes)
150 150 seen = set(remaining)
151 151 while remaining:
152 152 current = remaining.pop()
153 153 yield current
154 154 for mark in obsstore.predecessors.get(current, ()):
155 155 # ignore marker flagged with specified flag
156 156 if mark[2] & ignoreflags:
157 157 continue
158 158 suc = mark[0]
159 159 if suc not in seen:
160 160 seen.add(suc)
161 161 remaining.add(suc)
162 162
163 163 def allsuccessors(obsstore, nodes, ignoreflags=0):
164 164 """Yield node for every successor of <nodes>.
165 165
166 166 Some successors may be unknown locally.
167 167
168 168 This is a linear yield unsuited to detecting split changesets. It includes
169 169 initial nodes too."""
170 170 remaining = set(nodes)
171 171 seen = set(remaining)
172 172 while remaining:
173 173 current = remaining.pop()
174 174 yield current
175 175 for mark in obsstore.successors.get(current, ()):
176 176 # ignore marker flagged with specified flag
177 177 if mark[2] & ignoreflags:
178 178 continue
179 179 for suc in mark[1]:
180 180 if suc not in seen:
181 181 seen.add(suc)
182 182 remaining.add(suc)
183 183
184 184 def _filterprunes(markers):
185 185 """return a set with no prune markers"""
186 186 return set(m for m in markers if m[1])
187 187
188 188 def exclusivemarkers(repo, nodes):
189 189 """set of markers relevant to "nodes" but no other locally-known nodes
190 190
191 191 This function computes the set of markers "exclusive" to a locally-known
192 192 node. This means we walk the markers starting from <nodes> until we reach a
193 193 locally-known precursor outside of <nodes>. Elements of <nodes> with
194 194 locally-known successors outside of <nodes> are ignored (since their
195 195 precursor markers are also relevant to these successors).
196 196
197 197 For example:
198 198
199 199 # (A0 rewritten as A1)
200 200 #
201 201 # A0 <-1- A1 # Marker "1" is exclusive to A1
202 202
203 203 or
204 204
205 205 # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
206 206 #
207 207 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
208 208
209 209 or
210 210
211 211 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
212 212 #
213 213 # <-2- A1 # Marker "2" is exclusive to A0,A1
214 214 # /
215 215 # <-1- A0
216 216 # \
217 217 # <-3- A2 # Marker "3" is exclusive to A0,A2
218 218 #
219 219 # in addition:
220 220 #
221 221 # Markers "2,3" are exclusive to A1,A2
222 222 # Markers "1,2,3" are exclusive to A0,A1,A2
223 223
224 224 See test/test-obsolete-bundle-strip.t for more examples.
225 225
226 226 An example usage is strip. When stripping a changeset, we also want to
227 227 strip the markers exclusive to this changeset. Otherwise we would have
228 228 "dangling"" obsolescence markers from its precursors: Obsolescence markers
229 229 marking a node as obsolete without any successors available locally.
230 230
231 231 As for relevant markers, the prune markers for children will be followed.
232 232 Of course, they will only be followed if the pruned children is
233 233 locally-known. Since the prune markers are relevant to the pruned node.
234 234 However, while prune markers are considered relevant to the parent of the
235 235 pruned changesets, prune markers for locally-known changeset (with no
236 236 successors) are considered exclusive to the pruned nodes. This allows
237 237 to strip the prune markers (with the rest of the exclusive chain) alongside
238 238 the pruned changesets.
239 239 """
240 240 # running on a filtered repository would be dangerous as markers could be
241 241 # reported as exclusive when they are relevant for other filtered nodes.
242 242 unfi = repo.unfiltered()
243 243
244 244 # shortcuts to various useful items
245 245 nm = unfi.changelog.nodemap
246 246 precursorsmarkers = unfi.obsstore.predecessors
247 247 successormarkers = unfi.obsstore.successors
248 248 childrenmarkers = unfi.obsstore.children
249 249
250 250 # exclusive markers (return of the function)
251 251 exclmarkers = set()
252 252 # we need fast membership testing
253 253 nodes = set(nodes)
254 254 # looking for head in the obshistory
255 255 #
256 256 # XXX we are ignoring all issues in regard with cycle for now.
257 257 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
258 258 stack.sort()
259 259 # nodes already stacked
260 260 seennodes = set(stack)
261 261 while stack:
262 262 current = stack.pop()
263 263 # fetch precursors markers
264 264 markers = list(precursorsmarkers.get(current, ()))
265 265 # extend the list with prune markers
266 266 for mark in successormarkers.get(current, ()):
267 267 if not mark[1]:
268 268 markers.append(mark)
269 269 # and markers from children (looking for prune)
270 270 for mark in childrenmarkers.get(current, ()):
271 271 if not mark[1]:
272 272 markers.append(mark)
273 273 # traverse the markers
274 274 for mark in markers:
275 275 if mark in exclmarkers:
276 276 # markers already selected
277 277 continue
278 278
279 279 # If the marker is about the current node, select it
280 280 #
281 281 # (this delays the addition of markers from children)
282 282 if mark[1] or mark[0] == current:
283 283 exclmarkers.add(mark)
284 284
285 285 # should we keep traversing through the precursors?
286 286 prec = mark[0]
287 287
288 288 # nodes in the stack or already processed
289 289 if prec in seennodes:
290 290 continue
291 291
292 292 # is this a locally known node ?
293 293 known = prec in nm
294 294 # if locally-known and not in the <nodes> set, the traversal
295 295 # stops here.
296 296 if known and prec not in nodes:
297 297 continue
298 298
299 299 # do not keep going if there are unselected markers pointing to this
300 300 # node. If we end up traversing these unselected markers later the
301 301 # node will be taken care of at that point.
302 302 precmarkers = _filterprunes(successormarkers.get(prec))
303 303 if precmarkers.issubset(exclmarkers):
304 304 seennodes.add(prec)
305 305 stack.append(prec)
306 306
307 307 return exclmarkers
308 308
309 309 def foreground(repo, nodes):
310 310 """return all nodes in the "foreground" of other node
311 311
312 312 The foreground of a revision is anything reachable using parent -> children
313 313 or precursor -> successor relation. It is very similar to "descendant" but
314 314 augmented with obsolescence information.
315 315
316 316 Beware that possible obsolescence cycle may result if complex situation.
317 317 """
318 318 repo = repo.unfiltered()
319 319 foreground = set(repo.set('%ln::', nodes))
320 320 if repo.obsstore:
321 321 # We only need this complicated logic if there is obsolescence
322 322 # XXX will probably deserve an optimised revset.
323 323 nm = repo.changelog.nodemap
324 324 plen = -1
325 325 # compute the whole set of successors or descendants
326 326 while len(foreground) != plen:
327 327 plen = len(foreground)
328 328 succs = set(c.node() for c in foreground)
329 329 mutable = [c.node() for c in foreground if c.mutable()]
330 330 succs.update(allsuccessors(repo.obsstore, mutable))
331 331 known = (n for n in succs if n in nm)
332 332 foreground = set(repo.set('%ln::', known))
333 333 return set(c.node() for c in foreground)
334 334
335 335 # effectflag field
336 336 #
337 337 # Effect-flag is a 1-byte bit field used to store what changed between a
338 338 # changeset and its successor(s).
339 339 #
340 340 # The effect flag is stored in obs-markers metadata while we iterate on the
341 341 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
342 342 # with an incompatible design for effect flag, we can store a new design under
343 343 # another field name so we don't break readers. We plan to extend the existing
344 344 # obsmarkers bit-field when the effect flag design is stabilized.
345 345 #
346 346 # The effect-flag is placed behind an experimental flag
347 347 # `effect-flags` set to off by default.
348 348 #
349 349
350 350 EFFECTFLAGFIELD = "ef1"
351 351
352 352 DESCCHANGED = 1 << 0 # action changed the description
353 353 METACHANGED = 1 << 1 # action changed the metadata
354 354 DIFFCHANGED = 1 << 3 # action changed the diff introduced by the changeset
355 355 PARENTCHANGED = 1 << 2 # action changed the parents
356 356 USERCHANGED = 1 << 4 # the user changed
357 357 DATECHANGED = 1 << 5 # the date changed
358 358 BRANCHCHANGED = 1 << 6 # the branch changed
359 359
360 360 METABLACKLIST = [
361 361 re.compile('^branch$'),
362 362 re.compile('^.*-source$'),
363 363 re.compile('^.*_source$'),
364 364 re.compile('^source$'),
365 365 ]
366 366
367 367 def metanotblacklisted(metaitem):
368 368 """ Check that the key of a meta item (extrakey, extravalue) does not
369 369 match any of the blacklist patterns
370 370 """
371 371 metakey = metaitem[0]
372 372
373 373 return not any(pattern.match(metakey) for pattern in METABLACKLIST)
374 374
375 375 def _prepare_hunk(hunk):
376 376 """Drop all information but the username and patch"""
377 377 cleanhunk = []
378 378 for line in hunk.splitlines():
379 379 if line.startswith(b'# User') or not line.startswith(b'#'):
380 380 if line.startswith(b'@@'):
381 381 line = b'@@\n'
382 382 cleanhunk.append(line)
383 383 return cleanhunk
384 384
385 385 def _getdifflines(iterdiff):
386 386 """return a cleaned up lines"""
387 387 lines = next(iterdiff, None)
388 388
389 389 if lines is None:
390 390 return lines
391 391
392 392 return _prepare_hunk(lines)
393 393
394 394 def _cmpdiff(leftctx, rightctx):
395 395 """return True if both ctx introduce the "same diff"
396 396
397 397 This is a first and basic implementation, with many shortcoming.
398 398 """
399 399 diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True})
400 400 # Leftctx or right ctx might be filtered, so we need to use the contexts
401 401 # with an unfiltered repository to safely compute the diff
402 402 leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
403 403 leftdiff = leftunfi.diff(opts=diffopts)
404 404 rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
405 405 rightdiff = rightunfi.diff(opts=diffopts)
406 406
407 407 left, right = (0, 0)
408 408 while None not in (left, right):
409 409 left = _getdifflines(leftdiff)
410 410 right = _getdifflines(rightdiff)
411 411
412 412 if left != right:
413 413 return False
414 414 return True
415 415
416 def geteffectflag(relation):
416 def geteffectflag(source, successors):
417 417 """ From an obs-marker relation, compute what changed between the
418 418 predecessor and the successor.
419 419 """
420 420 effects = 0
421 421
422 source = relation[0]
423
424 for changectx in relation[1]:
422 for changectx in successors:
425 423 # Check if description has changed
426 424 if changectx.description() != source.description():
427 425 effects |= DESCCHANGED
428 426
429 427 # Check if user has changed
430 428 if changectx.user() != source.user():
431 429 effects |= USERCHANGED
432 430
433 431 # Check if date has changed
434 432 if changectx.date() != source.date():
435 433 effects |= DATECHANGED
436 434
437 435 # Check if branch has changed
438 436 if changectx.branch() != source.branch():
439 437 effects |= BRANCHCHANGED
440 438
441 439 # Check if at least one of the parents has changed
442 440 if changectx.parents() != source.parents():
443 441 effects |= PARENTCHANGED
444 442
445 443 # Check if other meta has changed
446 444 changeextra = changectx.extra().items()
447 445 ctxmeta = list(filter(metanotblacklisted, changeextra))
448 446
449 447 sourceextra = source.extra().items()
450 448 srcmeta = list(filter(metanotblacklisted, sourceextra))
451 449
452 450 if ctxmeta != srcmeta:
453 451 effects |= METACHANGED
454 452
455 453 # Check if the diff has changed
456 454 if not _cmpdiff(source, changectx):
457 455 effects |= DIFFCHANGED
458 456
459 457 return effects
460 458
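Because the result is a bit mask, readers can test individual effects on the stored `ef1` metadata value. A minimal sketch decoding a hypothetical value:

```python
ef1 = DESCCHANGED | USERCHANGED    # hypothetical stored "ef1" value: 17
fields = [(b'description', DESCCHANGED), (b'meta', METACHANGED),
          (b'parents', PARENTCHANGED), (b'diff', DIFFCHANGED),
          (b'user', USERCHANGED), (b'date', DATECHANGED),
          (b'branch', BRANCHCHANGED)]
changed = [name for name, bit in fields if ef1 & bit]
assert changed == [b'description', b'user']
```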
461 459 def getobsoleted(repo, tr):
462 460 """return the set of pre-existing revisions obsoleted by a transaction"""
463 461 torev = repo.unfiltered().changelog.nodemap.get
464 462 phase = repo._phasecache.phase
465 463 succsmarkers = repo.obsstore.successors.get
466 464 public = phases.public
467 465 addedmarkers = tr.changes['obsmarkers']
468 466 origrepolen = tr.changes['origrepolen']
469 467 seenrevs = set()
470 468 obsoleted = set()
471 469 for mark in addedmarkers:
472 470 node = mark[0]
473 471 rev = torev(node)
474 472 if rev is None or rev in seenrevs or rev >= origrepolen:
475 473 continue
476 474 seenrevs.add(rev)
477 475 if phase(repo, rev) == public:
478 476 continue
479 477 if set(succsmarkers(node) or []).issubset(addedmarkers):
480 478 obsoleted.add(rev)
481 479 return obsoleted
482 480
483 481 class _succs(list):
484 482 """small class to represent a successors with some metadata about it"""
485 483
486 484 def __init__(self, *args, **kwargs):
487 485 super(_succs, self).__init__(*args, **kwargs)
488 486 self.markers = set()
489 487
490 488 def copy(self):
491 489 new = _succs(self)
492 490 new.markers = self.markers.copy()
493 491 return new
494 492
495 493 @util.propertycache
496 494 def _set(self):
497 495 # immutable
498 496 return set(self)
499 497
500 498 def canmerge(self, other):
501 499 return self._set.issubset(other._set)
502 500
503 501 def successorssets(repo, initialnode, closest=False, cache=None):
504 502 """Return set of all latest successors of initial nodes
505 503
506 504 The successors set of a changeset A is the group of revisions that succeed
507 505 A. It succeeds A as a consistent whole, each revision being only a partial
508 506 replacement. By default, the successors set contains non-obsolete
509 507 changesets only, walking the obsolescence graph until reaching a leaf. If
510 508 'closest' is set to True, closest successors-sets are returned (the
511 509 obsolescence walk stops on known changesets).
512 510
513 511 This function returns the full list of successor sets which is why it
514 512 returns a list of tuples and not just a single tuple. Each tuple is a valid
515 513 successors set. Note that (A,) may be a valid successors set for changeset A
516 514 (see below).
517 515
518 516 In most cases, a changeset A will have a single element (e.g. the changeset
519 517 A is replaced by A') in its successors set. Though, it is also common for a
520 518 changeset A to have no elements in its successor set (e.g. the changeset
521 519 has been pruned). Therefore, the returned list of successors sets will be
522 520 [(A',)] or [], respectively.
523 521
524 522 When a changeset A is split into A' and B', however, it will result in a
525 523 successors set containing more than a single element, i.e. [(A',B')].
526 524 Divergent changesets will result in multiple successors sets, i.e. [(A',),
527 525 (A'')].
528 526
529 527 If a changeset A is not obsolete, then it will conceptually have no
530 528 successors set. To distinguish this from a pruned changeset, the successor
531 529 set will contain itself only, i.e. [(A,)].
532 530
533 531 Finally, final successors unknown locally are considered to be pruned
534 532 (pruned: obsoleted without any successors; final: successors not affected
535 533 by markers).
536 534
537 535 The 'closest' mode respects the repoview filtering. For example, without
538 536 a filter it will stop at the first locally known changeset; with the
539 537 'visible' filter it will stop on visible changesets.
540 538
541 539 The optional `cache` parameter is a dictionary that may contain
542 540 precomputed successors sets. It is meant to reuse the computation of a
543 541 previous call to `successorssets` when multiple calls are made at the same
544 542 time. The cache dictionary is updated in place. The caller is responsible
545 543 for its life span. Code that makes multiple calls to `successorssets`
546 544 *should* use this cache mechanism or risk a performance hit.
547 545
548 546 Since results differ depending on the 'closest' mode, the same cache
549 547 cannot be reused for both modes.
550 548 """
551 549
552 550 succmarkers = repo.obsstore.successors
553 551
554 552 # Stack of nodes we search successors sets for
555 553 toproceed = [initialnode]
556 554 # set version of above list for fast loop detection
557 555 # element added to "toproceed" must be added here
558 556 stackedset = set(toproceed)
559 557 if cache is None:
560 558 cache = {}
561 559
562 560 # This while loop is the flattened version of a recursive search for
563 561 # successors sets
564 562 #
565 563 # def successorssets(x):
566 564 # successors = directsuccessors(x)
567 565 # ss = [[]]
568 566 # for succ in successors:
569 567 # # product as in itertools cartesian product
570 568 # ss = product(ss, successorssets(succ))
571 569 # return ss
572 570 #
573 571 # But we cannot use plain recursive calls here:
574 572 # - that would blow the python call stack
575 573 # - obsolescence markers may have cycles, we need to handle them.
576 574 #
577 575 # The `toproceed` list acts as our call stack. Every node we search
578 576 # successors sets for is stacked there.
579 577 #
580 578 # The `stackedset` is a set version of this stack used to check if a node
581 579 # is already stacked. This check is used to detect cycles and prevent
582 580 # infinite loops.
583 581 #
584 582 # The successors sets of all nodes are stored in the `cache` dictionary.
585 583 #
586 584 # After this while loop ends we use the cache to return the successors sets
587 585 # for the node requested by the caller.
588 586 while toproceed:
589 587 # Every iteration tries to compute the successors sets of the topmost
590 588 # node of the stack: CURRENT.
591 589 #
592 590 # There are four possible outcomes:
593 591 #
594 592 # 1) We already know the successors sets of CURRENT:
595 593 # -> mission accomplished, pop it from the stack.
596 594 # 2) Stop the walk:
597 595 # default case: Node is not obsolete
598 596 # closest case: Node is known at this repo filter level
599 597 # -> the node is its own successors sets. Add it to the cache.
600 598 # 3) We do not know successors set of direct successors of CURRENT:
601 599 # -> We add those successors to the stack.
602 600 # 4) We know successors sets of all direct successors of CURRENT:
603 601 # -> We can compute CURRENT successors set and add it to the
604 602 # cache.
605 603 #
606 604 current = toproceed[-1]
607 605
608 606 # case 2 condition is a bit hairy because of closest,
609 607 # we compute it on its own
610 608 case2condition = ((current not in succmarkers)
611 609 or (closest and current != initialnode
612 610 and current in repo))
613 611
614 612 if current in cache:
615 613 # case (1): We already know the successors sets
616 614 stackedset.remove(toproceed.pop())
617 615 elif case2condition:
618 616 # case (2): end of walk.
619 617 if current in repo:
620 618 # We have a valid successors set.
621 619 cache[current] = [_succs((current,))]
622 620 else:
623 621 # The final obsolete version is unknown locally.
624 622 # Do not count that as a valid successors set.
625 623 cache[current] = []
626 624 else:
627 625 # cases (3) and (4)
628 626 #
629 627 # We proceed in two phases. Phase 1 aims to distinguish case (3)
630 628 # from case (4):
631 629 #
632 630 # For each direct successors of CURRENT, we check whether its
633 631 # successors sets are known. If they are not, we stack the
634 632 # unknown node and proceed to the next iteration of the while
635 633 # loop. (case 3)
636 634 #
637 635 # During this step, we may detect obsolescence cycles: a node
638 636 # with unknown successors sets but already in the call stack.
639 637 # In such a situation, we arbitrarily set the successors sets of
640 638 # the node to nothing (node pruned) to break the cycle.
641 639 #
642 640 # If no break was encountered we proceed to phase 2.
643 641 #
644 642 # Phase 2 computes successors sets of CURRENT (case 4); see details
645 643 # in phase 2 itself.
646 644 #
647 645 # Note the two levels of iteration in each phase.
648 646 # - The first one handles obsolescence markers using CURRENT as
649 647 # predecessor (successors markers of CURRENT).
650 648 #
651 649 # Having multiple entries here means divergence.
652 650 #
653 651 # - The second one handles successors defined in each marker.
654 652 #
655 653 # Having none means a pruned node, multiple successors mean a split,
656 654 # and a single successor is a standard replacement.
657 655 #
658 656 for mark in sorted(succmarkers[current]):
659 657 for suc in mark[1]:
660 658 if suc not in cache:
661 659 if suc in stackedset:
662 660 # cycle breaking
663 661 cache[suc] = []
664 662 else:
665 663 # case (3) If we have not computed successors sets
666 664 # of one of those successors we add it to the
667 665 # `toproceed` stack and stop all work for this
668 666 # iteration.
669 667 toproceed.append(suc)
670 668 stackedset.add(suc)
671 669 break
672 670 else:
673 671 continue
674 672 break
675 673 else:
676 674 # case (4): we know all successors sets of all direct
677 675 # successors
678 676 #
679 677 # The successors set contributed by each marker depends on the
680 678 # successors sets of all its "successors" nodes.
681 679 #
682 680 # Each different marker is a divergence in the obsolescence
683 681 # history. It contributes successors sets distinct from other
684 682 # markers.
685 683 #
686 684 # Within a marker, a successor may have divergent successors
687 685 # sets. In such a case, the marker will contribute multiple
688 686 # divergent successors sets. If multiple successors have
689 687 # divergent successors sets, a Cartesian product is used.
690 688 #
691 689 # At the end we post-process successors sets to remove
692 690 # duplicated entries and successors sets that are strict subsets of
693 691 # another one.
694 692 succssets = []
695 693 for mark in sorted(succmarkers[current]):
696 694 # successors sets contributed by this marker
697 695 base = _succs()
698 696 base.markers.add(mark)
699 697 markss = [base]
700 698 for suc in mark[1]:
701 699 # Cartesian product with previous successors
702 700 productresult = []
703 701 for prefix in markss:
704 702 for suffix in cache[suc]:
705 703 newss = prefix.copy()
706 704 newss.markers.update(suffix.markers)
707 705 for part in suffix:
708 706 # do not duplicate entries in the successors set;
709 707 # the first entry wins.
710 708 if part not in newss:
711 709 newss.append(part)
712 710 productresult.append(newss)
713 711 markss = productresult
714 712 succssets.extend(markss)
715 713 # remove duplicated and subset
716 714 seen = []
717 715 final = []
718 716 candidates = sorted((s for s in succssets if s),
719 717 key=len, reverse=True)
720 718 for cand in candidates:
721 719 for seensuccs in seen:
722 720 if cand.canmerge(seensuccs):
723 721 seensuccs.markers.update(cand.markers)
724 722 break
725 723 else:
726 724 final.append(cand)
727 725 seen.append(cand)
728 726 final.reverse() # put small successors sets first
729 727 cache[current] = final
730 728 return cache[initialnode]
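
# Hedged usage sketch (added note; `repo` and `nodes` are assumed to be
# caller-provided): repeated queries should share one cache dict, as the
# docstring above recommends:
#
#   cache = {}
#   allsets = {n: successorssets(repo, n, cache=cache) for n in nodes}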
731 729
732 730 def successorsandmarkers(repo, ctx):
733 731 """compute the raw data needed for computing obsfate
734 732 Returns a list of dict, one dict per successors set
735 733 """
736 734 if not ctx.obsolete():
737 735 return None
738 736
739 737 ssets = successorssets(repo, ctx.node(), closest=True)
740 738
741 739 # successorssets returns an empty list for pruned revisions; remap it
742 740 # into a list containing an empty list for future processing
743 741 if ssets == []:
744 742 ssets = [[]]
745 743
746 744 # Try to recover pruned markers
747 745 succsmap = repo.obsstore.successors
748 746 fullsuccessorsets = [] # successor set + markers
749 747 for sset in ssets:
750 748 if sset:
751 749 fullsuccessorsets.append(sset)
752 750 else:
753 751 # successorssets returns an empty list when ctx or one of its
754 752 # successors is pruned.
755 753 # In this case, walk the obs-markers tree again starting with ctx
756 754 # and find the relevant pruning obs-markers, the ones without
757 755 # successors.
758 756 # Having these markers allows us to compute some information about
759 757 # its fate, like who pruned this changeset and when.
760 758
761 759 # XXX we do not catch all prune markers (e.g. rewritten then pruned)
762 760 # (fix me later)
763 761 foundany = False
764 762 for mark in succsmap.get(ctx.node(), ()):
765 763 if not mark[1]:
766 764 foundany = True
767 765 sset = _succs()
768 766 sset.markers.add(mark)
769 767 fullsuccessorsets.append(sset)
770 768 if not foundany:
771 769 fullsuccessorsets.append(_succs())
772 770
773 771 values = []
774 772 for sset in fullsuccessorsets:
775 773 values.append({'successors': sset, 'markers': sset.markers})
776 774
777 775 return values
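
# Hedged illustration of the return shape (added note; values hypothetical):
#
#   [{'successors': _succs((succnode,)), 'markers': set([marker])}, ...]
#
# One dict per successors set; a pruned changeset contributes a dict whose
# 'successors' entry is an empty _succs().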
778 776
779 777 def _getobsfate(successorssets):
780 778 """ Compute a changeset obsolescence fate based on its successorssets.
781 779 Successors can be the tipmost ones or the immediate ones. This function's
782 780 return values are not meant to be shown directly to users; they are meant
783 781 to be used by internal functions only.
784 782 Returns one fate from the following values:
785 783 - pruned
786 784 - diverged
787 785 - superseded
788 786 - superseded_split
789 787 """
790 788
791 789 if len(successorssets) == 0:
792 790 # The commit has been pruned
793 791 return 'pruned'
794 792 elif len(successorssets) > 1:
795 793 return 'diverged'
796 794 else:
797 795 # No divergence, only one set of successors
798 796 successors = successorssets[0]
799 797
800 798 if len(successors) == 1:
801 799 return 'superseded'
802 800 else:
803 801 return 'superseded_split'
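
# Hedged sanity checks (added note; plain lists stand in for successors
# sets):
#
#   _getobsfate([])              -> 'pruned'
#   _getobsfate([['a'], ['b']])  -> 'diverged'
#   _getobsfate([['a']])         -> 'superseded'
#   _getobsfate([['a', 'b']])    -> 'superseded_split'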
804 802
805 803 def obsfateverb(successorset, markers):
806 804 """ Return the verb summarizing the successorset and potentially using
807 805 information from the markers
808 806 """
809 807 if not successorset:
810 808 verb = 'pruned'
811 809 elif len(successorset) == 1:
812 810 verb = 'rewritten'
813 811 else:
814 812 verb = 'split'
815 813 return verb
816 814
817 815 def markersdates(markers):
818 816 """returns the list of dates for a list of markers
819 817 """
820 818 return [m[4] for m in markers]
821 819
822 820 def markersusers(markers):
823 821 """ Returns a sorted list of markers users without duplicates
824 822 """
825 823 markersmeta = [dict(m[3]) for m in markers]
826 824 users = set(encoding.tolocal(meta['user']) for meta in markersmeta
827 825 if meta.get('user'))
828 826
829 827 return sorted(users)
830 828
831 829 def markersoperations(markers):
832 830 """ Returns a sorted list of markers operations without duplicates
833 831 """
834 832 markersmeta = [dict(m[3]) for m in markers]
835 833 operations = set(meta.get('operation') for meta in markersmeta
836 834 if meta.get('operation'))
837 835
838 836 return sorted(operations)
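
# Hedged illustration of the marker fields these helpers rely on (added
# note; values hypothetical): m[3] holds the metadata pairs and m[4] the
# date, so with dict(m[3]) == {'user': 'alice', 'operation': 'amend'}:
#
#   markersusers([m])      -> ['alice']
#   markersoperations([m]) -> ['amend']
#   markersdates([m])      -> [m[4]]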
839 837
840 838 def obsfateprinter(ui, repo, successors, markers, formatctx):
841 839 """ Build a obsfate string for a single successorset using all obsfate
842 840 related function defined in obsutil
843 841 """
844 842 quiet = ui.quiet
845 843 verbose = ui.verbose
846 844 normal = not verbose and not quiet
847 845
848 846 line = []
849 847
850 848 # Verb
851 849 line.append(obsfateverb(successors, markers))
852 850
853 851 # Operations
854 852 operations = markersoperations(markers)
855 853 if operations:
856 854 line.append(" using %s" % ", ".join(operations))
857 855
858 856 # Successors
859 857 if successors:
860 858 fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
861 859 line.append(" as %s" % ", ".join(fmtsuccessors))
862 860
863 861 # Users
864 862 users = markersusers(markers)
865 863 # Filter out the current user in non-verbose mode to reduce the
866 864 # amount of information
867 865 if not verbose:
868 866 currentuser = ui.username(acceptempty=True)
869 867 if len(users) == 1 and currentuser in users:
870 868 users = None
871 869
872 870 if (verbose or normal) and users:
873 871 line.append(" by %s" % ", ".join(users))
874 872
875 873 # Date
876 874 dates = markersdates(markers)
877 875
878 876 if dates and verbose:
879 877 min_date = min(dates)
880 878 max_date = max(dates)
881 879
882 880 if min_date == max_date:
883 881 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
884 882 line.append(" (at %s)" % fmtmin_date)
885 883 else:
886 884 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
887 885 fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
888 886 line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
889 887
890 888 return "".join(line)
891 889
892 890
893 891 filteredmsgtable = {
894 892 "pruned": _("hidden revision '%s' is pruned"),
895 893 "diverged": _("hidden revision '%s' has diverged"),
896 894 "superseded": _("hidden revision '%s' was rewritten as: %s"),
897 895 "superseded_split": _("hidden revision '%s' was split as: %s"),
898 896 "superseded_split_several": _("hidden revision '%s' was split as: %s and "
899 897 "%d more"),
900 898 }
901 899
902 900 def _getfilteredreason(repo, changeid, ctx):
903 901 """return a human-friendly string on why a obsolete changeset is hidden
904 902 """
905 903 successors = successorssets(repo, ctx.node())
906 904 fate = _getobsfate(successors)
907 905
908 906 # Be more precise in case the revision is superseded
909 907 if fate == 'pruned':
910 908 return filteredmsgtable['pruned'] % changeid
911 909 elif fate == 'diverged':
912 910 return filteredmsgtable['diverged'] % changeid
913 911 elif fate == 'superseded':
914 912 single_successor = nodemod.short(successors[0][0])
915 913 return filteredmsgtable['superseded'] % (changeid, single_successor)
916 914 elif fate == 'superseded_split':
917 915
918 916 succs = []
919 917 for node_id in successors[0]:
920 918 succs.append(nodemod.short(node_id))
921 919
922 920 if len(succs) <= 2:
923 921 fmtsuccs = ', '.join(succs)
924 922 return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
925 923 else:
926 924 firstsuccessors = ', '.join(succs[:2])
927 925 remainingnumber = len(succs) - 2
928 926
929 927 args = (changeid, firstsuccessors, remainingnumber)
930 928 return filteredmsgtable['superseded_split_several'] % args
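
# Hedged examples of the resulting messages (added note; hashes
# hypothetical):
#
#   hidden revision '1a2b3c4d' is pruned
#   hidden revision '1a2b3c4d' was rewritten as: 5e6f7a8b
#   hidden revision '1a2b3c4d' was split as: 5e6f7a8b, 9c0d1e2f and 3 more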
931 929
932 930 def divergentsets(repo, ctx):
933 931 """Compute sets of commits divergent with a given one"""
934 932 cache = {}
935 933 base = {}
936 934 for n in allpredecessors(repo.obsstore, [ctx.node()]):
937 935 if n == ctx.node():
938 936 # a node can't be a base for divergence with itself
939 937 continue
940 938 nsuccsets = successorssets(repo, n, cache=cache)
941 939 for nsuccset in nsuccsets:
942 940 if ctx.node() in nsuccset:
943 941 # we are only interested in *other* successor sets
944 942 continue
945 943 if tuple(nsuccset) in base:
946 944 # we already know the latest base for this divergence
947 945 continue
948 946 base[tuple(nsuccset)] = n
949 947 return [{'divergentnodes': divset, 'commonpredecessor': b}
950 948 for divset, b in base.iteritems()]
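
# Hedged illustration of the return shape (added note; nodes hypothetical):
#
#   [{'divergentnodes': ('n2',), 'commonpredecessor': 'n0'}, ...]
#
# one entry per successors set reached from another rewrite of a shared
# predecessor, keyed by the latest such predecessor.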
951 949
952 950 def whyunstable(repo, ctx):
953 951 result = []
954 952 if ctx.orphan():
955 953 for parent in ctx.parents():
956 954 kind = None
957 955 if parent.orphan():
958 956 kind = 'orphan'
959 957 elif parent.obsolete():
960 958 kind = 'obsolete'
961 959 if kind is not None:
962 960 result.append({'instability': 'orphan',
963 961 'reason': '%s parent' % kind,
964 962 'node': parent.hex()})
965 963 if ctx.phasedivergent():
966 964 predecessors = allpredecessors(repo.obsstore, [ctx.node()],
967 965 ignoreflags=bumpedfix)
968 966 immutable = [repo[p] for p in predecessors
969 967 if p in repo and not repo[p].mutable()]
970 968 for predecessor in immutable:
971 969 result.append({'instability': 'phase-divergent',
972 970 'reason': 'immutable predecessor',
973 971 'node': predecessor.hex()})
974 972 if ctx.contentdivergent():
975 973 dsets = divergentsets(repo, ctx)
976 974 for dset in dsets:
977 975 divnodes = [repo[n] for n in dset['divergentnodes']]
978 976 result.append({'instability': 'content-divergent',
979 977 'divergentnodes': divnodes,
980 978 'reason': 'predecessor',
981 979 'node': nodemod.hex(dset['commonpredecessor'])})
982 980 return result
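
# Hedged illustration of whyunstable's output (added note; values
# hypothetical):
#
#   [{'instability': 'orphan', 'reason': 'obsolete parent',
#     'node': '<parent hex>'},
#    {'instability': 'phase-divergent', 'reason': 'immutable predecessor',
#     'node': '<predecessor hex>'}]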