obsolete: move marker flags to obsutil...
av6
r36971:b9bbcf9f default
@@ -1,1042 +1,1012 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 build new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that use changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depends on the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84 from .utils import dateutil
85 85
86 86 parsers = policy.importmod(r'parsers')
87 87
88 88 _pack = struct.pack
89 89 _unpack = struct.unpack
90 90 _calcsize = struct.calcsize
91 91 propertycache = util.propertycache
92 92
93 93 # the obsolete feature is not mature enough to be enabled by default.
94 94 # you have to rely on a third party extension to enable this.
95 95 _enabled = False
96 96
97 97 # Options for obsolescence
98 98 createmarkersopt = 'createmarkers'
99 99 allowunstableopt = 'allowunstable'
100 100 exchangeopt = 'exchange'
101 101
102 102 def _getoptionvalue(repo, option):
103 103 """Returns True if the given repository has the given obsolete option
104 104 enabled.
105 105 """
106 106 configkey = 'evolution.%s' % option
107 107 newconfig = repo.ui.configbool('experimental', configkey)
108 108
109 109 # Return the value only if defined
110 110 if newconfig is not None:
111 111 return newconfig
112 112
113 113 # Fallback on generic option
114 114 try:
115 115 return repo.ui.configbool('experimental', 'evolution')
116 116 except (error.ConfigError, AttributeError):
117 117 # Fallback on old-fashioned config
118 118 # inconsistent config: experimental.evolution
119 119 result = set(repo.ui.configlist('experimental', 'evolution'))
120 120
121 121 if 'all' in result:
122 122 return True
123 123
124 124 # For migration purposes, temporarily return true if the config hasn't
125 125 # been set but _enabled is true.
126 126 if len(result) == 0 and _enabled:
127 127 return True
128 128
129 129 # Temporary hack for next check
130 130 newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
131 131 if newconfig:
132 132 result.add('createmarkers')
133 133
134 134 return option in result
135 135
136 136 def isenabled(repo, option):
137 137 """Returns True if the given repository has the given obsolete option
138 138 enabled.
139 139 """
140 140 createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
141 141 unstablevalue = _getoptionvalue(repo, allowunstableopt)
142 142 exchangevalue = _getoptionvalue(repo, exchangeopt)
143 143
144 144 # createmarkers must be enabled if other options are enabled
145 145 if ((unstablevalue or exchangevalue) and not createmarkersvalue):
146 146 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
147 147 "if other obsolete options are enabled"))
148 148
149 149 return _getoptionvalue(repo, option)
150 150
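# A minimal usage sketch of isenabled (illustrative only; assumes a
# configured repo object is at hand):
#
#     if isenabled(repo, createmarkersopt):
#         pass  # safe to record new obsolescence markers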
151 ### obsolescence marker flag
152
153 ## bumpedfix flag
154 #
155 # When a changeset A' succeeds a changeset A which became public, we call A'
156 # "bumped" because it is a successor of a public changeset
157 #
158 # o A' (bumped)
159 # |`:
160 # | o A
161 # |/
162 # o Z
163 #
164 # The way to solve this situation is to create a new changeset Ad as a child
165 # of A. This changeset has the same content as A'. So the diff from A to A'
166 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
167 #
168 # o Ad
169 # |`:
170 # | x A'
171 # |'|
172 # o | A
173 # |/
174 # o Z
175 #
176 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
177 # as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
178 # This flag means that the successor expresses the changes between the public
179 # and bumped versions and fixes the situation, breaking the transitivity of
180 # "bumped" here.
181 bumpedfix = 1
182 usingsha256 = 2
151 bumpedfix = obsutil.bumpedfix
152 usingsha256 = obsutil.usingsha256
183 153
184 154 ## Parsing and writing of version "0"
185 155 #
186 156 # The header is followed by the markers. Each marker is made of:
187 157 #
188 158 # - 1 uint8: number of new changesets "N", can be zero.
189 159 #
190 160 # - 1 uint32: metadata size "M" in bytes.
191 161 #
192 162 # - 1 byte: a bit field. It is reserved for flags used in common
193 163 # obsolete marker operations, to avoid repeated decoding of metadata
194 164 # entries.
195 165 #
196 166 # - 20 bytes: obsoleted changeset identifier.
197 167 #
198 168 # - N*20 bytes: new changesets identifiers.
199 169 #
200 170 # - M bytes: metadata as a sequence of nul-terminated strings. Each
201 171 # string contains a key and a value, separated by a colon ':', without
202 172 # additional encoding. Keys cannot contain '\0' or ':' and values
203 173 # cannot contain '\0'.
204 174 _fm0version = 0
205 175 _fm0fixed = '>BIB20s'
206 176 _fm0node = '20s'
207 177 _fm0fsize = _calcsize(_fm0fixed)
208 178 _fm0fnodesize = _calcsize(_fm0node)
209 179
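# Illustrative sketch of the v0 fixed-part layout above: pack a marker
# with one successor and no metadata, then unpack it the way
# _fm0readmarkers does. The node values are made up for the example.
import struct
prec = b'\x11' * 20                               # obsoleted changeset id
succ = b'\x22' * 20                               # its single successor
raw = struct.pack('>BIB20s', 1, 0, 0, prec) + struct.pack('20s', succ)
numsuc, mdsize, flags, pre = struct.unpack('>BIB20s', raw[:26])
assert (numsuc, mdsize, flags, pre) == (1, 0, 0, prec)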
210 180 def _fm0readmarkers(data, off, stop):
211 181 # Loop on markers
212 182 while off < stop:
213 183 # read fixed part
214 184 cur = data[off:off + _fm0fsize]
215 185 off += _fm0fsize
216 186 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
217 187 # read replacement
218 188 sucs = ()
219 189 if numsuc:
220 190 s = (_fm0fnodesize * numsuc)
221 191 cur = data[off:off + s]
222 192 sucs = _unpack(_fm0node * numsuc, cur)
223 193 off += s
224 194 # read metadata
225 195 # (metadata will be decoded on demand)
226 196 metadata = data[off:off + mdsize]
227 197 if len(metadata) != mdsize:
228 198 raise error.Abort(_('parsing obsolete marker: metadata is too '
229 199 'short, %d bytes expected, got %d')
230 200 % (mdsize, len(metadata)))
231 201 off += mdsize
232 202 metadata = _fm0decodemeta(metadata)
233 203 try:
234 204 when, offset = metadata.pop('date', '0 0').split(' ')
235 205 date = float(when), int(offset)
236 206 except ValueError:
237 207 date = (0., 0)
238 208 parents = None
239 209 if 'p2' in metadata:
240 210 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
241 211 elif 'p1' in metadata:
242 212 parents = (metadata.pop('p1', None),)
243 213 elif 'p0' in metadata:
244 214 parents = ()
245 215 if parents is not None:
246 216 try:
247 217 parents = tuple(node.bin(p) for p in parents)
248 218 # if parent content is not a nodeid, drop the data
249 219 for p in parents:
250 220 if len(p) != 20:
251 221 parents = None
252 222 break
253 223 except TypeError:
254 224 # if content cannot be translated to nodeid drop the data.
255 225 parents = None
256 226
257 227 metadata = tuple(sorted(metadata.iteritems()))
258 228
259 229 yield (pre, sucs, flags, metadata, date, parents)
260 230
261 231 def _fm0encodeonemarker(marker):
262 232 pre, sucs, flags, metadata, date, parents = marker
263 233 if flags & usingsha256:
264 234 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
265 235 metadata = dict(metadata)
266 236 time, tz = date
267 237 metadata['date'] = '%r %i' % (time, tz)
268 238 if parents is not None:
269 239 if not parents:
270 240 # mark that we explicitly recorded no parents
271 241 metadata['p0'] = ''
272 242 for i, p in enumerate(parents, 1):
273 243 metadata['p%i' % i] = node.hex(p)
274 244 metadata = _fm0encodemeta(metadata)
275 245 numsuc = len(sucs)
276 246 format = _fm0fixed + (_fm0node * numsuc)
277 247 data = [numsuc, len(metadata), flags, pre]
278 248 data.extend(sucs)
279 249 return _pack(format, *data) + metadata
280 250
281 251 def _fm0encodemeta(meta):
282 252 """Return encoded metadata string to string mapping.
283 253
284 254 Assume no ':' in key and no '\0' in both key and value."""
285 255 for key, value in meta.iteritems():
286 256 if ':' in key or '\0' in key:
287 257 raise ValueError("':' and '\0' are forbidden in metadata key")
288 258 if '\0' in value:
289 259 raise ValueError("'\0' is forbidden in metadata value")
290 260 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
291 261
292 262 def _fm0decodemeta(data):
293 263 """Return string to string dictionary from encoded version."""
294 264 d = {}
295 265 for l in data.split('\0'):
296 266 if l:
297 267 key, value = l.split(':')
298 268 d[key] = value
299 269 return d
300 270
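# Illustrative round trip through the two helpers above, written with
# plain literals for readability (the real code operates on bytes):
meta = {'user': 'alice', 'operation': 'amend'}
encoded = '\0'.join('%s:%s' % (k, meta[k]) for k in sorted(meta))
decoded = dict(item.split(':') for item in encoded.split('\0') if item)
assert decoded == meta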
301 271 ## Parsing and writing of version "1"
302 272 #
303 273 # The header is followed by the markers. Each marker is made of:
304 274 #
305 275 # - uint32: total size of the marker (including this field)
306 276 #
307 277 # - float64: date in seconds since epoch
308 278 #
309 279 # - int16: timezone offset in minutes
310 280 #
311 281 # - uint16: a bit field. It is reserved for flags used in common
312 282 # obsolete marker operations, to avoid repeated decoding of metadata
313 283 # entries.
314 284 #
315 285 # - uint8: number of successors "N", can be zero.
316 286 #
317 287 # - uint8: number of parents "P", can be zero.
318 288 #
319 289 # 0: parents data stored but no parent,
320 290 # 1: one parent stored,
321 291 # 2: two parents stored,
322 292 # 3: no parent data stored
323 293 #
324 294 # - uint8: number of metadata entries M
325 295 #
326 296 # - 20 or 32 bytes: predecessor changeset identifier.
327 297 #
328 298 # - N*(20 or 32) bytes: successor changeset identifiers.
329 299 #
330 300 # - P*(20 or 32) bytes: parents of the predecessor changeset.
331 301 #
332 302 # - M*(uint8, uint8): size of all metadata entries (key and value)
333 303 #
334 304 # - remaining bytes: the metadata, each (key, value) pair after the other.
335 305 _fm1version = 1
336 306 _fm1fixed = '>IdhHBBB20s'
337 307 _fm1nodesha1 = '20s'
338 308 _fm1nodesha256 = '32s'
339 309 _fm1nodesha1size = _calcsize(_fm1nodesha1)
340 310 _fm1nodesha256size = _calcsize(_fm1nodesha256)
341 311 _fm1fsize = _calcsize(_fm1fixed)
342 312 _fm1parentnone = 3
343 313 _fm1parentshift = 14
344 314 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
345 315 _fm1metapair = 'BB'
346 316 _fm1metapairsize = _calcsize(_fm1metapair)
347 317
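# Sanity sketch for the v1 fixed part declared above: a 4-byte total
# size, an 8-byte float date, a 2-byte timezone, a 2-byte flag field,
# three one-byte counters and the 20-byte sha1 predecessor: 39 bytes.
import struct
assert struct.calcsize('>IdhHBBB20s') == 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20
assert _fm1fsize == 39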
348 318 def _fm1purereadmarkers(data, off, stop):
349 319 # make some global constants local for performance
350 320 noneflag = _fm1parentnone
351 321 sha2flag = usingsha256
352 322 sha1size = _fm1nodesha1size
353 323 sha2size = _fm1nodesha256size
354 324 sha1fmt = _fm1nodesha1
355 325 sha2fmt = _fm1nodesha256
356 326 metasize = _fm1metapairsize
357 327 metafmt = _fm1metapair
358 328 fsize = _fm1fsize
359 329 unpack = _unpack
360 330
361 331 # Loop on markers
362 332 ufixed = struct.Struct(_fm1fixed).unpack
363 333
364 334 while off < stop:
365 335 # read fixed part
366 336 o1 = off + fsize
367 337 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
368 338
369 339 if flags & sha2flag:
370 340 # FIXME: prec was read as a SHA1, needs to be amended
371 341
372 342 # read 0 or more successors
373 343 if numsuc == 1:
374 344 o2 = o1 + sha2size
375 345 sucs = (data[o1:o2],)
376 346 else:
377 347 o2 = o1 + sha2size * numsuc
378 348 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
379 349
380 350 # read parents
381 351 if numpar == noneflag:
382 352 o3 = o2
383 353 parents = None
384 354 elif numpar == 1:
385 355 o3 = o2 + sha2size
386 356 parents = (data[o2:o3],)
387 357 else:
388 358 o3 = o2 + sha2size * numpar
389 359 parents = unpack(sha2fmt * numpar, data[o2:o3])
390 360 else:
391 361 # read 0 or more successors
392 362 if numsuc == 1:
393 363 o2 = o1 + sha1size
394 364 sucs = (data[o1:o2],)
395 365 else:
396 366 o2 = o1 + sha1size * numsuc
397 367 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
398 368
399 369 # read parents
400 370 if numpar == noneflag:
401 371 o3 = o2
402 372 parents = None
403 373 elif numpar == 1:
404 374 o3 = o2 + sha1size
405 375 parents = (data[o2:o3],)
406 376 else:
407 377 o3 = o2 + sha1size * numpar
408 378 parents = unpack(sha1fmt * numpar, data[o2:o3])
409 379
410 380 # read metadata
411 381 off = o3 + metasize * nummeta
412 382 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
413 383 metadata = []
414 384 for idx in xrange(0, len(metapairsize), 2):
415 385 o1 = off + metapairsize[idx]
416 386 o2 = o1 + metapairsize[idx + 1]
417 387 metadata.append((data[off:o1], data[o1:o2]))
418 388 off = o2
419 389
420 390 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
421 391
422 392 def _fm1encodeonemarker(marker):
423 393 pre, sucs, flags, metadata, date, parents = marker
424 394 # determine node size
425 395 _fm1node = _fm1nodesha1
426 396 if flags & usingsha256:
427 397 _fm1node = _fm1nodesha256
428 398 numsuc = len(sucs)
429 399 numextranodes = numsuc
430 400 if parents is None:
431 401 numpar = _fm1parentnone
432 402 else:
433 403 numpar = len(parents)
434 404 numextranodes += numpar
435 405 formatnodes = _fm1node * numextranodes
436 406 formatmeta = _fm1metapair * len(metadata)
437 407 format = _fm1fixed + formatnodes + formatmeta
438 408 # tz is stored in minutes so we divide by 60
439 409 tz = date[1]//60
440 410 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
441 411 data.extend(sucs)
442 412 if parents is not None:
443 413 data.extend(parents)
444 414 totalsize = _calcsize(format)
445 415 for key, value in metadata:
446 416 lk = len(key)
447 417 lv = len(value)
448 418 if lk > 255:
449 419 msg = ('obsstore metadata key cannot be longer than 255 bytes'
450 420 ' (key "%s" is %u bytes)') % (key, lk)
451 421 raise error.ProgrammingError(msg)
452 422 if lv > 255:
453 423 msg = ('obsstore metadata value cannot be longer than 255 bytes'
454 424 ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
455 425 raise error.ProgrammingError(msg)
456 426 data.append(lk)
457 427 data.append(lv)
458 428 totalsize += lk + lv
459 429 data[0] = totalsize
460 430 data = [_pack(format, *data)]
461 431 for key, value in metadata:
462 432 data.append(key)
463 433 data.append(value)
464 434 return ''.join(data)
465 435
466 436 def _fm1readmarkers(data, off, stop):
467 437 native = getattr(parsers, 'fm1readmarkers', None)
468 438 if not native:
469 439 return _fm1purereadmarkers(data, off, stop)
470 440 return native(data, off, stop)
471 441
472 442 # mapping to read/write various marker formats
473 443 # <version> -> (decoder, encoder)
474 444 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
475 445 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
476 446
477 447 def _readmarkerversion(data):
478 448 return _unpack('>B', data[0:1])[0]
479 449
480 450 @util.nogc
481 451 def _readmarkers(data, off=None, stop=None):
482 452 """Read and enumerate markers from raw data"""
483 453 diskversion = _readmarkerversion(data)
484 454 if not off:
485 455 off = 1 # skip 1 byte version number
486 456 if stop is None:
487 457 stop = len(data)
488 458 if diskversion not in formats:
489 459 msg = _('parsing obsolete marker: unknown version %r') % diskversion
490 460 raise error.UnknownVersion(msg, version=diskversion)
491 461 return diskversion, formats[diskversion][0](data, off, stop)
492 462
493 463 def encodeheader(version=_fm0version):
494 464 return _pack('>B', version)
495 465
496 466 def encodemarkers(markers, addheader=False, version=_fm0version):
497 467 # Kept separate from flushmarkers(), it will be reused for
498 468 # markers exchange.
499 469 encodeone = formats[version][1]
500 470 if addheader:
501 471 yield encodeheader(version)
502 472 for marker in markers:
503 473 yield encodeone(marker)
504 474
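# Illustrative round trip (the marker tuple and node values are made up):
# encode one v1 marker with a header, then parse it back with
# _readmarkers. Tuple layout: (prec, succs, flags, metadata, date, parents).
marker = (b'\x11' * 20, (b'\x22' * 20,), 0, (), (0.0, 0), None)
stream = b''.join(encodemarkers([marker], addheader=True,
                                version=_fm1version))
assert stream[0:1] == b'\x01'          # the one-byte version header
version, read = _readmarkers(stream)
decoded = list(read)[0]
assert version == _fm1version
assert decoded[0] == marker[0] and decoded[1] == marker[1]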
505 475 @util.nogc
506 476 def _addsuccessors(successors, markers):
507 477 for mark in markers:
508 478 successors.setdefault(mark[0], set()).add(mark)
509 479
510 480 @util.nogc
511 481 def _addpredecessors(predecessors, markers):
512 482 for mark in markers:
513 483 for suc in mark[1]:
514 484 predecessors.setdefault(suc, set()).add(mark)
515 485
516 486 @util.nogc
517 487 def _addchildren(children, markers):
518 488 for mark in markers:
519 489 parents = mark[5]
520 490 if parents is not None:
521 491 for p in parents:
522 492 children.setdefault(p, set()).add(mark)
523 493
524 494 def _checkinvalidmarkers(markers):
525 495 """search for marker with invalid data and raise error if needed
526 496
527 497 Exists as a separate function to allow the evolve extension to do
528 498 more subtle handling.
529 499 """
530 500 for mark in markers:
531 501 if node.nullid in mark[1]:
532 502 raise error.Abort(_('bad obsolescence marker detected: '
533 503 'invalid successors nullid'))
534 504
535 505 class obsstore(object):
536 506 """Store obsolete markers
537 507
538 508 Markers can be accessed with three mappings:
539 509 - predecessors[x] -> set(markers on predecessors edges of x)
540 510 - successors[x] -> set(markers on successors edges of x)
541 511 - children[x] -> set(markers on predecessors edges of children(x))
542 512 """
543 513
544 514 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
545 515 # prec: nodeid, the predecessor changeset
546 516 # succs: tuple of nodeid, successor changesets (0-N length)
547 517 # flag: integer, flag field carrying modifier for the markers (see doc)
548 518 # meta: binary blob, encoded metadata dictionary
549 519 # date: (float, int) tuple, date of marker creation
550 520 # parents: (tuple of nodeid) or None, parents of predecessors
551 521 # None is used when no data has been recorded
552 522
553 523 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
554 524 # caches for various obsolescence related data
555 525 self.caches = {}
556 526 self.svfs = svfs
557 527 self._defaultformat = defaultformat
558 528 self._readonly = readonly
559 529
560 530 def __iter__(self):
561 531 return iter(self._all)
562 532
563 533 def __len__(self):
564 534 return len(self._all)
565 535
566 536 def __nonzero__(self):
567 537 if not self._cached(r'_all'):
568 538 try:
569 539 return self.svfs.stat('obsstore').st_size > 1
570 540 except OSError as inst:
571 541 if inst.errno != errno.ENOENT:
572 542 raise
573 543 # just build an empty _all list if no obsstore exists, which
574 544 # avoids further stat() syscalls
575 545 return bool(self._all)
576 546
577 547 __bool__ = __nonzero__
578 548
579 549 @property
580 550 def readonly(self):
581 551 """True if marker creation is disabled
582 552
583 553 Remove me in the future when obsolete markers are always on."""
584 554 return self._readonly
585 555
586 556 def create(self, transaction, prec, succs=(), flag=0, parents=None,
587 557 date=None, metadata=None, ui=None):
588 558 """obsolete: add a new obsolete marker
589 559
590 560 * ensure it is hashable
591 561 * check mandatory metadata
592 562 * encode metadata
593 563
594 564 If you are a human writing code creating markers you want to use the
595 565 `createmarkers` function in this module instead.
596 566
597 567 return True if a new marker has been added, False if the marker
598 568 already existed (no op).
599 569 """
600 570 if metadata is None:
601 571 metadata = {}
602 572 if date is None:
603 573 if 'date' in metadata:
604 574 # as a courtesy for out-of-tree extensions
605 575 date = dateutil.parsedate(metadata.pop('date'))
606 576 elif ui is not None:
607 577 date = ui.configdate('devel', 'default-date')
608 578 if date is None:
609 579 date = dateutil.makedate()
610 580 else:
611 581 date = dateutil.makedate()
612 582 if len(prec) != 20:
613 583 raise ValueError(prec)
614 584 for succ in succs:
615 585 if len(succ) != 20:
616 586 raise ValueError(succ)
617 587 if prec in succs:
618 588 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
619 589
620 590 metadata = tuple(sorted(metadata.iteritems()))
621 591
622 592 marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
623 593 return bool(self.add(transaction, [marker]))
624 594
625 595 def add(self, transaction, markers):
626 596 """Add new markers to the store
627 597
628 598 Take care of filtering duplicates.
629 599 Return the number of new markers."""
630 600 if self._readonly:
631 601 raise error.Abort(_('creating obsolete markers is not enabled on '
632 602 'this repo'))
633 603 known = set()
634 604 getsuccessors = self.successors.get
635 605 new = []
636 606 for m in markers:
637 607 if m not in getsuccessors(m[0], ()) and m not in known:
638 608 known.add(m)
639 609 new.append(m)
640 610 if new:
641 611 f = self.svfs('obsstore', 'ab')
642 612 try:
643 613 offset = f.tell()
644 614 transaction.add('obsstore', offset)
645 615 # offset == 0: new file - add the version header
646 616 data = b''.join(encodemarkers(new, offset == 0, self._version))
647 617 f.write(data)
648 618 finally:
649 619 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
650 620 # call 'filecacheentry.refresh()' here
651 621 f.close()
652 622 addedmarkers = transaction.changes.get('obsmarkers')
653 623 if addedmarkers is not None:
654 624 addedmarkers.update(new)
655 625 self._addmarkers(new, data)
656 626 # new markers *may* have changed several sets. invalidate the caches.
657 627 self.caches.clear()
658 628 # records the number of new markers for the transaction hooks
659 629 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
660 630 transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
661 631 return len(new)
662 632
663 633 def mergemarkers(self, transaction, data):
664 634 """merge a binary stream of markers inside the obsstore
665 635
666 636 Returns the number of new markers added."""
667 637 version, markers = _readmarkers(data)
668 638 return self.add(transaction, markers)
669 639
670 640 @propertycache
671 641 def _data(self):
672 642 return self.svfs.tryread('obsstore')
673 643
674 644 @propertycache
675 645 def _version(self):
676 646 if len(self._data) >= 1:
677 647 return _readmarkerversion(self._data)
678 648 else:
679 649 return self._defaultformat
680 650
681 651 @propertycache
682 652 def _all(self):
683 653 data = self._data
684 654 if not data:
685 655 return []
686 656 self._version, markers = _readmarkers(data)
687 657 markers = list(markers)
688 658 _checkinvalidmarkers(markers)
689 659 return markers
690 660
691 661 @propertycache
692 662 def successors(self):
693 663 successors = {}
694 664 _addsuccessors(successors, self._all)
695 665 return successors
696 666
697 667 @propertycache
698 668 def predecessors(self):
699 669 predecessors = {}
700 670 _addpredecessors(predecessors, self._all)
701 671 return predecessors
702 672
703 673 @propertycache
704 674 def children(self):
705 675 children = {}
706 676 _addchildren(children, self._all)
707 677 return children
708 678
709 679 def _cached(self, attr):
710 680 return attr in self.__dict__
711 681
712 682 def _addmarkers(self, markers, rawdata):
713 683 markers = list(markers) # to allow repeated iteration
714 684 self._data = self._data + rawdata
715 685 self._all.extend(markers)
716 686 if self._cached(r'successors'):
717 687 _addsuccessors(self.successors, markers)
718 688 if self._cached(r'predecessors'):
719 689 _addpredecessors(self.predecessors, markers)
720 690 if self._cached(r'children'):
721 691 _addchildren(self.children, markers)
722 692 _checkinvalidmarkers(markers)
723 693
724 694 def relevantmarkers(self, nodes):
725 695 """return a set of all obsolescence markers relevant to a set of nodes.
726 696
727 697 "relevant" to a set of nodes mean:
728 698
729 699 - markers that use this changeset as a successor
730 700 - prune markers of direct children of this changeset
731 701 - recursive application of the two rules on predecessors of these
732 702 markers
733 703
734 704 It is a set so you cannot rely on order."""
735 705
736 706 pendingnodes = set(nodes)
737 707 seenmarkers = set()
738 708 seennodes = set(pendingnodes)
739 709 precursorsmarkers = self.predecessors
740 710 succsmarkers = self.successors
741 711 children = self.children
742 712 while pendingnodes:
743 713 direct = set()
744 714 for current in pendingnodes:
745 715 direct.update(precursorsmarkers.get(current, ()))
746 716 pruned = [m for m in children.get(current, ()) if not m[1]]
747 717 direct.update(pruned)
748 718 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
749 719 direct.update(pruned)
750 720 direct -= seenmarkers
751 721 pendingnodes = set([m[0] for m in direct])
752 722 seenmarkers |= direct
753 723 pendingnodes -= seennodes
754 724 seennodes |= pendingnodes
755 725 return seenmarkers
756 726
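# Illustrative usage sketch, assuming a repo with an obsstore and a set
# of outgoing nodes (the 'outgoingnodes' name is hypothetical):
#
#     markers = repo.obsstore.relevantmarkers(outgoingnodes)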
757 727 def makestore(ui, repo):
758 728 """Create an obsstore instance from a repo."""
759 729 # read default format for new obsstore.
760 730 # developer config: format.obsstore-version
761 731 defaultformat = ui.configint('format', 'obsstore-version')
762 732 # rely on obsstore class default when possible.
763 733 kwargs = {}
764 734 if defaultformat is not None:
765 735 kwargs[r'defaultformat'] = defaultformat
766 736 readonly = not isenabled(repo, createmarkersopt)
767 737 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
768 738 if store and readonly:
769 739 ui.warn(_('obsolete feature not enabled but %i markers found!\n')
770 740 % len(list(store)))
771 741 return store
772 742
773 743 def commonversion(versions):
774 744 """Return the newest version listed in both versions and our local formats.
775 745
776 746 Returns None if no common version exists.
777 747 """
778 748 versions.sort(reverse=True)
779 749 # search for highest version known on both side
780 750 for v in versions:
781 751 if v in formats:
782 752 return v
783 753 return None
784 754
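# Usage sketch: pick the newest marker format both sides understand.
assert commonversion([_fm0version, _fm1version]) == _fm1version
assert commonversion([42]) is None     # 42 is a made-up unknown version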
785 755 # arbitrarily picked to fit into the 8K limit from HTTP servers
786 756 # you have to take into account:
787 757 # - the version header
788 758 # - the base85 encoding
789 759 _maxpayload = 5300
790 760
791 761 def _pushkeyescape(markers):
792 762 """encode markers into a dict suitable for pushkey exchange
793 763
794 764 - binary data is base85 encoded
795 765 - split in chunks smaller than 5300 bytes"""
796 766 keys = {}
797 767 parts = []
798 768 currentlen = _maxpayload * 2 # ensure we create a new part
799 769 for marker in markers:
800 770 nextdata = _fm0encodeonemarker(marker)
801 771 if (len(nextdata) + currentlen > _maxpayload):
802 772 currentpart = []
803 773 currentlen = 0
804 774 parts.append(currentpart)
805 775 currentpart.append(nextdata)
806 776 currentlen += len(nextdata)
807 777 for idx, part in enumerate(reversed(parts)):
808 778 data = ''.join([_pack('>B', _fm0version)] + part)
809 779 keys['dump%i' % idx] = util.b85encode(data)
810 780 return keys
811 781
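# Rough illustration of the chunking above: enough markers to exceed
# _maxpayload are split across several base85-encoded 'dump%i' keys
# (the dummy marker below is made up for the example).
dummy = (b'\x11' * 20, (), 0, (), (0.0, 0), None)
keys = _pushkeyescape([dummy] * 500)
assert len(keys) > 1 and all(k.startswith('dump') for k in keys)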
812 782 def listmarkers(repo):
813 783 """List markers over pushkey"""
814 784 if not repo.obsstore:
815 785 return {}
816 786 return _pushkeyescape(sorted(repo.obsstore))
817 787
818 788 def pushmarker(repo, key, old, new):
819 789 """Push markers over pushkey"""
820 790 if not key.startswith('dump'):
821 791 repo.ui.warn(_('unknown key: %r') % key)
822 792 return False
823 793 if old:
824 794 repo.ui.warn(_('unexpected old value for %r') % key)
825 795 return False
826 796 data = util.b85decode(new)
827 797 with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
828 798 repo.obsstore.mergemarkers(tr, data)
829 799 repo.invalidatevolatilesets()
830 800 return True
831 801
832 802 # mapping of 'set-name' -> <function to compute this set>
833 803 cachefuncs = {}
834 804 def cachefor(name):
835 805 """Decorator to register a function as computing the cache for a set"""
836 806 def decorator(func):
837 807 if name in cachefuncs:
838 808 msg = "duplicated registration for volatileset '%s' (existing: %r)"
839 809 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
840 810 cachefuncs[name] = func
841 811 return func
842 812 return decorator
843 813
844 814 def getrevs(repo, name):
845 815 """Return the set of revision that belong to the <name> set
846 816
847 817 Such access may compute the set and cache it for future use"""
848 818 repo = repo.unfiltered()
849 819 if not repo.obsstore:
850 820 return frozenset()
851 821 if name not in repo.obsstore.caches:
852 822 repo.obsstore.caches[name] = cachefuncs[name](repo)
853 823 return repo.obsstore.caches[name]
854 824
855 825 # To be simple we need to invalidate obsolescence caches when:
856 826 #
857 827 # - a new changeset is added
858 828 # - public phase is changed
859 829 # - obsolescence markers are added
860 830 # - strip is used on a repo
861 831 def clearobscaches(repo):
862 832 """Remove all obsolescence related cache from a repo
863 833
864 834 This remove all cache in obsstore is the obsstore already exist on the
865 835 repo.
866 836
867 837 (We could be smarter here given the exact event that trigger the cache
868 838 clearing)"""
869 839 # only clear cache is there is obsstore data in this repo
870 840 if 'obsstore' in repo._filecache:
871 841 repo.obsstore.caches.clear()
872 842
873 843 def _mutablerevs(repo):
874 844 """the set of mutable revision in the repository"""
875 845 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
876 846
877 847 @cachefor('obsolete')
878 848 def _computeobsoleteset(repo):
879 849 """the set of obsolete revisions"""
880 850 getnode = repo.changelog.node
881 851 notpublic = _mutablerevs(repo)
882 852 isobs = repo.obsstore.successors.__contains__
883 853 obs = set(r for r in notpublic if isobs(getnode(r)))
884 854 return obs
885 855
886 856 @cachefor('orphan')
887 857 def _computeorphanset(repo):
888 858 """the set of non obsolete revisions with obsolete parents"""
889 859 pfunc = repo.changelog.parentrevs
890 860 mutable = _mutablerevs(repo)
891 861 obsolete = getrevs(repo, 'obsolete')
892 862 others = mutable - obsolete
893 863 unstable = set()
894 864 for r in sorted(others):
895 865 # A rev is unstable if one of its parents is obsolete or unstable;
896 866 # this works since we traverse following growing rev order
897 867 for p in pfunc(r):
898 868 if p in obsolete or p in unstable:
899 869 unstable.add(r)
900 870 break
901 871 return unstable
902 872
903 873 @cachefor('suspended')
904 874 def _computesuspendedset(repo):
905 875 """the set of obsolete parents with non obsolete descendants"""
906 876 suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
907 877 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
908 878
909 879 @cachefor('extinct')
910 880 def _computeextinctset(repo):
911 881 """the set of obsolete parents without non obsolete descendants"""
912 882 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
913 883
914 884 @cachefor('phasedivergent')
915 885 def _computephasedivergentset(repo):
916 886 """the set of revs trying to obsolete public revisions"""
917 887 bumped = set()
918 888 # util function (avoid attribute lookup in the loop)
919 889 phase = repo._phasecache.phase # would be faster to grab the full list
920 890 public = phases.public
921 891 cl = repo.changelog
922 892 torev = cl.nodemap.get
923 893 tonode = cl.node
924 894 for rev in repo.revs('(not public()) and (not obsolete())'):
925 895 # We only evaluate mutable, non-obsolete revisions
926 896 node = tonode(rev)
927 897 # (future) A cache of predecessors may be worth it if splits are very common
928 898 for pnode in obsutil.allpredecessors(repo.obsstore, [node],
929 899 ignoreflags=bumpedfix):
930 900 prev = torev(pnode) # unfiltered! but so is phasecache
931 901 if (prev is not None) and (phase(repo, prev) <= public):
932 902 # we have a public predecessor
933 903 bumped.add(rev)
934 904 break # Next draft!
935 905 return bumped
936 906
937 907 @cachefor('contentdivergent')
938 908 def _computecontentdivergentset(repo):
939 909 """the set of rev that compete to be the final successors of some revision.
940 910 """
941 911 divergent = set()
942 912 obsstore = repo.obsstore
943 913 newermap = {}
944 914 tonode = repo.changelog.node
945 915 for rev in repo.revs('(not public()) - obsolete()'):
946 916 node = tonode(rev)
947 917 mark = obsstore.predecessors.get(node, ())
948 918 toprocess = set(mark)
949 919 seen = set()
950 920 while toprocess:
951 921 prec = toprocess.pop()[0]
952 922 if prec in seen:
953 923 continue # guard against marker cycles (avoid hanging)
954 924 seen.add(prec)
955 925 if prec not in newermap:
956 926 obsutil.successorssets(repo, prec, cache=newermap)
957 927 newer = [n for n in newermap[prec] if n]
958 928 if len(newer) > 1:
959 929 divergent.add(rev)
960 930 break
961 931 toprocess.update(obsstore.predecessors.get(prec, ()))
962 932 return divergent
963 933
964 934
965 935 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
966 936 operation=None):
967 937 """Add obsolete markers between changesets in a repo
968 938
969 939 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
970 940 tuples. `old` and `new` are changectxs. metadata is an optional dictionary
971 941 containing metadata for this marker only. It is merged with the global
972 942 metadata specified through the `metadata` argument of this function.
973 943
974 944 Trying to obsolete a public changeset will raise an exception.
975 945
976 946 Current user and date are used except if specified otherwise in the
977 947 metadata attribute.
978 948
979 949 This function operates within a transaction of its own, but does
980 950 not take any lock on the repo.
981 951 """
982 952 # prepare metadata
983 953 if metadata is None:
984 954 metadata = {}
985 955 if 'user' not in metadata:
986 956 develuser = repo.ui.config('devel', 'user.obsmarker')
987 957 if develuser:
988 958 metadata['user'] = develuser
989 959 else:
990 960 metadata['user'] = repo.ui.username()
991 961
992 962 # Operation metadata handling
993 963 useoperation = repo.ui.configbool('experimental',
994 964 'evolution.track-operation')
995 965 if useoperation and operation:
996 966 metadata['operation'] = operation
997 967
998 968 # Effect flag metadata handling
999 969 saveeffectflag = repo.ui.configbool('experimental',
1000 970 'evolution.effect-flags')
1001 971
1002 972 with repo.transaction('add-obsolescence-marker') as tr:
1003 973 markerargs = []
1004 974 for rel in relations:
1005 975 prec = rel[0]
1006 976 sucs = rel[1]
1007 977 localmetadata = metadata.copy()
1008 978 if 2 < len(rel):
1009 979 localmetadata.update(rel[2])
1010 980
1011 981 if not prec.mutable():
1012 982 raise error.Abort(_("cannot obsolete public changeset: %s")
1013 983 % prec,
1014 984 hint="see 'hg help phases' for details")
1015 985 nprec = prec.node()
1016 986 nsucs = tuple(s.node() for s in sucs)
1017 987 npare = None
1018 988 if not nsucs:
1019 989 npare = tuple(p.node() for p in prec.parents())
1020 990 if nprec in nsucs:
1021 991 raise error.Abort(_("changeset %s cannot obsolete itself")
1022 992 % prec)
1023 993
1024 994 # Effect flag can be different by relation
1025 995 if saveeffectflag:
1026 996 # The effect flag is saved in a versioned field name for future
1027 997 # evolution
1028 998 effectflag = obsutil.geteffectflag(rel)
1029 999 localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag
1030 1000
1031 1001 # Creating the marker causes the hidden cache to become invalid,
1032 1002 # which causes recomputation when we ask for prec.parents() above,
1033 1003 # resulting in n^2 behavior. So let's prepare all of the args
1034 1004 # first, then create the markers.
1035 1005 markerargs.append((nprec, nsucs, npare, localmetadata))
1036 1006
1037 1007 for args in markerargs:
1038 1008 nprec, nsucs, npare, localmetadata = args
1039 1009 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1040 1010 date=date, metadata=localmetadata,
1041 1011 ui=repo.ui)
1042 1012 repo.filteredrevcache.clear()
@@ -1,892 +1,925 b''
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 node as nodemod,
15 15 phases,
16 16 util,
17 17 )
18 18 from .utils import dateutil
19 19
20 ### obsolescence marker flag
21
22 ## bumpedfix flag
23 #
24 # When a changeset A' succeeds a changeset A which became public, we call A'
25 # "bumped" because it is a successor of a public changeset
26 #
27 # o A' (bumped)
28 # |`:
29 # | o A
30 # |/
31 # o Z
32 #
33 # The way to solve this situation is to create a new changeset Ad as a child
34 # of A. This changeset has the same content as A'. So the diff from A to A'
35 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
36 #
37 # o Ad
38 # |`:
39 # | x A'
40 # |'|
41 # o | A
42 # |/
43 # o Z
44 #
45 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
46 # as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
47 # This flag means that the successor expresses the changes between the public
48 # and bumped versions and fixes the situation, breaking the transitivity of
49 # "bumped" here.
50 bumpedfix = 1
51 usingsha256 = 2
52
20 53 class marker(object):
21 54 """Wrap obsolete marker raw data"""
22 55
23 56 def __init__(self, repo, data):
24 57 # the repo argument will be used to create changectx in later version
25 58 self._repo = repo
26 59 self._data = data
27 60 self._decodedmeta = None
28 61
29 62 def __hash__(self):
30 63 return hash(self._data)
31 64
32 65 def __eq__(self, other):
33 66 if type(other) != type(self):
34 67 return False
35 68 return self._data == other._data
36 69
37 70 def prednode(self):
38 71 """Predecessor changeset node identifier"""
39 72 return self._data[0]
40 73
41 74 def succnodes(self):
42 75 """List of successor changesets node identifiers"""
43 76 return self._data[1]
44 77
45 78 def parentnodes(self):
46 79 """Parents of the predecessors (None if not recorded)"""
47 80 return self._data[5]
48 81
49 82 def metadata(self):
50 83 """Decoded metadata dictionary"""
51 84 return dict(self._data[3])
52 85
53 86 def date(self):
54 87 """Creation date as (unixtime, offset)"""
55 88 return self._data[4]
56 89
57 90 def flags(self):
58 91 """The flags field of the marker"""
59 92 return self._data[2]
60 93
61 94 def getmarkers(repo, nodes=None, exclusive=False):
62 95 """returns markers known in a repository
63 96
64 97 If <nodes> is specified, only markers "relevant" to those nodes are
65 98 returned"""
66 99 if nodes is None:
67 100 rawmarkers = repo.obsstore
68 101 elif exclusive:
69 102 rawmarkers = exclusivemarkers(repo, nodes)
70 103 else:
71 104 rawmarkers = repo.obsstore.relevantmarkers(nodes)
72 105
73 106 for markerdata in rawmarkers:
74 107 yield marker(repo, markerdata)
75 108
76 109 def closestpredecessors(repo, nodeid):
77 110 """yield the list of next predecessors pointing on visible changectx nodes
78 111
79 112 This function respects the repoview filtering; filtered revisions will
80 113 be considered missing.
81 114 """
82 115
83 116 precursors = repo.obsstore.predecessors
84 117 stack = [nodeid]
85 118 seen = set(stack)
86 119
87 120 while stack:
88 121 current = stack.pop()
89 122 currentpreccs = precursors.get(current, ())
90 123
91 124 for prec in currentpreccs:
92 125 precnodeid = prec[0]
93 126
94 127 # Basic cycle protection
95 128 if precnodeid in seen:
96 129 continue
97 130 seen.add(precnodeid)
98 131
99 132 if precnodeid in repo:
100 133 yield precnodeid
101 134 else:
102 135 stack.append(precnodeid)
103 136
104 137 def allpredecessors(obsstore, nodes, ignoreflags=0):
105 138 """Yield node for every precursors of <nodes>.
106 139
107 140 Some precursors may be unknown locally.
108 141
109 142 This is a linear yield unsuited to detecting folded changesets. It includes
110 143 initial nodes too."""
111 144
112 145 remaining = set(nodes)
113 146 seen = set(remaining)
114 147 while remaining:
115 148 current = remaining.pop()
116 149 yield current
117 150 for mark in obsstore.predecessors.get(current, ()):
118 151 # ignore marker flagged with specified flag
119 152 if mark[2] & ignoreflags:
120 153 continue
121 154 suc = mark[0]
122 155 if suc not in seen:
123 156 seen.add(suc)
124 157 remaining.add(suc)
125 158
126 159 def allsuccessors(obsstore, nodes, ignoreflags=0):
127 160 """Yield node for every successor of <nodes>.
128 161
129 162 Some successors may be unknown locally.
130 163
131 164 This is a linear yield unsuited to detecting split changesets. It includes
132 165 initial nodes too."""
133 166 remaining = set(nodes)
134 167 seen = set(remaining)
135 168 while remaining:
136 169 current = remaining.pop()
137 170 yield current
138 171 for mark in obsstore.successors.get(current, ()):
139 172 # ignore marker flagged with specified flag
140 173 if mark[2] & ignoreflags:
141 174 continue
142 175 for suc in mark[1]:
143 176 if suc not in seen:
144 177 seen.add(suc)
145 178 remaining.add(suc)
146 179
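# Illustrative usage sketch, assuming a repo and a node of interest
# ('somenode' is hypothetical): walk every known successor while
# skipping markers that carry the bumpedfix flag.
#
#     for n in allsuccessors(repo.obsstore, [somenode],
#                            ignoreflags=bumpedfix):
#         pass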
147 180 def _filterprunes(markers):
148 181 """return a set with no prune markers"""
149 182 return set(m for m in markers if m[1])
150 183
151 184 def exclusivemarkers(repo, nodes):
152 185 """set of markers relevant to "nodes" but no other locally-known nodes
153 186
154 187 This function computes the set of markers "exclusive" to a locally-known
155 188 node. This means we walk the markers starting from <nodes> until we reach
156 189 a locally-known precursor outside of <nodes>. Elements of <nodes> with
157 190 locally-known successors outside of <nodes> are ignored (since their
158 191 precursors markers are also relevant to these successors).
159 192
160 193 For example:
161 194
162 195 # (A0 rewritten as A1)
163 196 #
164 197 # A0 <-1- A1 # Marker "1" is exclusive to A1
165 198
166 199 or
167 200
168 201 # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
169 202 #
170 203 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
171 204
172 205 or
173 206
174 207 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
175 208 #
176 209 # <-2- A1 # Marker "2" is exclusive to A0,A1
177 210 # /
178 211 # <-1- A0
179 212 # \
180 213 # <-3- A2 # Marker "3" is exclusive to A0,A2
181 214 #
182 215 # in addition:
183 216 #
184 217 # Markers "2,3" are exclusive to A1,A2
185 218 # Markers "1,2,3" are exclusive to A0,A1,A2
186 219
187 220 See test/test-obsolete-bundle-strip.t for more examples.
188 221
189 222 An example usage is strip. When stripping a changeset, we also want to
190 223 strip the markers exclusive to this changeset. Otherwise we would have
191 224 "dangling"" obsolescence markers from its precursors: Obsolescence markers
192 225 marking a node as obsolete without any successors available locally.
193 226
194 227 As for relevant markers, the prune markers for children will be followed.
195 228 Of course, they will only be followed if the pruned child is
196 229 locally-known, since the prune markers are relevant to the pruned node.
197 230 However, while prune markers are considered relevant to the parent of the
198 231 pruned changesets, prune markers for locally-known changesets (with no
199 232 successors) are considered exclusive to the pruned nodes. This allows
200 233 stripping the prune markers (with the rest of the exclusive chain)
201 234 alongside the pruned changesets.
202 235 """
203 236 # running on a filtered repository would be dangerous as markers could be
204 237 # reported as exclusive when they are relevant for other filtered nodes.
205 238 unfi = repo.unfiltered()
206 239
207 240 # shortcut to various useful items
208 241 nm = unfi.changelog.nodemap
209 242 precursorsmarkers = unfi.obsstore.predecessors
210 243 successormarkers = unfi.obsstore.successors
211 244 childrenmarkers = unfi.obsstore.children
212 245
213 246 # exclusive markers (return of the function)
214 247 exclmarkers = set()
215 248 # we need fast membership testing
216 249 nodes = set(nodes)
217 250 # looking for heads in the obshistory
218 251 #
219 252 # XXX we are ignoring all issues in regard to cycles for now.
220 253 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
221 254 stack.sort()
222 255 # nodes already stacked
223 256 seennodes = set(stack)
224 257 while stack:
225 258 current = stack.pop()
226 259 # fetch precursors markers
227 260 markers = list(precursorsmarkers.get(current, ()))
228 261 # extend the list with prune markers
229 262 for mark in successormarkers.get(current, ()):
230 263 if not mark[1]:
231 264 markers.append(mark)
232 265 # and markers from children (looking for prune)
233 266 for mark in childrenmarkers.get(current, ()):
234 267 if not mark[1]:
235 268 markers.append(mark)
236 269 # traverse the markers
237 270 for mark in markers:
238 271 if mark in exclmarkers:
239 272 # markers already selected
240 273 continue
241 274
242 275 # If the marker is about the current node, select it
243 276 #
244 277 # (this delays the addition of markers from children)
245 278 if mark[1] or mark[0] == current:
246 279 exclmarkers.add(mark)
247 280
248 281 # should we keep traversing through the precursors?
249 282 prec = mark[0]
250 283
251 284 # nodes in the stack or already processed
252 285 if prec in seennodes:
253 286 continue
254 287
255 288 # is this a locally known node?
256 289 known = prec in nm
257 290 # if locally-known and not in the <nodes> set the traversal
258 291 # stop here.
259 292 if known and prec not in nodes:
260 293 continue
261 294
262 295 # do not keep going if there are unselected markers pointing to this
263 296 # node. If we end up traversing these unselected markers later the
264 297 # node will be taken care of at that point.
265 298 precmarkers = _filterprunes(successormarkers.get(prec))
266 299 if precmarkers.issubset(exclmarkers):
267 300 seennodes.add(prec)
268 301 stack.append(prec)
269 302
270 303 return exclmarkers
271 304
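# Illustrative usage sketch for a strip-like scenario ('stripnodes' is
# hypothetical): the returned markers can be deleted alongside the
# stripped changesets without leaving dangling references.
#
#     todrop = exclusivemarkers(repo, stripnodes)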
272 305 def foreground(repo, nodes):
273 306 """return all nodes in the "foreground" of other node
274 307
275 308 The foreground of a revision is anything reachable using parent -> children
276 309 or precursor -> successor relations. It is very similar to "descendant" but
277 310 augmented with obsolescence information.
278 311
279 312 Beware that obsolescence cycles may arise in complex situations.
280 313 """
281 314 repo = repo.unfiltered()
282 315 foreground = set(repo.set('%ln::', nodes))
283 316 if repo.obsstore:
284 317 # We only need this complicated logic if there is obsolescence
285 318 # XXX will probably deserve an optimised revset.
286 319 nm = repo.changelog.nodemap
287 320 plen = -1
288 321 # compute the whole set of successors or descendants
289 322 while len(foreground) != plen:
290 323 plen = len(foreground)
291 324 succs = set(c.node() for c in foreground)
292 325 mutable = [c.node() for c in foreground if c.mutable()]
293 326 succs.update(allsuccessors(repo.obsstore, mutable))
294 327 known = (n for n in succs if n in nm)
295 328 foreground = set(repo.set('%ln::', known))
296 329 return set(c.node() for c in foreground)
297 330
298 331 # effectflag field
299 332 #
300 333 # Effect-flag is a 1-byte bit field used to store what changed between a
301 334 # changeset and its successor(s).
302 335 #
303 336 # The effect flag is stored in obs-markers metadata while we iterate on the
304 337 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
305 338 # with an incompatible design for effect flag, we can store a new design under
306 339 # another field name so we don't break readers. We plan to extend the existing
307 340 # obsmarkers bit-field when the effect flag design is stabilized.
308 341 #
309 342 # The effect-flag is placed behind an experimental flag
310 343 # `effect-flags` set to off by default.
311 344 #
312 345
313 346 EFFECTFLAGFIELD = "ef1"
314 347
315 348 DESCCHANGED = 1 << 0 # action changed the description
316 349 METACHANGED = 1 << 1 # action changed the meta
317 350 DIFFCHANGED = 1 << 3 # action changed the diff introduced by the changeset
318 351 PARENTCHANGED = 1 << 2 # action changed the parent
319 352 USERCHANGED = 1 << 4 # the user changed
320 353 DATECHANGED = 1 << 5 # the date changed
321 354 BRANCHCHANGED = 1 << 6 # the branch changed
322 355
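# Illustrative combination of the flags above: a rewrite that changed
# both the description and the user yields DESCCHANGED | USERCHANGED,
# stored as the decimal string "17" under the "ef1" metadata field.
assert DESCCHANGED | USERCHANGED == 17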
323 356 METABLACKLIST = [
324 357 re.compile('^branch$'),
325 358 re.compile('^.*-source$'),
326 359 re.compile('^.*_source$'),
327 360 re.compile('^source$'),
328 361 ]
329 362
330 363 def metanotblacklisted(metaitem):
331 364 """ Check that the key of a meta item (extrakey, extravalue) does not
332 365 match at least one of the blacklist pattern
333 366 """
334 367 metakey = metaitem[0]
335 368
336 369 return not any(pattern.match(metakey) for pattern in METABLACKLIST)
337 370
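# Illustrative checks (the key/value pairs are made up): 'branch' and
# '*_source' keys are blacklisted, other extra keys pass through.
assert not metanotblacklisted(('branch', 'default'))
assert not metanotblacklisted(('rebase_source', 'abc123'))
assert metanotblacklisted(('topic', 'feature'))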
338 371 def _prepare_hunk(hunk):
339 372 """Drop all information but the username and patch"""
340 373 cleanhunk = []
341 374 for line in hunk.splitlines():
342 375 if line.startswith(b'# User') or not line.startswith(b'#'):
343 376 if line.startswith(b'@@'):
344 377 line = b'@@\n'
345 378 cleanhunk.append(line)
346 379 return cleanhunk
347 380
348 381 def _getdifflines(iterdiff):
349 382 """return a cleaned up lines"""
350 383 lines = next(iterdiff, None)
351 384
352 385 if lines is None:
353 386 return lines
354 387
355 388 return _prepare_hunk(lines)
356 389
357 390 def _cmpdiff(leftctx, rightctx):
358 391 """return True if both ctx introduce the "same diff"
359 392
360 393 This is a first and basic implementation, with many shortcomings.
361 394 """
362 395
363 396 # Leftctx or right ctx might be filtered, so we need to use the contexts
364 397 # with an unfiltered repository to safely compute the diff
365 398 leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
366 399 leftdiff = leftunfi.diff(git=1)
367 400 rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
368 401 rightdiff = rightunfi.diff(git=1)
369 402
370 403 left, right = (0, 0)
371 404 while None not in (left, right):
372 405 left = _getdifflines(leftdiff)
373 406 right = _getdifflines(rightdiff)
374 407
375 408 if left != right:
376 409 return False
377 410 return True
378 411
379 412 def geteffectflag(relation):
380 413 """ From an obs-marker relation, compute what changed between the
381 414 predecessor and the successor.
382 415 """
383 416 effects = 0
384 417
385 418 source = relation[0]
386 419
387 420 for changectx in relation[1]:
388 421 # Check if description has changed
389 422 if changectx.description() != source.description():
390 423 effects |= DESCCHANGED
391 424
392 425 # Check if user has changed
393 426 if changectx.user() != source.user():
394 427 effects |= USERCHANGED
395 428
396 429 # Check if date has changed
397 430 if changectx.date() != source.date():
398 431 effects |= DATECHANGED
399 432
400 433 # Check if branch has changed
401 434 if changectx.branch() != source.branch():
402 435 effects |= BRANCHCHANGED
403 436
404 437 # Check if at least one of the parents has changed
405 438 if changectx.parents() != source.parents():
406 439 effects |= PARENTCHANGED
407 440
408 441 # Check if other meta has changed
409 442 changeextra = changectx.extra().items()
410 443 ctxmeta = list(filter(metanotblacklisted, changeextra))
411 444
412 445 sourceextra = source.extra().items()
413 446 srcmeta = list(filter(metanotblacklisted, sourceextra))
414 447
415 448 if ctxmeta != srcmeta:
416 449 effects |= METACHANGED
417 450
418 451 # Check if the diff has changed
419 452 if not _cmpdiff(source, changectx):
420 453 effects |= DIFFCHANGED
421 454
422 455 return effects
423 456
424 457 def getobsoleted(repo, tr):
425 458 """return the set of pre-existing revisions obsoleted by a transaction"""
426 459 torev = repo.unfiltered().changelog.nodemap.get
427 460 phase = repo._phasecache.phase
428 461 succsmarkers = repo.obsstore.successors.get
429 462 public = phases.public
430 463 addedmarkers = tr.changes.get('obsmarkers')
431 464 addedrevs = tr.changes.get('revs')
432 465 seenrevs = set()
433 466 obsoleted = set()
434 467 for mark in addedmarkers:
435 468 node = mark[0]
436 469 rev = torev(node)
437 470 if rev is None or rev in seenrevs or rev in addedrevs:
438 471 continue
439 472 seenrevs.add(rev)
440 473 if phase(repo, rev) == public:
441 474 continue
442 475 if set(succsmarkers(node) or []).issubset(addedmarkers):
443 476 obsoleted.add(rev)
444 477 return obsoleted
445 478
446 479 class _succs(list):
447 480 """small class to represent a successors with some metadata about it"""
448 481
449 482 def __init__(self, *args, **kwargs):
450 483 super(_succs, self).__init__(*args, **kwargs)
451 484 self.markers = set()
452 485
453 486 def copy(self):
454 487 new = _succs(self)
455 488 new.markers = self.markers.copy()
456 489 return new
457 490
458 491 @util.propertycache
459 492 def _set(self):
460 493 # immutable
461 494 return set(self)
462 495
463 496 def canmerge(self, other):
464 497 return self._set.issubset(other._set)
465 498
466 499 def successorssets(repo, initialnode, closest=False, cache=None):
467 500 """Return set of all latest successors of initial nodes
468 501
469 502 The successors set of a changeset A is the group of revisions that succeed
470 503 A. It succeeds A as a consistent whole, each revision being only a partial
471 504 replacement. By default, the successors set contains non-obsolete
472 505 changesets only, walking the obsolescence graph until reaching a leaf. If
473 506 'closest' is set to True, closest successors-sets are returned (the
474 507 obsolescence walk stops on known changesets).
475 508
476 509 This function returns the full list of successor sets which is why it
477 510 returns a list of tuples and not just a single tuple. Each tuple is a valid
478 511 successors set. Note that (A,) may be a valid successors set for changeset A
479 512 (see below).
480 513
481 514 In most cases, a changeset A will have a single element (e.g. the changeset
482 515 A is replaced by A') in its successors set. Though, it is also common for a
483 516 changeset A to have no elements in its successor set (e.g. the changeset
484 517 has been pruned). Therefore, the returned list of successors sets will be
485 518 [(A',)] or [], respectively.
486 519
487 520 When a changeset A is split into A' and B', however, it will result in a
488 521 successors set containing more than a single element, i.e. [(A',B')].
489 522 Divergent changesets will result in multiple successors sets, i.e. [(A',),
490 523 (A'')].
491 524
492 525 If a changeset A is not obsolete, then it will conceptually have no
493 526 successors set. To distinguish this from a pruned changeset, the successor
494 527 set will contain itself only, i.e. [(A,)].
495 528
496 529 Finally, final successors unknown locally are considered to be pruned
497 530 (pruned: obsoleted without any successors). (Final: successors not affected
498 531 by markers).
499 532
500 533 The 'closest' mode respect the repoview filtering. For example, without
501 534 filter it will stop at the first locally known changeset, with 'visible'
502 535 filter it will stop on visible changesets).
503 536
504 537 The optional `cache` parameter is a dictionary that may contains
505 538 precomputed successors sets. It is meant to reuse the computation of a
506 539 previous call to `successorssets` when multiple calls are made at the same
507 540 time. The cache dictionary is updated in place. The caller is responsible
508 541 for its life span. Code that makes multiple calls to `successorssets`
509 542 *should* use this cache mechanism or risk a performance hit.
510 543
511 544 Since results are different depending of the 'closest' most, the same cache
512 545 cannot be reused for both mode.
513 546 """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # elements added to "toproceed" must be added here too
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in successors:
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we cannot use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is the set version of this stack, used to check if a
    # node is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # The successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        #    1) We already know the successors sets of CURRENT:
        #       -> mission accomplished, pop it from the stack.
        #    2) Stop the walk:
        #       default case: Node is not obsolete
        #       closest case: Node is known at this repo filter level
        #       -> the node is its own successors set. Add it to the cache.
        #    3) We do not know the successors sets of direct successors of
        #       CURRENT:
        #       -> We add those successors to the stack.
        #    4) We know the successors sets of all direct successors of
        #       CURRENT:
        #       -> We can compute CURRENT's successors set and add it to the
        #          cache.
        #
        current = toproceed[-1]

        # The case (2) condition is a bit hairy because of 'closest',
        # so we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors set.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors set.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   predecessor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node, multiple successors mean a
            #   split, and a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # Cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicate entries in a successors
                                    # set; the first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
                seen = []
                final = []
                candidates = sorted((s for s in succssets if s),
                                    key=len, reverse=True)
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors sets first
                cache[current] = final
    return cache[initialnode]

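# Illustrative usage sketch, not part of the original module: as the
# docstring above recommends, batched queries should share one cache per
# 'closest' mode. '_batchsuccessorssets' is a hypothetical helper; 'repo'
# and 'nodes' are assumed to be a repository object and node ids.
def _batchsuccessorssets(repo, nodes, closest=False):
    """compute successors sets of several nodes with one shared cache
    (hypothetical helper)"""
    cache = {}  # one cache per 'closest' mode; never mix the two modes
    return {n: successorssets(repo, n, closest=closest, cache=cache)
            for n in nodes}
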
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate
    Returns a list of dicts, one dict per successors set
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # successorssets returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    # Try to recover pruned markers
    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
        else:
            # successorssets returns an empty list when ctx or one of its
            # successors is pruned.
            # In this case, walk the obs-markers tree again starting with ctx
            # and find the relevant pruning obs-markers, the ones without
            # successors.
            # Having these markers allows us to compute some information about
            # the changeset's fate, like who pruned it and when.

            # XXX we do not catch all prune markers (eg rewritten then pruned)
            # (fix me later)
            foundany = False
            for mark in succsmap.get(ctx.node(), ()):
                if not mark[1]:
                    foundany = True
                    sset = _succs()
                    sset.markers.add(mark)
                    fullsuccessorsets.append(sset)
            if not foundany:
                fullsuccessorsets.append(_succs())

    values = []
    for sset in fullsuccessorsets:
        values.append({'successors': sset, 'markers': sset.markers})

    return values

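# Illustrative sketch, not part of the original module: the consumption
# pattern for the values built above. Each entry maps 'successors' to a
# _succs instance and 'markers' to the set of markers explaining it; a pruned
# changeset contributes an empty _succs(). '_obsfatelines' is a hypothetical
# helper name.
def _obsfatelines(repo, ctx, ui):
    """render one obsfate line per successors set (hypothetical helper)"""
    values = successorsandmarkers(repo, ctx)
    if values is None:  # ctx is not obsolete, nothing to report
        return []
    return [obsfateprinter(v['successors'], v['markers'], ui)
            for v in values]
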
def _getobsfate(successorssets):
    """ Compute a changeset obsolescence fate based on its successorssets.
    Successors can be the tipmost ones or the immediate ones. This function's
    return values are not meant to be shown directly to users, they are meant
    to be used by internal functions only.
    Returns one fate from the following values:
    - pruned
    - diverged
    - superseded
    - superseded_split
    """

    if len(successorssets) == 0:
        # The commit has been pruned
        return 'pruned'
    elif len(successorssets) > 1:
        return 'diverged'
    else:
        # No divergence, only one set of successors
        successors = successorssets[0]

        if len(successors) == 1:
            return 'superseded'
        else:
            return 'superseded_split'

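# Illustrative examples, not part of the original module: the fate
# classification above only looks at the shape of the successors sets, so
# plain lists are enough to exercise it. '_getobsfatedemo' is a hypothetical
# helper name.
def _getobsfatedemo():
    """demonstrate _getobsfate on sample shapes (hypothetical helper)"""
    assert _getobsfate([]) == 'pruned'                        # no set at all
    assert _getobsfate([['A1'], ['A2']]) == 'diverged'        # competing sets
    assert _getobsfate([['A1']]) == 'superseded'              # one 1-element set
    assert _getobsfate([['B1', 'B2']]) == 'superseded_split'  # one split set
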
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    if not successorset:
        verb = 'pruned'
    elif len(successorset) == 1:
        verb = 'rewritten'
    else:
        verb = 'split'
    return verb

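# Illustrative examples, not part of the original module: the verb depends
# only on the successor count; the markers argument is currently unused by
# the function above. '_obsfateverbdemo' is a hypothetical helper name.
def _obsfateverbdemo():
    """demonstrate obsfateverb on sample sets (hypothetical helper)"""
    assert obsfateverb([], []) == 'pruned'
    assert obsfateverb(['A1'], []) == 'rewritten'
    assert obsfateverb(['B1', 'B2'], []) == 'split'
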
def markersdates(markers):
    """returns the list of dates for a list of markers
    """
    return [m[4] for m in markers]

def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    markersmeta = [dict(m[3]) for m in markers]
    users = set(meta.get('user') for meta in markersmeta if meta.get('user'))

    return sorted(users)

def markersoperations(markers):
    """ Returns a sorted list of markers operations without duplicates
    """
    markersmeta = [dict(m[3]) for m in markers]
    operations = set(meta.get('operation') for meta in markersmeta
                     if meta.get('operation'))

    return sorted(operations)

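# Illustrative example, not part of the original module: exercising the three
# metadata helpers above on a hand-built marker. The full tuple layout (flags
# at index 2, parents at index 5) is an assumption; only indices 3 and 4 are
# read by these helpers. '_markersmetademo' is a hypothetical helper name.
def _markersmetademo():
    """demonstrate the markers metadata helpers (hypothetical helper)"""
    mark = (b'\x00' * 20,                                 # predecessor node
            (),                                           # successors: pruned
            0,                                            # flags
            (('user', 'alice'), ('operation', 'prune')),  # metadata pairs
            (1520000000.0, 0),                            # date (ts, offset)
            None)                                         # parents
    assert markersdates([mark]) == [(1520000000.0, 0)]
    assert markersusers([mark]) == ['alice']
    assert markersoperations([mark]) == ['prune']
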
def obsfateprinter(successors, markers, ui):
    """ Build an obsfate string for a single successorset using all the
    obsfate-related functions defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    normal = not verbose and not quiet

    line = []

    # Verb
    line.append(obsfateverb(successors, markers))

    # Operations
    operations = markersoperations(markers)
    if operations:
        line.append(" using %s" % ", ".join(operations))

    # Successors
    if successors:
        fmtsuccessors = [successors.joinfmt(succ) for succ in successors]
        line.append(" as %s" % ", ".join(fmtsuccessors))

    # Users
    users = markersusers(markers)
    # Filter out the current user in non-verbose mode to reduce the amount of
    # information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        line.append(" by %s" % ", ".join(users))

    # Date
    dates = markersdates(markers)

    if dates and verbose:
        min_date = min(dates)
        max_date = max(dates)

        if min_date == max_date:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (at %s)" % fmtmin_date)
        else:
            fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
            fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
            line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))

    return "".join(line)


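# Illustrative note, not part of the original module: for a changeset
# rewritten once with the 'amend' operation by another user, the pieces
# assembled above might read, in normal mode, something like
#   "rewritten using amend as 1ea73414a91b by alice"
# with the "(at ...)" or "(between ... and ...)" date suffix appearing only
# in verbose mode. The hash and user shown here are made up.
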
filteredmsgtable = {
    "pruned": _("hidden revision '%s' is pruned"),
    "diverged": _("hidden revision '%s' has diverged"),
    "superseded": _("hidden revision '%s' was rewritten as: %s"),
    "superseded_split": _("hidden revision '%s' was split as: %s"),
    "superseded_split_several": _("hidden revision '%s' was split as: %s and "
                                  "%d more"),
}

def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why an obsolete changeset is hidden
    """
    successors = successorssets(repo, ctx.node())
    fate = _getobsfate(successors)

    # Be more precise in case the revision is superseded
    if fate == 'pruned':
        return filteredmsgtable['pruned'] % changeid
    elif fate == 'diverged':
        return filteredmsgtable['diverged'] % changeid
    elif fate == 'superseded':
        single_successor = nodemod.short(successors[0][0])
        return filteredmsgtable['superseded'] % (changeid, single_successor)
    elif fate == 'superseded_split':

        succs = []
        for node_id in successors[0]:
            succs.append(nodemod.short(node_id))

        if len(succs) <= 2:
            fmtsuccs = ', '.join(succs)
            return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
        else:
            firstsuccessors = ', '.join(succs[:2])
            remainingnumber = len(succs) - 2

            args = (changeid, firstsuccessors, remainingnumber)
            return filteredmsgtable['superseded_split_several'] % args
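
# Illustrative examples, not part of the original module, of the messages
# produced above (hashes and revision names are made up):
#   pruned:      "hidden revision 'x' is pruned"
#   diverged:    "hidden revision 'x' has diverged"
#   superseded:  "hidden revision 'x' was rewritten as: 0d2164f0ce0d"
#   split, two or fewer successors:
#                "hidden revision 'x' was split as: 0d2164f0ce0d, 903bc2b5"
#   split, more than two successors:
#                "hidden revision 'x' was split as: 0d2164f0ce0d, 903bc2b5
#                 and 3 more"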