##// END OF EJS Templates
obsolete: drop usage of changectx in '_computephasedivergentset'...
Boris Feld -
r35134:82680919 default
parent child Browse files
Show More
@@ -1,1126 +1,1126 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and news changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successors are call "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depend of the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84
85 85 parsers = policy.importmod(r'parsers')
86 86
87 87 _pack = struct.pack
88 88 _unpack = struct.unpack
89 89 _calcsize = struct.calcsize
90 90 propertycache = util.propertycache
91 91
# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
94 94 _enabled = False
95 95
96 96 # Options for obsolescence
97 97 createmarkersopt = 'createmarkers'
98 98 allowunstableopt = 'allowunstable'
99 99 exchangeopt = 'exchange'
100 100
def _getoptionvalue(repo, option):
    """Return True if the given repository has the given obsolete option
    enabled.
    """
    # a dedicated boolean config for this option takes precedence when set
    configkey = 'evolution.%s' % option
    value = repo.ui.configbool('experimental', configkey)
    if value is not None:
        return value

    # otherwise fall back on the generic 'experimental.evolution' switch
    try:
        return repo.ui.configbool('experimental', 'evolution')
    except (error.ConfigError, AttributeError):
        # Old-fashion config: 'experimental.evolution' holds a list of
        # enabled option names instead of a boolean.
        enabledoptions = set(repo.ui.configlist('experimental', 'evolution'))

        if 'all' in enabledoptions:
            return True

        # For migration purposes, temporarily return true if the config
        # hasn't been set but _enabled is true.
        if not enabledoptions and _enabled:
            return True

        # Temporary hack for next check
        if repo.ui.config('experimental', 'evolution.createmarkers'):
            enabledoptions.add('createmarkers')

        return option in enabledoptions
134 134
def isenabled(repo, option):
    """Return True if the given repository has the given obsolete option
    enabled.

    Also enforces the option dependency: 'allowunstable' and 'exchange'
    both require 'createmarkers' to be enabled, raising error.Abort when
    the configuration is inconsistent.
    """
    createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
    # fix: the local used to be misspelled "unstabluevalue"
    unstablevalue = _getoptionvalue(repo, allowunstableopt)
    exchangevalue = _getoptionvalue(repo, exchangeopt)

    # createmarkers must be enabled if other options are enabled
    if (unstablevalue or exchangevalue) and not createmarkersvalue:
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return _getoptionvalue(repo, option)
149 149
150 150 ### obsolescence marker flag
151 151
152 152 ## bumpedfix flag
153 153 #
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset.
156 156 #
157 157 # o A' (bumped)
158 158 # |`:
159 159 # | o A
160 160 # |/
161 161 # o Z
162 162 #
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'.
166 166 #
167 167 # o Ad
168 168 # |`:
169 169 # | x A'
170 170 # |'|
171 171 # o | A
172 172 # |/
173 173 # o Z
174 174 #
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
180 180 bumpedfix = 1
181 181 usingsha256 = 2
182 182
183 183 ## Parsing and writing of version "0"
184 184 #
185 185 # The header is followed by the markers. Each marker is made of:
186 186 #
187 187 # - 1 uint8 : number of new changesets "N", can be zero.
188 188 #
189 189 # - 1 uint32: metadata size "M" in bytes.
190 190 #
191 191 # - 1 byte: a bit field. It is reserved for flags used in common
192 192 # obsolete marker operations, to avoid repeated decoding of metadata
193 193 # entries.
194 194 #
195 195 # - 20 bytes: obsoleted changeset identifier.
196 196 #
197 197 # - N*20 bytes: new changesets identifiers.
198 198 #
199 199 # - M bytes: metadata as a sequence of nul-terminated strings. Each
200 200 # string contains a key and a value, separated by a colon ':', without
201 201 # additional encoding. Keys cannot contain '\0' or ':' and values
202 202 # cannot contain '\0'.
203 203 _fm0version = 0
204 204 _fm0fixed = '>BIB20s'
205 205 _fm0node = '20s'
206 206 _fm0fsize = _calcsize(_fm0fixed)
207 207 _fm0fnodesize = _calcsize(_fm0node)
208 208
def _fm0readmarkers(data, off, stop):
    """Decode version 0 markers from ``data[off:stop]``.

    Yields (prec, sucs, flags, metadata, date, parents) tuples.  The date
    and parents are extracted from the metadata blob, where this legacy
    format stores them (keys 'date' and 'p0'/'p1'/'p2').
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement (the successor nodes; empty tuple means "pruned")
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            # unparsable date: fall back on the epoch rather than aborting
            date = (0., 0)
        parents = None
        # 'p0' marks "no parents recorded" explicitly, as opposed to
        # parents is None which means "no parent data at all"
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
259 259
def _fm0encodeonemarker(marker):
    """Serialize one marker tuple using the version 0 binary format.

    The legacy format cannot carry sha256 nodes; date and parents are
    folded into the metadata blob.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    meta = dict(metadata)
    when, offset = date
    meta['date'] = '%r %i' % (when, offset)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            meta['p0'] = ''
        else:
            for idx, p in enumerate(parents, 1):
                meta['p%i' % idx] = node.hex(p)
    encodedmeta = _fm0encodemeta(meta)
    numsuc = len(sucs)
    fmt = _fm0fixed + (_fm0node * numsuc)
    fields = [numsuc, len(encodedmeta), flags, pre]
    fields.extend(sucs)
    return _pack(fmt, *fields) + encodedmeta
279 279
def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value.

    Raises ValueError when a key or value contains a forbidden character.
    """
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            # fix: the message used to wrongly blame ':' although the
            # check is for the '\0' separator
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
290 290
291 291 def _fm0decodemeta(data):
292 292 """Return string to string dictionary from encoded version."""
293 293 d = {}
294 294 for l in data.split('\0'):
295 295 if l:
296 296 key, value = l.split(':')
297 297 d[key] = value
298 298 return d
299 299
300 300 ## Parsing and writing of version "1"
301 301 #
302 302 # The header is followed by the markers. Each marker is made of:
303 303 #
304 304 # - uint32: total size of the marker (including this field)
305 305 #
306 306 # - float64: date in seconds since epoch
307 307 #
308 308 # - int16: timezone offset in minutes
309 309 #
310 310 # - uint16: a bit field. It is reserved for flags used in common
311 311 # obsolete marker operations, to avoid repeated decoding of metadata
312 312 # entries.
313 313 #
314 314 # - uint8: number of successors "N", can be zero.
315 315 #
316 316 # - uint8: number of parents "P", can be zero.
317 317 #
318 318 # 0: parents data stored but no parent,
319 319 # 1: one parent stored,
320 320 # 2: two parents stored,
321 321 # 3: no parent data stored
322 322 #
323 323 # - uint8: number of metadata entries M
324 324 #
325 325 # - 20 or 32 bytes: predecessor changeset identifier.
326 326 #
327 327 # - N*(20 or 32) bytes: successors changesets identifiers.
328 328 #
329 329 # - P*(20 or 32) bytes: parents of the predecessors changesets.
330 330 #
331 331 # - M*(uint8, uint8): size of all metadata entries (key and value)
332 332 #
333 333 # - remaining bytes: the metadata, each (key, value) pair after the other.
334 334 _fm1version = 1
335 335 _fm1fixed = '>IdhHBBB20s'
336 336 _fm1nodesha1 = '20s'
337 337 _fm1nodesha256 = '32s'
338 338 _fm1nodesha1size = _calcsize(_fm1nodesha1)
339 339 _fm1nodesha256size = _calcsize(_fm1nodesha256)
340 340 _fm1fsize = _calcsize(_fm1fixed)
341 341 _fm1parentnone = 3
342 342 _fm1parentshift = 14
343 343 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
344 344 _fm1metapair = 'BB'
345 345 _fm1metapairsize = _calcsize(_fm1metapair)
346 346
def _fm1purereadmarkers(data, off, stop):
    """Decode version 1 markers from ``data[off:stop]`` (pure python).

    Fallback used when the C implementation in 'parsers' is unavailable.
    Yields (prec, sucs, flags, metadata, date, parents) tuples.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        # node width depends on the hash flavor recorded in the flags
        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents (noneflag means "no parent data recorded")
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents (noneflag means "no parent data recorded")
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (key length, value length) pairs, then
        # the concatenated key/value bytes themselves
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz was stored in minutes; convert back to seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
420 420
def _fm1encodeonemarker(marker):
    """Serialize one marker tuple using the version 1 binary format."""
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # the first field (total size) is backfilled once every length is known
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        # the v1 format stores each metadata length in a single byte
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # the metadata bytes follow the packed fixed-size fields
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
464 464
def _fm1readmarkers(data, off, stop):
    """Parse version 1 markers, using the C fast path when available."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, stop)
    # fall back on the pure python implementation
    return _fm1purereadmarkers(data, off, stop)
470 470
471 471 # mapping to read/write various marker formats
472 472 # <version> -> (decoder, encoder)
473 473 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
474 474 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
475 475
def _readmarkerversion(data):
    """Return the format version stored in the first byte of *data*."""
    (version,) = _unpack('>B', data[0:1])
    return version
478 478
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data

    Returns a (version, iterator-of-markers) pair; raises UnknownVersion
    for formats we cannot decode."""
    diskversion = _readmarkerversion(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    if not off:
        off = 1 # skip 1 byte version number
    if stop is None:
        stop = len(data)
    reader = formats[diskversion][0]
    return diskversion, reader(data, off, stop)
491 491
def encodeheader(version=_fm0version):
    """Return the one-byte obsstore header advertising *version*."""
    header = _pack('>B', version)
    return header
494 494
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield the binary encoding of *markers*, optionally after a header.

    Kept separate from flushmarkers(), it will be reused for markers
    exchange."""
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for m in markers:
        yield encodeone(m)
503 503
@util.nogc
def _addsuccessors(successors, markers):
    """Index *markers* into the successors mapping (keyed by predecessor)."""
    for m in markers:
        bucket = successors.setdefault(m[0], set())
        bucket.add(m)
508 508
def _addprecursors(*args, **kwargs):
    """Deprecated alias kept for the 4.4 cycle; see _addpredecessors."""
    util.nouideprecwarn("'obsolete._addprecursors' is deprecated, "
                        "use 'obsolete._addpredecessors'", '4.4')
    return _addpredecessors(*args, **kwargs)
515 515
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index *markers* into the predecessors mapping (keyed by successor)."""
    for m in markers:
        for successor in m[1]:
            predecessors.setdefault(successor, set()).add(m)
521 521
@util.nogc
def _addchildren(children, markers):
    """Index *markers* into the children mapping (keyed by recorded parent)."""
    for m in markers:
        recordedparents = m[5]
        if recordedparents is None:
            # no parent information was recorded for this marker
            continue
        for parent in recordedparents:
            children.setdefault(parent, set()).add(m)
529 529
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    for m in markers:
        # a nullid successor makes no sense and denotes a corrupt marker
        if node.nullid in m[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))
540 540
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, predecessors changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        # iterate over every known marker tuple
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # answer with a cheap stat() when possible: the store is non-empty
        # when the file holds more than the 1-byte version header
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # only binary sha1 nodes are accepted
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # sorted tuple form makes the metadata hashable and order-stable
        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        # drop markers already on disk and duplicates within this batch
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()'  here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw bytes of the on-disk obsstore file (empty when missing)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        # format version: read from disk when the file exists, otherwise
        # fall back on the default used for new stores
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of every marker parsed from disk, in file order
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # mapping: predecessor node -> set of markers using it as predecessor
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @property
    def precursors(self):
        # deprecated spelling kept for backward compatibility (4.4 cycle)
        msg = ("'obsstore.precursors' is deprecated, "
               "use 'obsstore.predecessors'")
        util.nouideprecwarn(msg, '4.4')

        return self.predecessors

    @propertycache
    def predecessors(self):
        # mapping: successor node -> set of markers using it as successor
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        # mapping: parent node -> set of markers whose predecessor is a child
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the named @propertycache attribute has been computed
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        # keep the in-memory state in sync with freshly written markers
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        # only update the derived mappings that were already computed
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        # breadth-first walk over the obsolescence graph, following
        # predecessor edges and prune markers
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
770 770
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    version = ui.configint('format', 'obsstore-version')
    # rely on the obsstore class default when the config is unset
    extraargs = {}
    if version is not None:
        extraargs['defaultformat'] = version
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **extraargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
786 786
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.

    The input sequence is not modified (the previous implementation sorted
    the caller's list in place).
    """
    # search for highest version known on both side
    for v in sorted(versions, reverse=True):
        if v in formats:
            return v
    return None
798 798
799 799 # arbitrary picked to fit into 8K limit from HTTP server
800 800 # you have to take in account:
801 801 # - the version header
802 802 # - the base85 encoding
803 803 _maxpayload = 5300
804 804
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    # start above the limit so the first marker opens a new part
    currentlen = _maxpayload * 2
    currentpart = None
    for marker in markers:
        encoded = _fm0encodeonemarker(marker)
        if len(encoded) + currentlen > _maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(encoded)
        currentlen += len(encoded)
    # each part becomes its own versioned dump entry
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
825 825
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
831 831
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    Returns True on success, False (after a warning) when the key or the
    old value does not match the pushkey protocol expectations.
    """
    # pushkey protocol: marker payloads are expected under 'dump<N>' keys
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    # markers are append-only; a previous value is never expected
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            # new markers may change volatile sets (obsolete, unstable, ...)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()
853 853
854 854 # keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Deprecated: see obsutil.allprecursors (kept for the 4.3 cycle)."""
    util.nouideprecwarn(
        'obsolete.allprecursors moved to obsutil.allprecursors', '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)
859 859
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Deprecated: see obsutil.allsuccessors (kept for the 4.3 cycle)."""
    util.nouideprecwarn(
        'obsolete.allsuccessors moved to obsutil.allsuccessors', '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
864 864
def marker(repo, data):
    """Deprecated: see obsutil.marker (kept for the 4.3 cycle)."""
    repo.ui.deprecwarn('obsolete.marker moved to obsutil.marker', '4.3')
    return obsutil.marker(repo, data)
869 869
def getmarkers(repo, nodes=None, exclusive=False):
    """Deprecated: see obsutil.getmarkers (kept for the 4.3 cycle)."""
    repo.ui.deprecwarn('obsolete.getmarkers moved to obsutil.getmarkers',
                       '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
874 874
def exclusivemarkers(repo, nodes):
    """Deprecated: see obsutil.exclusivemarkers (kept for the 4.3 cycle)."""
    repo.ui.deprecwarn(
        'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers', '4.3')
    return obsutil.exclusivemarkers(repo, nodes)
879 879
def foreground(repo, nodes):
    """Deprecated: see obsutil.foreground (kept for the 4.3 cycle)."""
    repo.ui.deprecwarn('obsolete.foreground moved to obsutil.foreground',
                       '4.3')
    return obsutil.foreground(repo, nodes)
884 884
def successorssets(repo, initialnode, cache=None):
    """Deprecated: see obsutil.successorssets (kept for the 4.3 cycle)."""
    repo.ui.deprecwarn(
        'obsolete.successorssets moved to obsutil.successorssets', '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)
889 889
890 890 # mapping of 'set-name' -> <function to compute this set>
891 891 cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        existing = cachefuncs.get(name)
        if existing is not None:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, existing))
        cachefuncs[name] = func
        return func
    return decorator
901 901
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use.
    An empty frozenset is returned when the repo has no obsstore data.
    """
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    caches = repo.obsstore.caches
    if name not in caches:
        caches[name] = cachefuncs[name](repo)
    return caches[name]
912 912
# To be simple we need to invalidate obsolescence cache when:
#
# - new changeset is added:
# - public phase is changed
# - obsolescence marker are added
# - strip is used a repo
def clearobscaches(repo):
    """Drop every obsolescence-related cache stored on this repo.

    The caches are only cleared when the repo already has obsstore data
    loaded; otherwise this is a no-op.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # only touch the obsstore if it is already instantiated on this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
930 930
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    mutablephases = (phases.draft, phases.secret)
    return repo._phasecache.getrevset(repo, mutablephases)
934 934
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    # bind lookups to locals to keep the loop cheap
    getnode = repo.changelog.node
    isobs = repo.obsstore.successors.__contains__
    return {r for r in _mutablerevs(repo) if isobs(getnode(r))}
943 943
@cachefor('unstable')
def _computeunstableset(repo):
    """Deprecated alias for the 'orphan' volatile set."""
    repo.ui.deprecwarn("'unstable' volatile set is deprecated, "
                       "use 'orphan'", '4.4')
    return _computeorphanset(repo)
951 951
@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    parentrevs = repo.changelog.parentrevs
    obsolete = getrevs(repo, 'obsolete')
    candidates = _mutablerevs(repo) - obsolete
    orphans = set()
    # Walk in ascending revision order: a revision is an orphan when any
    # of its parents is obsolete or already known to be an orphan, and
    # parents always have smaller revision numbers than their children.
    for rev in sorted(candidates):
        if any(p in obsolete or p in orphans for p in parentrevs(rev)):
            orphans.add(rev)
    return orphans
968 968
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    orphanancestors = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return {r for r in getrevs(repo, 'obsolete') if r in orphanancestors}
974 974
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    obsolete = getrevs(repo, 'obsolete')
    return obsolete - getrevs(repo, 'suspended')
979 979
@cachefor('bumped')
def _computebumpedset(repo):
    """Deprecated alias for the 'phasedivergent' volatile set."""
    repo.ui.deprecwarn("'bumped' volatile set is deprecated, "
                       "use 'phasedivergent'", '4.4')
    return _computephasedivergentset(repo)
987 987
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions

    A revision is phase-divergent ("bumped") when one of its predecessors
    (followed transitively through obsolescence markers) is public: the
    rewrite can never be applied because public changesets are immutable.
    """
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    for rev in repo.revs('(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revision
        node = tonode(rev)
        # (future) A cache of predecessors may worth if split is very common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped
1010 1010
@cachefor('divergent')
def _computedivergentset(repo):
    """Deprecated alias for the 'contentdivergent' volatile set."""
    repo.ui.deprecwarn("'divergent' volatile set is deprecated, "
                       "use 'contentdivergent'", '4.4')
    return _computecontentdivergentset(repo)
1018 1018
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    # cache shared across successorssets() calls within this computation
    newermap = {}
    # examine every mutable, non-obsolete changeset
    for ctx in repo.set('(not public()) - obsolete()'):
        # markers whose successor is ctx, i.e. links to ctx's predecessors
        mark = obsstore.predecessors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        # walk the predecessor chain transitively
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            # keep only non-empty successors sets (drop prunes)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                # a predecessor has more than one set of final successors:
                # ctx competes with the others, so it is content-divergent
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent
1043 1043
1044 1044
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function,

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        # 'devel.user.obsmarker' lets tests/tools force the recorded user
        develuser = repo.ui.config('devel', 'user.obsmarker')
        if develuser:
            metadata['user'] = develuser
        else:
            metadata['user'] = repo.ui.username()

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool('experimental',
                                        'evolution.effect-flags')

    tr = repo.transaction('add-obsolescence-marker')
    try:
        # first pass: validate every relation and collect marker arguments
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            # per-relation metadata overrides the global metadata
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # a prune marker (no successors) records the parents of the
                # pruned changeset
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Effect flag can be different by relation
            if saveeffectflag:
                # The effect flag is saved in a versioned field name for future
                # evolution
                effectflag = obsutil.geteffectflag(rel)
                localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        # second pass: actually create the markers
        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
General Comments 0
You need to be logged in to leave comments. Login now