##// END OF EJS Templates
obsolete: rename _addprecursors into _addpredecessors...
Boris Feld -
r33698:32d4f815 default
parent child Browse files
Show More
@@ -1,1034 +1,1041 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depend of the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84
85 85 parsers = policy.importmod(r'parsers')
86 86
87 87 _pack = struct.pack
88 88 _unpack = struct.unpack
89 89 _calcsize = struct.calcsize
90 90 propertycache = util.propertycache
91 91
92 92 # the obsolete feature is not mature enough to be enabled by default.
93 93 # you have to rely on a third party extension to enable this.
94 94 _enabled = False
95 95
96 96 # Options for obsolescence
97 97 createmarkersopt = 'createmarkers'
98 98 allowunstableopt = 'allowunstable'
99 99 exchangeopt = 'exchange'
100 100
def isenabled(repo, option):
    """Return True if the given repository has the given obsolete option
    enabled.
    """
    active = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in active:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if not active and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    wantsdependent = allowunstableopt in active or exchangeopt in active
    if wantsdependent and createmarkersopt not in active:
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                           "if other obsolete options are enabled"))

    return option in active
121 121
122 122 ### obsolescence marker flag
123 123
124 124 ## bumpedfix flag
125 125 #
126 126 # When a changeset A' succeed to a changeset A which became public, we call A'
127 127 # "bumped" because it's a successors of a public changesets
128 128 #
129 129 # o A' (bumped)
130 130 # |`:
131 131 # | o A
132 132 # |/
133 133 # o Z
134 134 #
135 135 # The way to solve this situation is to create a new changeset Ad as children
136 136 # of A. This changeset have the same content than A'. So the diff from A to A'
137 137 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
138 138 #
139 139 # o Ad
140 140 # |`:
141 141 # | x A'
142 142 # |'|
143 143 # o | A
144 144 # |/
145 145 # o Z
146 146 #
147 147 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
148 148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
149 149 # This flag mean that the successors express the changes between the public and
150 150 # bumped version and fix the situation, breaking the transitivity of
151 151 # "bumped" here.
152 152 bumpedfix = 1
153 153 usingsha256 = 2
154 154
155 155 ## Parsing and writing of version "0"
156 156 #
157 157 # The header is followed by the markers. Each marker is made of:
158 158 #
159 159 # - 1 uint8 : number of new changesets "N", can be zero.
160 160 #
161 161 # - 1 uint32: metadata size "M" in bytes.
162 162 #
163 163 # - 1 byte: a bit field. It is reserved for flags used in common
164 164 # obsolete marker operations, to avoid repeated decoding of metadata
165 165 # entries.
166 166 #
167 167 # - 20 bytes: obsoleted changeset identifier.
168 168 #
169 169 # - N*20 bytes: new changesets identifiers.
170 170 #
171 171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 172 # string contains a key and a value, separated by a colon ':', without
173 173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 174 # cannot contain '\0'.
175 175 _fm0version = 0
176 176 _fm0fixed = '>BIB20s'
177 177 _fm0node = '20s'
178 178 _fm0fsize = _calcsize(_fm0fixed)
179 179 _fm0fnodesize = _calcsize(_fm0node)
180 180
def _fm0readmarkers(data, off, stop):
    """Yield format-0 markers decoded from ``data[off:stop]``.

    Each yielded marker is a ``(prec, sucs, flags, metadata, date, parents)``
    tuple. Date and parents are stored inside the fm0 metadata dict under the
    'date' and 'p0'/'p1'/'p2' keys and are extracted here.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement (zero or more successor nodeids)
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        # fm0 stores the creation date as a 'date' metadata entry; fall back
        # to the epoch on malformed values rather than aborting.
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        # parents are stored as 'p1'/'p2' entries; 'p0' means "explicitly
        # recorded as parentless"; no key at all means "no data recorded".
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
231 231
def _fm0encodeonemarker(marker):
    """Encode one ``(pre, sucs, flags, metadata, date, parents)`` tuple
    into the fm0 binary representation.

    fm0 has no dedicated fields for date and parents, so both are folded
    back into the metadata dict before encoding. sha256 nodes cannot be
    represented in this format and are rejected.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
251 251
252 252 def _fm0encodemeta(meta):
253 253 """Return encoded metadata string to string mapping.
254 254
255 255 Assume no ':' in key and no '\0' in both key and value."""
256 256 for key, value in meta.iteritems():
257 257 if ':' in key or '\0' in key:
258 258 raise ValueError("':' and '\0' are forbidden in metadata key'")
259 259 if '\0' in value:
260 260 raise ValueError("':' is forbidden in metadata value'")
261 261 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262 262
263 263 def _fm0decodemeta(data):
264 264 """Return string to string dictionary from encoded version."""
265 265 d = {}
266 266 for l in data.split('\0'):
267 267 if l:
268 268 key, value = l.split(':')
269 269 d[key] = value
270 270 return d
271 271
272 272 ## Parsing and writing of version "1"
273 273 #
274 274 # The header is followed by the markers. Each marker is made of:
275 275 #
276 276 # - uint32: total size of the marker (including this field)
277 277 #
278 278 # - float64: date in seconds since epoch
279 279 #
280 280 # - int16: timezone offset in minutes
281 281 #
282 282 # - uint16: a bit field. It is reserved for flags used in common
283 283 # obsolete marker operations, to avoid repeated decoding of metadata
284 284 # entries.
285 285 #
286 286 # - uint8: number of successors "N", can be zero.
287 287 #
288 288 # - uint8: number of parents "P", can be zero.
289 289 #
290 290 # 0: parents data stored but no parent,
291 291 # 1: one parent stored,
292 292 # 2: two parents stored,
293 293 # 3: no parent data stored
294 294 #
295 295 # - uint8: number of metadata entries M
296 296 #
297 297 # - 20 or 32 bytes: precursor changeset identifier.
298 298 #
299 299 # - N*(20 or 32) bytes: successors changesets identifiers.
300 300 #
301 301 # - P*(20 or 32) bytes: parents of the precursors changesets.
302 302 #
303 303 # - M*(uint8, uint8): size of all metadata entries (key and value)
304 304 #
305 305 # - remaining bytes: the metadata, each (key, value) pair after the other.
306 306 _fm1version = 1
307 307 _fm1fixed = '>IdhHBBB20s'
308 308 _fm1nodesha1 = '20s'
309 309 _fm1nodesha256 = '32s'
310 310 _fm1nodesha1size = _calcsize(_fm1nodesha1)
311 311 _fm1nodesha256size = _calcsize(_fm1nodesha256)
312 312 _fm1fsize = _calcsize(_fm1fixed)
313 313 _fm1parentnone = 3
314 314 _fm1parentshift = 14
315 315 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
316 316 _fm1metapair = 'BB'
317 317 _fm1metapairsize = _calcsize(_fm1metapair)
318 318
def _fm1purereadmarkers(data, off, stop):
    """Pure-Python decoder for format-1 markers in ``data[off:stop]``.

    Yields ``(prec, sucs, flags, metadata, date, parents)`` tuples. The
    ``usingsha256`` flag selects 32-byte instead of 20-byte node fields for
    successors and parents. Used as fallback when the C parser is missing.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents (noneflag == "no parent data recorded")
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents (noneflag == "no parent data recorded")
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first a table of (keysize, valuesize) byte pairs,
        # then the concatenated key/value payloads.
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes on disk, converted back to seconds here
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
392 392
def _fm1encodeonemarker(marker):
    """Encode one ``(pre, sucs, flags, metadata, date, parents)`` tuple
    into the fm1 binary representation.

    The total marker size is computed after packing the fixed part and the
    metadata size table, then patched into the first field.
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size (sha256 markers use 32-byte node fields)
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        # _fm1parentnone is the sentinel "no parent data recorded"
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is a placeholder for the total size, patched below
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
428 428
def _fm1readmarkers(data, off, stop):
    """Decode format-1 markers, preferring the C parser when available."""
    fast = getattr(parsers, 'fm1readmarkers', None)
    if fast:
        return fast(data, off, stop)
    # fall back to the pure-Python implementation
    return _fm1purereadmarkers(data, off, stop)
434 434
435 435 # mapping to read/write various marker formats
436 436 # <version> -> (decoder, encoder)
437 437 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
438 438 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
439 439
440 440 def _readmarkerversion(data):
441 441 return _unpack('>B', data[0:1])[0]
442 442
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data

    Returns a ``(version, iterator-of-markers)`` pair. Raises
    error.UnknownVersion when the on-disk version byte is not a known
    format.
    """
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    # formats maps version -> (decoder, encoder); use the decoder
    return diskversion, formats[diskversion][0](data, off, stop)
455 455
def encodeheader(version=_fm0version):
    """Return the one-byte obsstore version header."""
    return struct.pack('>B', version)
458 458
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield the binary encoding of ``markers``, one chunk per marker.

    Kept separate from flushmarkers(), it will be reused for
    markers exchange. When ``addheader`` is true, the version header is
    yielded first.
    """
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for onemarker in markers:
        yield encodeone(onemarker)
467 467
@util.nogc
def _addsuccessors(successors, markers):
    """Index ``markers`` into the ``successors`` mapping, keyed by the
    precursor node (``marker[0]``)."""
    for marker in markers:
        successors.setdefault(marker[0], set()).add(marker)
472 472
def _addprecursors(*args, **kwargs):
    # Deprecated alias kept for the 4.4 cycle: warn once, then forward to
    # the renamed implementation.
    msg = ("'obsolete._addprecursors' is deprecated, "
           "use 'obsolete._addpredecessors'")
    util.nouideprecwarn(msg, '4.4')

    return _addpredecessors(*args, **kwargs)
479
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index ``markers`` into the ``predecessors`` mapping, keyed by each
    successor node (elements of ``marker[1]``)."""
    for marker in markers:
        for successor in marker[1]:
            predecessors.setdefault(successor, set()).add(marker)
478 485
@util.nogc
def _addchildren(children, markers):
    """Index ``markers`` into the ``children`` mapping, keyed by each
    recorded parent node (``marker[5]``, which may be None)."""
    for marker in markers:
        parents = marker[5]
        if parents is None:
            # no parent data recorded for this marker
            continue
        for parent in parents:
            children.setdefault(parent, set()).add(marker)
486 493
487 494 def _checkinvalidmarkers(markers):
488 495 """search for marker with invalid data and raise error if needed
489 496
490 497 Exist as a separated function to allow the evolve extension for a more
491 498 subtle handling.
492 499 """
493 500 for mark in markers:
494 501 if node.nullid in mark[1]:
495 502 raise error.Abort(_('bad obsolescence marker detected: '
496 503 'invalid successors nullid'))
497 504
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                # > 1: the file holds more than the single version byte
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # nodeids must be full 20-byte binary hashes
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            # skip markers already on disk or duplicated in this batch
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw on-disk bytes; empty string when no obsstore file exists
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            # empty store: fall back to the configured default format
            return self._defaultformat

    @propertycache
    def _all(self):
        # decoded list of every marker in the store
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        # the public name is still 'precursors' while the internal helpers
        # are being renamed to the 'predecessors' terminology
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the propertycache for ``attr`` has been computed
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        """Append already-encoded ``markers`` to the in-memory state and
        update any computed index that is currently cached."""
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addpredecessors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers are those with no successors (empty m[1])
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # walk one step back through the predecessors of the new markers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
719 726
def makestore(ui, repo):
    """Create an obsstore instance from a repo.

    The store is read-only unless the 'createmarkers' obsolescence option is
    enabled; a warning is emitted when a read-only store already contains
    markers.
    """
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
735 742
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    ``versions`` is sorted in place, newest first. Returns None if no
    common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both side
    for candidate in versions:
        if candidate in formats:
            return candidate
    return None
747 754
# arbitrary picked to fit into 8K limit from HTTP server
# you have to take in account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        # pushkey exchange always uses the fm0 encoding
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    # parts are numbered in reverse order of creation; NOTE(review): this
    # reversal looks deliberate (consumers iterate dump0, dump1, ...) —
    # confirm against the pushkey consumers before changing it.
    for idx, part in enumerate(reversed(parts)):
        # each chunk is a standalone stream: version header + markers
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
774 781
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
780 787
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    ``new`` is a base85-encoded marker stream; ``old`` must be empty.
    Returns True on success, False on a malformed request.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    # take the repo lock before opening the transaction; release in
    # reverse order so the transaction never outlives the lock
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()
802 809
# keep compatibility for the 4.3 cycle
# (thin deprecation wrappers: each warns once and forwards unchanged to the
# implementation that moved to the obsutil module)
def allprecursors(obsstore, nodes, ignoreflags=0):
    movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)

def marker(repo, data):
    movemsg = 'obsolete.marker moved to obsutil.marker'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.marker(repo, data)

def getmarkers(repo, nodes=None, exclusive=False):
    movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)

def exclusivemarkers(repo, nodes):
    movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.exclusivemarkers(repo, nodes)

def foreground(repo, nodes):
    movemsg = 'obsolete.foreground moved to obsutil.foreground'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.foreground(repo, nodes)

def successorssets(repo, initialnode, cache=None):
    movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)
838 845
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}

def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def register(func):
        # refuse to silently overwrite a previously registered computer
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return register
850 857
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    # computed sets are defined on the unfiltered repo
    repo = repo.unfiltered()
    if not repo.obsstore:
        # no markers at all: every named set is trivially empty
        return frozenset()
    if name not in repo.obsstore.caches:
        # lazily compute and memoize via the registered cachefor function
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
861 868
862 869 # To be simple we need to invalidate obsolescence cache when:
863 870 #
864 871 # - new changeset is added:
865 872 # - public phase is changed
866 873 # - obsolescence marker are added
867 874 # - strip is used a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
879 886
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    # draft and secret are the two non-public (i.e. mutable) phases
    return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
883 890
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    tonode = repo.changelog.node
    hasmarker = repo.obsstore.successors.__contains__
    # a mutable revision is obsolete when a successor marker uses its node
    return set(r for r in _mutablerevs(repo) if hasmarker(tonode(r)))
892 899
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    parentrevs = repo.changelog.parentrevs
    obsolete = getrevs(repo, 'obsolete')
    candidates = _mutablerevs(repo) - obsolete
    unstable = set()
    # Ascending traversal guarantees that a revision's parents have been
    # classified before the revision itself is examined.
    for rev in sorted(candidates):
        # a rev is unstable if one of its parents is obsolete or unstable
        if any(p in obsolete or p in unstable for p in parentrevs(rev)):
            unstable.add(rev)
    return unstable
909 916
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    unstableancestors = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in unstableancestors)
915 922
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    obsolete = getrevs(repo, 'obsolete')
    return obsolete - getrevs(repo, 'suspended')
920 927
921 928
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # hoist attribute lookups out of the loop
    getphase = repo._phasecache.phase  # would be faster to grab the full list
    publicphase = phases.public
    torev = repo.changelog.nodemap.get
    # we only evaluate mutable, non-obsolete revisions
    for ctx in repo.set('(not public()) and (not obsolete())'):
        # (future) a cache of precursors may be worthwhile if split is common
        for pnode in obsutil.allprecursors(repo.obsstore, [ctx.node()],
                                           ignoreflags=bumpedfix):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if prev is not None and getphase(repo, prev) <= publicphase:
                # we have a public precursor
                bumped.add(ctx.rev())
                break  # next draft!
    return bumped
944 951
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}  # successorssets cache, shared across candidates
    for ctx in repo.set('(not public()) - obsolete()'):
        pending = set(obsstore.precursors.get(ctx.node(), ()))
        seen = set()
        while pending:
            precnode = pending.pop()[0]
            if precnode in seen:
                # emergency cycle hanging prevention
                continue
            seen.add(precnode)
            if precnode not in newermap:
                obsutil.successorssets(repo, precnode, cache=newermap)
            nonempty = [s for s in newermap[precnode] if s]
            if len(nonempty) > 1:
                # several competing sets of successors: divergence
                divergent.add(ctx.rev())
                break
            pending.update(obsstore.precursors.get(precnode, ()))
    return divergent
969 976
970 977
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx. The per-relation metadata dict is
    optional and is merged with the global metadata specified through the
    `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare the global metadata shared by every marker
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    trackoperation = repo.ui.configbool('experimental',
                                        'evolution.track-operation')
    if operation and trackoperation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        # Creating a marker invalidates the hidden cache, which would make
        # every later prec.parents() call below recompute it (n^2 behavior).
        # So first validate and collect all marker arguments, then create
        # the markers in a second pass.
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if len(rel) > 2:
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # pruned changeset: record its parents in the marker
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for nprec, nsucs, npare, localmetadata in markerargs:
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
General Comments 0
You need to be logged in to leave comments. Login now