##// END OF EJS Templates
obsstore: keep self._data updated with _addmarkers...
Jun Wu -
r33479:8b48dad6 default
parent child Browse files
Show More
@@ -1,1034 +1,1035 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
The header is followed by the markers. Marker format depends on the version. See
comment associated with each format for details.
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84
parsers = policy.importmod(r'parsers')

# short aliases for the struct helpers used throughout this module
_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence (values read from the 'experimental.evolution'
# config list)
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'
100 100
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
121 121
122 122 ### obsolescence marker flag
123 123
124 124 ## bumpedfix flag
125 125 #
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset
128 128 #
129 129 # o A' (bumped)
130 130 # |`:
131 131 # | o A
132 132 # |/
133 133 # o Z
134 134 #
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
138 138 #
139 139 # o Ad
140 140 # |`:
141 141 # | x A'
142 142 # |'|
143 143 # o | A
144 144 # |/
145 145 # o Z
146 146 #
147 147 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
148 148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
149 149 # This flag mean that the successors express the changes between the public and
150 150 # bumped version and fix the situation, breaking the transitivity of
151 151 # "bumped" here.
# flag for the "bumped fix" marker semantic described in the comment above
bumpedfix = 1
# flag signaling that node identifiers in the marker are SHA-256 (32 bytes)
# instead of SHA-1 (20 bytes)
usingsha256 = 2
154 154
155 155 ## Parsing and writing of version "0"
156 156 #
157 157 # The header is followed by the markers. Each marker is made of:
158 158 #
159 159 # - 1 uint8 : number of new changesets "N", can be zero.
160 160 #
161 161 # - 1 uint32: metadata size "M" in bytes.
162 162 #
163 163 # - 1 byte: a bit field. It is reserved for flags used in common
164 164 # obsolete marker operations, to avoid repeated decoding of metadata
165 165 # entries.
166 166 #
167 167 # - 20 bytes: obsoleted changeset identifier.
168 168 #
169 169 # - N*20 bytes: new changesets identifiers.
170 170 #
171 171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 172 # string contains a key and a value, separated by a colon ':', without
173 173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 174 # cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'  # numsuc, metadata size, flags, precursor node
_fm0node = '20s'       # a SHA-1 node
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
180 180
def _fm0readmarkers(data, off):
    """Yield markers decoded from *data* (version 0 format) starting at *off*.

    Each yielded marker is a (prec, sucs, flags, metadata, date, parents)
    tuple matching obsstore.fields.
    """
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        # format 0 has no dedicated date/parents fields: they were stored as
        # ordinary metadata entries, so extract them back out here
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            # 'p0' records "no parents" explicitly
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
232 232
def _fm0encodeonemarker(marker):
    """Return the version 0 binary encoding of a single marker tuple."""
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        # format 0 only has room for 20-byte (SHA-1) nodes
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    # format 0 has no dedicated date/parents fields: smuggle them through
    # the metadata dictionary
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
252 252
253 253 def _fm0encodemeta(meta):
254 254 """Return encoded metadata string to string mapping.
255 255
256 256 Assume no ':' in key and no '\0' in both key and value."""
257 257 for key, value in meta.iteritems():
258 258 if ':' in key or '\0' in key:
259 259 raise ValueError("':' and '\0' are forbidden in metadata key'")
260 260 if '\0' in value:
261 261 raise ValueError("':' is forbidden in metadata value'")
262 262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
263 263
264 264 def _fm0decodemeta(data):
265 265 """Return string to string dictionary from encoded version."""
266 266 d = {}
267 267 for l in data.split('\0'):
268 268 if l:
269 269 key, value = l.split(':')
270 270 d[key] = value
271 271 return d
272 272
273 273 ## Parsing and writing of version "1"
274 274 #
275 275 # The header is followed by the markers. Each marker is made of:
276 276 #
277 277 # - uint32: total size of the marker (including this field)
278 278 #
279 279 # - float64: date in seconds since epoch
280 280 #
281 281 # - int16: timezone offset in minutes
282 282 #
283 283 # - uint16: a bit field. It is reserved for flags used in common
284 284 # obsolete marker operations, to avoid repeated decoding of metadata
285 285 # entries.
286 286 #
287 287 # - uint8: number of successors "N", can be zero.
288 288 #
289 289 # - uint8: number of parents "P", can be zero.
290 290 #
291 291 # 0: parents data stored but no parent,
292 292 # 1: one parent stored,
293 293 # 2: two parents stored,
294 294 # 3: no parent data stored
295 295 #
296 296 # - uint8: number of metadata entries M
297 297 #
298 298 # - 20 or 32 bytes: precursor changeset identifier.
299 299 #
300 300 # - N*(20 or 32) bytes: successors changesets identifiers.
301 301 #
302 302 # - P*(20 or 32) bytes: parents of the precursors changesets.
303 303 #
304 304 # - M*(uint8, uint8): size of all metadata entries (key and value)
305 305 #
306 306 # - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
# fixed part: total size, date (secs), tz offset, flags, numsuc, numpar,
# number of metadata entries, precursor node
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize('BB')
319 319
def _fm1purereadmarkers(data, off):
    """Yield markers decoded from *data* (version 1 format) starting at *off*.

    Pure-Python fallback for the C implementation in parsers. Each yielded
    marker is a (prec, sucs, flags, metadata, date, parents) tuple.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: nummeta (key size, value size) byte pairs, then the
        # concatenated key/value strings themselves
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes on disk, converted back to seconds here
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
394 394
def _fm1encodeonemarker(marker):
    """Return the version 1 binary encoding of a single marker tuple."""
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        # sentinel value meaning "no parent data recorded"
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is the total-size field; filled in once known (below)
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # metadata strings are appended verbatim after the packed fixed part
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
430 430
def _fm1readmarkers(data, off):
    """Decode version 1 markers, preferring the C parser when available."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        # the C implementation needs an explicit stop offset
        return native(data, off, len(data) - _fm1fsize)
    return _fm1purereadmarkers(data, off)
437 437
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
442 442
def _readmarkerversion(data):
    """Return the format version stored in the first byte of *data*."""
    return struct.unpack('>B', data[0:1])[0]
445 445
@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data

    Returns a (version, marker-iterator) pair. Raises UnknownVersion when
    the on-disk format is not supported.
    """
    diskversion = _readmarkerversion(data)
    off = 1  # skip the one-byte version header
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off)
455 455
def encodeheader(version=_fm0version):
    """Return the one-byte obsstore file header for *version*."""
    return struct.pack('>B', version)
458 458
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield the binary encoding of each marker, optionally preceded by a
    version header.

    Kept separate from flushmarkers(), it will be reused for markers
    exchange."""
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)
467 467
@util.nogc
def _addsuccessors(successors, markers):
    """Index *markers* into *successors*, keyed by precursor node."""
    for marker in markers:
        successors.setdefault(marker[0], set()).add(marker)
472 472
@util.nogc
def _addprecursors(precursors, markers):
    """Index *markers* into *precursors*, keyed by each successor node."""
    for marker in markers:
        for successor in marker[1]:
            precursors.setdefault(successor, set()).add(marker)
478 478
@util.nogc
def _addchildren(children, markers):
    """Index *markers* into *children*, keyed by each recorded parent of
    the precursor (skipping markers with no parent data)."""
    for marker in markers:
        parents = marker[5]
        if parents is None:
            continue
        for parent in parents:
            children.setdefault(parent, set()).add(marker)
486 486
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    if any(node.nullid in marker[1] for marker in markers):
        raise error.Abort(_('bad obsolescence marker detected: '
                            'invalid successors nullid'))
497 497
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # when the markers are not loaded yet, a stat() of the file is
        # enough to answer "is there anything in the store?"
        if not self._cached('_all'):
            try:
                # > 1 because a non-empty store always has a version header
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # nodes must be 20-byte binary ids (SHA-1)
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # tuple-of-pairs form makes the marker hashable
        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        # drop markers already present in the store or duplicated within the
        # incoming batch
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()'  here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            # keep the in-memory state in sync with what was just written
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw on-disk bytes of the obsstore file; kept up to date by
        # _addmarkers() so appends do not force a re-read
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            # empty store: use the configured default for future writes
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of every marker in the store, decoded from _data
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # precursor node -> set of markers obsoleting it
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        # successor node -> set of markers producing it
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        # parent node -> set of markers whose precursor is one of its children
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True once the named propertycache has been computed
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        # update the in-memory view (_data, _all and any computed index)
        # so it stays consistent with the bytes just appended on disk
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers (empty successor tuple) attached to children
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # walk further along the precursors of the markers just found
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
718 719
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # developer config: format.obsstore-version
    # rely on the obsstore class default when the config is unset
    version = ui.configint('format', 'obsstore-version')
    kwargs = {} if version is None else {'defaultformat': version}
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
734 735
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # highest version first; note this sorts the caller's list in place
    versions.sort(reverse=True)
    return next((v for v in versions if v in formats), None)
746 747
# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
752 753
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    parts = []
    current = None
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        encoded = _fm0encodeonemarker(marker)
        if currentlen + len(encoded) > _maxpayload:
            # start a fresh chunk
            current = []
            currentlen = 0
            parts.append(current)
        current.append(encoded)
        currentlen += len(encoded)
    keys = {}
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
773 774
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
779 780
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    Returns False for ill-formed requests (unknown key or unexpected old
    value), True once the decoded markers have been merged in a transaction.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()
801 802
# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Deprecated: use obsutil.allprecursors instead."""
    util.nouideprecwarn('obsolete.allprecursors moved to obsutil.allprecursors',
                        '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)
807 808
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Deprecated: use obsutil.allsuccessors instead."""
    util.nouideprecwarn('obsolete.allsuccessors moved to obsutil.allsuccessors',
                        '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
812 813
def marker(repo, data):
    """Deprecated: use obsutil.marker instead."""
    repo.ui.deprecwarn('obsolete.marker moved to obsutil.marker', '4.3')
    return obsutil.marker(repo, data)
817 818
def getmarkers(repo, nodes=None, exclusive=False):
    """Deprecated: use obsutil.getmarkers instead."""
    repo.ui.deprecwarn('obsolete.getmarkers moved to obsutil.getmarkers', '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
822 823
def exclusivemarkers(repo, nodes):
    """Deprecated: use obsutil.exclusivemarkers instead."""
    repo.ui.deprecwarn(
        'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers', '4.3')
    return obsutil.exclusivemarkers(repo, nodes)
827 828
def foreground(repo, nodes):
    """Deprecated: use obsutil.foreground instead."""
    repo.ui.deprecwarn('obsolete.foreground moved to obsutil.foreground', '4.3')
    return obsutil.foreground(repo, nodes)
832 833
def successorssets(repo, initialnode, cache=None):
    """Deprecated: use obsutil.successorssets instead."""
    repo.ui.deprecwarn(
        'obsolete.successorssets moved to obsutil.successorssets', '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)
837 838
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}

def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def register(func):
        previous = cachefuncs.get(name)
        if previous is not None:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, previous))
        cachefuncs[name] = func
        return func
    return register
849 850
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    caches = repo.obsstore.caches
    if name not in caches:
        caches[name] = cachefuncs[name](repo)
    return caches[name]
860 861
# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This removes every cache in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
878 879
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    mutablephases = (phases.draft, phases.secret)
    return repo._phasecache.getrevset(repo, mutablephases)
882 883
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    # a mutable revision is obsolete when it is the precursor of some marker
    successors = repo.obsstore.successors
    tonode = repo.changelog.node
    return set(rev for rev in _mutablerevs(repo) if tonode(rev) in successors)
891 892
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    parentrevs = repo.changelog.parentrevs
    obsolete = getrevs(repo, 'obsolete')
    candidates = _mutablerevs(repo) - obsolete
    unstable = set()
    # Walk in increasing rev order: by the time a rev is examined, any
    # unstable parent has already been recorded, so one pass suffices.
    for rev in sorted(candidates):
        if any(p in obsolete or p in unstable for p in parentrevs(rev)):
            unstable.add(rev)
    return unstable
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # obsolete revisions that are ancestors of some unstable revision
    # cannot be hidden yet: they are "suspended"
    ancestors = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in ancestors)
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    # extinct = obsolete and no longer kept alive by any descendant
    return obsolete - suspended
920 921
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # hoisted lookups (avoid attribute access inside the loop)
    getphase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    torev = repo.changelog.nodemap.get
    # only mutable, non-obsolete revisions can possibly be bumped
    for ctx in repo.set('(not public()) and (not obsolete())'):
        # (future) A cache of precursors may worth if split is very common
        precursors = obsutil.allprecursors(repo.obsstore, [ctx.node()],
                                           ignoreflags=bumpedfix)
        for pnode in precursors:
            prev = torev(pnode) # unfiltered! but so is phasecache
            if prev is None:
                continue
            if getphase(repo, prev) <= public:
                # we have a public precursor: this rev is bumped
                bumped.add(ctx.rev())
                break # Next draft!
    return bumped
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk the precursor markers transitively, looking for a precursor
        # whose successors sets split in more than one direction
        toprocess = set(obsstore.precursors.get(ctx.node(), ()))
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [s for s in newermap[prec] if s]
            if len(newer) > 1:
                # several non-empty successors sets: divergence
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
969 970
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function,

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    # record the operation name only when the experimental option enables it
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation',
                                      False)
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        # first pass: validate every relation and gather marker arguments
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            # optional third element carries per-marker metadata
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # a "prune" marker (no successors): record the precursor's
                # parents so the pruned changeset can still be located
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        # second pass: actually write the markers inside the transaction
        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        # new markers may change changeset visibility
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        # release aborts the transaction unless tr.close() ran above
        tr.release()
General Comments 0
You need to be logged in to leave comments. Login now