##// END OF EJS Templates
obsolete: clean createmarkers part about operation...
Boris Feld -
r34389:ddcef6d0 default
parent child Browse files
Show More
@@ -1,1072 +1,1075 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depend of the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84
85 85 parsers = policy.importmod(r'parsers')
86 86
87 87 _pack = struct.pack
88 88 _unpack = struct.unpack
89 89 _calcsize = struct.calcsize
90 90 propertycache = util.propertycache
91 91
92 92 # the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
94 94 _enabled = False
95 95
96 96 # Options for obsolescence
97 97 createmarkersopt = 'createmarkers'
98 98 allowunstableopt = 'allowunstable'
99 99 exchangeopt = 'exchange'
100 100
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    The special value 'all' in the 'experimental.stabilization' config
    enables every option at once. For migration purposes an empty config
    falls back on the module-level `_enabled` flag.

    Raises error.Abort when dependent options are enabled without
    'createmarkers'.
    """
    result = set(repo.ui.configlist('experimental', 'stabilization'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if not result and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
121 121
122 122 ### obsolescence marker flag
123 123
124 124 ## bumpedfix flag
125 125 #
126 126 # When a changeset A' succeed to a changeset A which became public, we call A'
127 127 # "bumped" because it's a successors of a public changesets
128 128 #
129 129 # o A' (bumped)
130 130 # |`:
131 131 # | o A
132 132 # |/
133 133 # o Z
134 134 #
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
138 138 #
139 139 # o Ad
140 140 # |`:
141 141 # | x A'
142 142 # |'|
143 143 # o | A
144 144 # |/
145 145 # o Z
146 146 #
147 147 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
148 148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
149 149 # This flag mean that the successors express the changes between the public and
150 150 # bumped version and fix the situation, breaking the transitivity of
151 151 # "bumped" here.
152 152 bumpedfix = 1
153 153 usingsha256 = 2
154 154
155 155 ## Parsing and writing of version "0"
156 156 #
157 157 # The header is followed by the markers. Each marker is made of:
158 158 #
159 159 # - 1 uint8 : number of new changesets "N", can be zero.
160 160 #
161 161 # - 1 uint32: metadata size "M" in bytes.
162 162 #
163 163 # - 1 byte: a bit field. It is reserved for flags used in common
164 164 # obsolete marker operations, to avoid repeated decoding of metadata
165 165 # entries.
166 166 #
167 167 # - 20 bytes: obsoleted changeset identifier.
168 168 #
169 169 # - N*20 bytes: new changesets identifiers.
170 170 #
171 171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 172 # string contains a key and a value, separated by a colon ':', without
173 173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 174 # cannot contain '\0'.
175 175 _fm0version = 0
176 176 _fm0fixed = '>BIB20s'
177 177 _fm0node = '20s'
178 178 _fm0fsize = _calcsize(_fm0fixed)
179 179 _fm0fnodesize = _calcsize(_fm0node)
180 180
def _fm0readmarkers(data, off, stop):
    """Yield version-0 markers decoded from ``data[off:stop]``.

    Each yielded marker is a
    ``(prec, sucs, flags, metadata, date, parents)`` tuple matching
    ``obsstore.fields``. ``date`` and ``parents`` are extracted from the
    metadata dictionary, where the v0 format stored them as the 'date' and
    'p0'/'p1'/'p2' keys.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            # truncated input: the advertised metadata size overruns the data
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            # malformed date entry: fall back to the epoch
            date = (0., 0)
        # v0 encodes parents as metadata keys: 'p0' alone means "no parents
        # recorded explicitly", 'p1'/'p2' carry the actual parent nodes and
        # absence of all three means "no parent data stored".
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        # remaining metadata is exposed as a sorted tuple of (key, value)
        # pairs so markers stay hashable
        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
231 231
def _fm0encodeonemarker(marker):
    """Serialize one marker tuple using the version 0 binary format.

    date and parents have no dedicated fields in v0, so they are folded
    back into the metadata dictionary before encoding.
    """
    prec, succs, flags, meta, date, parents = marker
    # the v0 fixed layout only has room for 20-byte (sha1) nodes
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    meta = dict(meta)
    when, offset = date
    meta['date'] = '%r %i' % (when, offset)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            meta['p0'] = ''
        idx = 1
        for parent in parents:
            meta['p%i' % idx] = node.hex(parent)
            idx += 1
    encodedmeta = _fm0encodemeta(meta)
    numsuc = len(succs)
    fmt = _fm0fixed + _fm0node * numsuc
    fields = [numsuc, len(encodedmeta), flags, prec]
    fields.extend(succs)
    return _pack(fmt, *fields) + encodedmeta
251 251
def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            # ':' separates key from value, '\0' separates entries
            raise ValueError("':' and '\\0' are forbidden in metadata key")
        if '\0' in value:
            # bug fix: this message used to claim ':' was the offender
            raise ValueError("'\\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262 262
263 263 def _fm0decodemeta(data):
264 264 """Return string to string dictionary from encoded version."""
265 265 d = {}
266 266 for l in data.split('\0'):
267 267 if l:
268 268 key, value = l.split(':')
269 269 d[key] = value
270 270 return d
271 271
272 272 ## Parsing and writing of version "1"
273 273 #
274 274 # The header is followed by the markers. Each marker is made of:
275 275 #
276 276 # - uint32: total size of the marker (including this field)
277 277 #
278 278 # - float64: date in seconds since epoch
279 279 #
280 280 # - int16: timezone offset in minutes
281 281 #
282 282 # - uint16: a bit field. It is reserved for flags used in common
283 283 # obsolete marker operations, to avoid repeated decoding of metadata
284 284 # entries.
285 285 #
286 286 # - uint8: number of successors "N", can be zero.
287 287 #
288 288 # - uint8: number of parents "P", can be zero.
289 289 #
290 290 # 0: parents data stored but no parent,
291 291 # 1: one parent stored,
292 292 # 2: two parents stored,
293 293 # 3: no parent data stored
294 294 #
295 295 # - uint8: number of metadata entries M
296 296 #
297 297 # - 20 or 32 bytes: predecessor changeset identifier.
298 298 #
299 299 # - N*(20 or 32) bytes: successors changesets identifiers.
300 300 #
301 301 # - P*(20 or 32) bytes: parents of the predecessors changesets.
302 302 #
303 303 # - M*(uint8, uint8): size of all metadata entries (key and value)
304 304 #
305 305 # - remaining bytes: the metadata, each (key, value) pair after the other.
306 306 _fm1version = 1
307 307 _fm1fixed = '>IdhHBBB20s'
308 308 _fm1nodesha1 = '20s'
309 309 _fm1nodesha256 = '32s'
310 310 _fm1nodesha1size = _calcsize(_fm1nodesha1)
311 311 _fm1nodesha256size = _calcsize(_fm1nodesha256)
312 312 _fm1fsize = _calcsize(_fm1fixed)
313 313 _fm1parentnone = 3
314 314 _fm1parentshift = 14
315 315 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
316 316 _fm1metapair = 'BB'
317 317 _fm1metapairsize = _calcsize(_fm1metapair)
318 318
def _fm1purereadmarkers(data, off, stop):
    """Pure-python decoder for version-1 markers in ``data[off:stop]``.

    Yields ``(prec, sucs, flags, metadata, date, parents)`` tuples. This is
    the fallback used when the C parser is unavailable (see _fm1readmarkers).
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        # t is the total marker size (unused here, the offsets are recomputed)
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                # fast path: avoid the struct round-trip for a single node
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                # special value: no parent data stored at all
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # same layout as above, with 20-byte sha1 nodes
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (keysize, valuesize) pair table, then the
        # concatenated key/value bytes
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # timezone is stored in minutes on disk, exposed in seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
392 392
def _fm1encodeonemarker(marker):
    """Serialize one marker tuple using the version 1 binary format.

    Layout: fixed header, successor nodes, parent nodes, metadata size
    table, then the raw metadata key/value bytes (see the format comment
    above _fm1version).
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        # sentinel value meaning "no parent data recorded"
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is the total size; filled in below once metadata is measured
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # metadata bytes trail the packed struct, in table order
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
428 428
def _fm1readmarkers(data, off, stop):
    """Decode version-1 markers, preferring the C parser when available."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, stop)
    # fall back on the (slower) pure python implementation
    return _fm1purereadmarkers(data, off, stop)
434 434
435 435 # mapping to read/write various marker formats
436 436 # <version> -> (decoder, encoder)
437 437 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
438 438 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
439 439
440 440 def _readmarkerversion(data):
441 441 return _unpack('>B', data[0:1])[0]
442 442
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw obsstore data.

    Returns ``(version, iterator-of-markers)``. ``off`` defaults to just
    past the one-byte version header and ``stop`` to the end of ``data``.
    Raises error.UnknownVersion for unsupported on-disk formats.
    """
    diskversion = _readmarkerversion(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    if not off:
        # skip the 1 byte version number
        off = 1
    if stop is None:
        stop = len(data)
    decoder = formats[diskversion][0]
    return diskversion, decoder(data, off, stop)
455 455
def encodeheader(version=_fm0version):
    """Return the one-byte obsstore file header for ``version``."""
    return struct.pack('>B', version)
458 458
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Generate the binary encoding of ``markers``.

    Kept separate from flushmarkers() because it is also reused for marker
    exchange. When ``addheader`` is true, the version header byte comes first.
    """
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for entry in markers:
        yield encodeone(entry)
467 467
@util.nogc
def _addsuccessors(successors, markers):
    """Index ``markers`` into ``successors`` keyed by predecessor node."""
    for marker in markers:
        predecessor = marker[0]
        successors.setdefault(predecessor, set()).add(marker)
472 472
def _addprecursors(*args, **kwargs):
    """Deprecated alias: forward to _addpredecessors with a warning."""
    util.nouideprecwarn("'obsolete._addprecursors' is deprecated, "
                        "use 'obsolete._addpredecessors'", '4.4')
    return _addpredecessors(*args, **kwargs)
479 479
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index ``markers`` into ``predecessors`` keyed by each successor node."""
    for marker in markers:
        for successor in marker[1]:
            predecessors.setdefault(successor, set()).add(marker)
485 485
@util.nogc
def _addchildren(children, markers):
    """Index ``markers`` into ``children`` keyed by each recorded parent."""
    for marker in markers:
        parents = marker[5]
        if parents is None:
            # no parent information recorded for this marker
            continue
        for parent in parents:
            children.setdefault(parent, set()).add(marker)
493 493
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    for marker in markers:
        # the null node can never be a valid successor
        if node.nullid in marker[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))
504 504
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, predecessors changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    # None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        # iterate over every known marker tuple
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # avoid parsing the whole file when possible: an obsstore file
        # longer than the 1-byte version header contains markers
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # nodes must be 20-byte binary sha1 identifiers
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # sorted tuple keeps the marker hashable and deterministic
        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            # skip markers already stored (same predecessor index entry) or
            # duplicated within this batch
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            # append-only write at the end of the obsstore file
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw bytes of the on-disk obsstore file (empty if missing)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        # version read from disk when a store exists, configured default
        # otherwise (so new stores use the requested format)
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of every marker parsed from disk, validated once
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # predecessor node -> set of markers rewriting it
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @property
    def precursors(self):
        # deprecated spelling kept for backward compatibility
        msg = ("'obsstore.precursors' is deprecated, "
               "use 'obsstore.predecessors'")
        util.nouideprecwarn(msg, '4.4')

        return self.predecessors

    @propertycache
    def predecessors(self):
        # successor node -> set of markers producing it
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        # parent node -> set of markers whose predecessor is a child of it
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the propertycache for ``attr`` has been computed
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        """Incrementally update in-memory state with freshly written markers."""
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        # keep the already-computed indexes in sync instead of invalidating
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        # walk backward through the marker graph until no new node appears
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # markers with no successors (mark[1] empty) are prunes
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # queue the predecessors of the newly found markers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
734 734
def makestore(ui, repo):
    """Create an obsstore instance from a repo.

    The store honors the configured on-disk format and is read-only unless
    the 'createmarkers' obsolescence option is enabled.
    """
    kwargs = {}
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    configuredformat = ui.configint('format', 'obsstore-version')
    if configuredformat is not None:
        kwargs['defaultformat'] = configuredformat
    # rely on obsstore class default when possible.
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
750 750
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    ``versions`` is sorted in place, highest first.
    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both side
    return next((candidate for candidate in versions if candidate in formats),
                None)
762 762
763 763 # arbitrary picked to fit into 8K limit from HTTP server
764 764 # you have to take in account:
765 765 # - the version header
766 766 # - the base85 encoding
767 767 _maxpayload = 5300
768 768
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        # pushkey exchange always uses the version 0 encoding
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            # current part full: start a new one
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    # NOTE: parts are numbered in reverse order; dump0 gets the last part
    for idx, part in enumerate(reversed(parts)):
        # each chunk is a standalone obsstore payload with its own header
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
789 789
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        # no markers to exchange
        return {}
    return _pushkeyescape(sorted(store))
795 795
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    ``new`` is a base85-encoded obsstore payload. Returns True when the
    markers were merged into the local store, False on an unexpected key
    or old value.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        # pushkey semantics: markers are append-only, no old value expected
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    # take the repo lock, then open a transaction; release in reverse order
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()
817 817
818 818 # keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Deprecated: moved to obsutil.allprecursors."""
    util.nouideprecwarn('obsolete.allprecursors moved to obsutil.allprecursors',
                        '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)
823 823
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Deprecated: moved to obsutil.allsuccessors."""
    util.nouideprecwarn('obsolete.allsuccessors moved to obsutil.allsuccessors',
                        '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
828 828
def marker(repo, data):
    """Deprecated: moved to obsutil.marker."""
    repo.ui.deprecwarn('obsolete.marker moved to obsutil.marker', '4.3')
    return obsutil.marker(repo, data)
833 833
def getmarkers(repo, nodes=None, exclusive=False):
    """Deprecated: moved to obsutil.getmarkers."""
    repo.ui.deprecwarn('obsolete.getmarkers moved to obsutil.getmarkers', '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
838 838
def exclusivemarkers(repo, nodes):
    """Deprecated: moved to obsutil.exclusivemarkers."""
    repo.ui.deprecwarn(
        'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers', '4.3')
    return obsutil.exclusivemarkers(repo, nodes)
843 843
def foreground(repo, nodes):
    """Deprecated: moved to obsutil.foreground."""
    repo.ui.deprecwarn('obsolete.foreground moved to obsutil.foreground', '4.3')
    return obsutil.foreground(repo, nodes)
848 848
def successorssets(repo, initialnode, cache=None):
    """Deprecated: moved to obsutil.successorssets."""
    repo.ui.deprecwarn(
        'obsolete.successorssets moved to obsutil.successorssets', '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)
853 853
854 854 # mapping of 'set-name' -> <function to compute this set>
855 855 cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        existing = cachefuncs.get(name)
        if existing is not None:
            # double registration is a programming error, not a user one
            raise error.ProgrammingError(
                "duplicated registration for volatileset '%s' (existing: %r)"
                % (name, existing))
        cachefuncs[name] = func
        return func
    return decorator
865 865
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    store = repo.obsstore
    if not store:
        # no obsolescence data: every volatile set is empty
        return frozenset()
    caches = store.caches
    if name not in caches:
        # lazily compute and memoize via the registered cache function
        caches[name] = cachefuncs[name](repo)
    return caches[name]
876 876
877 877 # To be simple we need to invalidate obsolescence cache when:
878 878 #
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo.

    This clears every cache in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
894 894
def _mutablerevs(repo):
    """Return the set of mutable (draft or secret) revisions in the repo."""
    mutablephases = (phases.draft, phases.secret)
    return repo._phasecache.getrevset(repo, mutablephases)
898 898
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """Compute the set of obsolete revisions.

    A mutable revision is obsolete when some marker uses it as predecessor.
    """
    tonode = repo.changelog.node
    successors = repo.obsstore.successors
    return {rev for rev in _mutablerevs(repo) if tonode(rev) in successors}
907 907
@cachefor('unstable')
def _computeunstableset(repo):
    """Deprecated alias for the 'orphan' volatile set."""
    repo.ui.deprecwarn("'unstable' volatile set is deprecated, "
                       "use 'orphan'", '4.4')
    return _computeorphanset(repo)
915 915
@cachefor('orphan')
def _computeorphanset(repo):
    """Compute the set of non-obsolete revisions with obsolete ancestors."""
    parentrevs = repo.changelog.parentrevs
    obsolete = getrevs(repo, 'obsolete')
    candidates = _mutablerevs(repo) - obsolete
    orphans = set()
    # Ascending traversal guarantees a parent is classified before any of
    # its children, so one pass is enough: a rev becomes an orphan as soon
    # as one of its parents is obsolete or already known to be an orphan.
    for rev in sorted(candidates):
        for parent in parentrevs(rev):
            if parent in obsolete or parent in orphans:
                orphans.add(rev)
                break
    return orphans
932 932
@cachefor('suspended')
def _computesuspendedset(repo):
    """Compute the obsolete revisions that still have orphan descendants."""
    orphanancestors = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return {r for r in getrevs(repo, 'obsolete') if r in orphanancestors}
938 938
@cachefor('extinct')
def _computeextinctset(repo):
    """Compute the obsolete revisions without any orphan descendant."""
    obsolete = getrevs(repo, 'obsolete')
    return obsolete - getrevs(repo, 'suspended')
943 943
@cachefor('bumped')
def _computebumpedset(repo):
    """Deprecated alias for the 'phasedivergent' volatile set."""
    repo.ui.deprecwarn("'bumped' volatile set is deprecated, "
                       "use 'phasedivergent'", '4.4')
    return _computephasedivergentset(repo)
951 951
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """Compute the set of revs trying to obsolete public revisions."""
    phasedivergent = set()
    cl = repo.changelog
    torev = cl.nodemap.get
    # hoist lookups out of the loop
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    # only mutable, non-obsolete revisions can be phase-divergent
    for ctx in repo.set('(not public()) and (not obsolete())'):
        node = ctx.node()
        # (future) A cache of predecessors may be worth it if split is common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if prev is not None and phase(repo, prev) <= public:
                # this changeset rewrites a public predecessor
                phasedivergent.add(ctx.rev())
                break  # Next draft!
    return phasedivergent
974 974
@cachefor('divergent')
def _computedivergentset(repo):
    """Deprecated alias for the 'contentdivergent' volatile set."""
    repo.ui.deprecwarn("'divergent' volatile set is deprecated, "
                       "use 'contentdivergent'", '4.4')
    return _computecontentdivergentset(repo)
982 982
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.

    A mutable, non-obsolete revision is content-divergent when one of its
    predecessors (direct or transitive) has more than one non-empty set of
    successors.
    """
    divergent = set()
    obsstore = repo.obsstore
    # cache of successors-sets keyed by node, shared across all iterations
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # markers that use ctx as a successor ("predecessor markers of ctx")
        mark = obsstore.predecessors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            # a marker is a tuple whose first item is the predecessor node
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                # populates newermap[prec] as a side effect
                obsutil.successorssets(repo, prec, cache=newermap)
            # ignore empty successors sets (pruned branches)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                # the predecessor has several surviving rewrites: divergence
                divergent.add(ctx.rev())
                break
            # walk further back along the predecessors chain
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent
1007 1007
1008 1008
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    `operation` names the command creating the markers; it is recorded in
    the marker metadata only when the
    `experimental.stabilization.track-operation` config knob is enabled.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'stabilization.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            # per-relation metadata overrides the global one
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                # prune marker: record the parents of the pruned changeset
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        # second pass: all arguments are computed, record the markers
        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        # new markers may unhide/hide changesets: drop the filtered-rev cache
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
General Comments 0
You need to be logged in to leave comments. Login now