##// END OF EJS Templates
obsstore: let read marker API take a range of offsets...
Jun Wu -
r33504:5d3ba439 default
parent child Browse files
Show More
@@ -1,1034 +1,1034 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depend of the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84
85 85 parsers = policy.importmod(r'parsers')
86 86
87 87 _pack = struct.pack
88 88 _unpack = struct.unpack
89 89 _calcsize = struct.calcsize
90 90 propertycache = util.propertycache
91 91
92 92 # the obsolete feature is not mature enough to be enabled by default.
93 93 # you have to rely on a third party extension to enable this.
94 94 _enabled = False
95 95
96 96 # Options for obsolescence
97 97 createmarkersopt = 'createmarkers'
98 98 allowunstableopt = 'allowunstable'
99 99 exchangeopt = 'exchange'
100 100
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    Raises error.Abort when 'allowunstable' or 'exchange' is enabled
    without 'createmarkers', since those options depend on it.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        # 'all' enables every obsolescence option at once
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if not result and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        createmarkersopt not in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result
121 121
122 122 ### obsolescence marker flag
123 123
124 124 ## bumpedfix flag
125 125 #
126 126 # When a changeset A' succeeds a changeset A which became public, we call A'
127 127 # "bumped" because it's a successor of a public changeset
128 128 #
129 129 # o A' (bumped)
130 130 # |`:
131 131 # | o A
132 132 # |/
133 133 # o Z
134 134 #
135 135 # The way to solve this situation is to create a new changeset Ad as children
136 136 # of A. This changeset has the same content as A'. So the diff from A to A'
137 137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
138 138 #
139 139 # o Ad
140 140 # |`:
141 141 # | x A'
142 142 # |'|
143 143 # o | A
144 144 # |/
145 145 # o Z
146 146 #
147 147 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
148 148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
149 149 # This flag means that the successors express the changes between the public and
150 150 # bumped version and fix the situation, breaking the transitivity of
151 151 # "bumped" here.
152 152 bumpedfix = 1
153 153 usingsha256 = 2
154 154
155 155 ## Parsing and writing of version "0"
156 156 #
157 157 # The header is followed by the markers. Each marker is made of:
158 158 #
159 159 # - 1 uint8 : number of new changesets "N", can be zero.
160 160 #
161 161 # - 1 uint32: metadata size "M" in bytes.
162 162 #
163 163 # - 1 byte: a bit field. It is reserved for flags used in common
164 164 # obsolete marker operations, to avoid repeated decoding of metadata
165 165 # entries.
166 166 #
167 167 # - 20 bytes: obsoleted changeset identifier.
168 168 #
169 169 # - N*20 bytes: new changesets identifiers.
170 170 #
171 171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 172 # string contains a key and a value, separated by a colon ':', without
173 173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 174 # cannot contain '\0'.
175 175 _fm0version = 0
176 176 _fm0fixed = '>BIB20s'
177 177 _fm0node = '20s'
178 178 _fm0fsize = _calcsize(_fm0fixed)
179 179 _fm0fnodesize = _calcsize(_fm0node)
180 180
def _fm0readmarkers(data, off, stop):
    """Decode version 0 markers found in ``data[off:stop]``.

    Yields one ``(prec, sucs, flags, metadata, date, parents)`` tuple per
    marker. ``stop`` is an exclusive upper bound on the offset at which a
    new marker may start.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            # the date is stored as a "seconds offset" pair in metadata
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            # 'p0' records "no parents" explicitly
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
232 231
def _fm0encodeonemarker(marker):
    """Return the version 0 binary encoding of a single marker tuple.

    Raises error.Abort for sha256 precursors, which the v0 format cannot
    represent. Date and parents are folded into the metadata dictionary.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
252 251
def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\\0' are forbidden in metadata key")
        if '\0' in value:
            # NOTE: the check is for NUL, not ':' — values may contain ':'
            raise ValueError("'\\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
263 262
264 263 def _fm0decodemeta(data):
265 264 """Return string to string dictionary from encoded version."""
266 265 d = {}
267 266 for l in data.split('\0'):
268 267 if l:
269 268 key, value = l.split(':')
270 269 d[key] = value
271 270 return d
272 271
273 272 ## Parsing and writing of version "1"
274 273 #
275 274 # The header is followed by the markers. Each marker is made of:
276 275 #
277 276 # - uint32: total size of the marker (including this field)
278 277 #
279 278 # - float64: date in seconds since epoch
280 279 #
281 280 # - int16: timezone offset in minutes
282 281 #
283 282 # - uint16: a bit field. It is reserved for flags used in common
284 283 # obsolete marker operations, to avoid repeated decoding of metadata
285 284 # entries.
286 285 #
287 286 # - uint8: number of successors "N", can be zero.
288 287 #
289 288 # - uint8: number of parents "P", can be zero.
290 289 #
291 290 # 0: parents data stored but no parent,
292 291 # 1: one parent stored,
293 292 # 2: two parents stored,
294 293 # 3: no parent data stored
295 294 #
296 295 # - uint8: number of metadata entries M
297 296 #
298 297 # - 20 or 32 bytes: precursor changeset identifier.
299 298 #
300 299 # - N*(20 or 32) bytes: successors changesets identifiers.
301 300 #
302 301 # - P*(20 or 32) bytes: parents of the precursors changesets.
303 302 #
304 303 # - M*(uint8, uint8): size of all metadata entries (key and value)
305 304 #
306 305 # - remaining bytes: the metadata, each (key, value) pair after the other.
307 306 _fm1version = 1
308 307 _fm1fixed = '>IdhHBBB20s'
309 308 _fm1nodesha1 = '20s'
310 309 _fm1nodesha256 = '32s'
311 310 _fm1nodesha1size = _calcsize(_fm1nodesha1)
312 311 _fm1nodesha256size = _calcsize(_fm1nodesha256)
313 312 _fm1fsize = _calcsize(_fm1fixed)
314 313 _fm1parentnone = 3
315 314 _fm1parentshift = 14
316 315 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
317 316 _fm1metapair = 'BB'
318 317 _fm1metapairsize = _calcsize('BB')
319 318
def _fm1purereadmarkers(data, off, stop):
    """Pure-python decoder for version 1 markers in ``data[off:stop]``.

    Yields one ``(prec, sucs, flags, metadata, date, parents)`` tuple per
    marker. ``stop`` is an exclusive upper bound on marker start offsets.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                # no parent data recorded at all
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                # no parent data recorded at all
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (keysize, valuesize) pair table, then the
        # concatenated key/value payloads
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes on disk, exposed in seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
394 392
def _fm1encodeonemarker(marker):
    """Return the version 1 binary encoding of a single marker tuple.

    The first field of the fixed part is the total marker size; it is
    back-patched into ``data[0]`` once the metadata lengths are known.
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    # patch the real total size into the placeholder first field
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
430 428
def _fm1readmarkers(data, off, stop):
    """Decode fm1 markers from ``data[off:stop]``, preferring the C parser."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, stop)
    # C extension unavailable: fall back to the pure-python implementation
    return _fm1purereadmarkers(data, off, stop)
437 434
438 435 # mapping to read/write various marker formats
439 436 # <version> -> (decoder, encoder)
440 437 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
441 438 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
442 439
def _readmarkerversion(data):
    """Return the format version recorded in the obsstore header byte."""
    (version,) = struct.unpack('>B', data[0:1])
    return version
445 442
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data

    ``off`` and ``stop`` delimit the byte range to decode: ``off`` defaults
    to 1 (skipping the one-byte version header) and ``stop`` to
    ``len(data)``.

    Returns a ``(version, markers)`` pair where ``markers`` is produced by
    the decoder registered for that version. Raises
    ``error.UnknownVersion`` for unsupported header bytes.
    """
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)
455 455
def encodeheader(version=_fm0version):
    """Return the one-byte obsstore file header for *version*."""
    return struct.pack('>B', version)
458 458
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Generate the binary encoding of *markers*, optionally with a header.

    Kept separate from flushmarkers(), it will be reused for markers
    exchange.
    """
    _, encoder = formats[version]
    if addheader:
        yield encodeheader(version)
    for m in markers:
        yield encoder(m)
467 467
@util.nogc
def _addsuccessors(successors, markers):
    """Index every marker in *markers* under its precursor node."""
    for marker in markers:
        prec = marker[0]
        if prec not in successors:
            successors[prec] = set()
        successors[prec].add(marker)
472 472
@util.nogc
def _addprecursors(precursors, markers):
    """Index every marker in *markers* under each of its successor nodes."""
    for marker in markers:
        for successor in marker[1]:
            if successor not in precursors:
                precursors[successor] = set()
            precursors[successor].add(marker)
478 478
@util.nogc
def _addchildren(children, markers):
    """Index every marker in *markers* under each parent of its precursor."""
    for marker in markers:
        parents = marker[5]
        if parents is None:
            # no parent data was recorded for this marker
            continue
        for parent in parents:
            if parent not in children:
                children[parent] = set()
            children[parent].add(marker)
486 486
def _checkinvalidmarkers(markers):
    """Raise Abort when a marker in *markers* carries invalid data.

    Kept as a standalone function so the evolve extension can substitute
    more subtle handling.
    """
    nullid = node.nullid
    for marker in markers:
        # a nullid successor is never legitimate
        if nullid in marker[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))
497 497
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x)
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        # format used when the on-disk store is empty (no header to read)
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # fast path: when markers are not loaded yet, a single stat() on the
        # obsstore file answers "any marker?"; size must exceed the one-byte
        # version header
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # precursor and successors must be full binary nodeids
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            # skip markers already on disk (indexed by precursor) or already
            # seen within this batch
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw bytes of the on-disk obsstore ('' when the file is missing)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            # empty store: fall back to the configured default format
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of every marker decoded from the on-disk data
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the propertycache named *attr* has been computed
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        # incrementally update in-memory state after a successful disk write
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers (empty successor list) of direct children
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # continue the walk from the precursors of the new markers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
719 719
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    version = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    extraargs = {}
    if version is not None:
        extraargs['defaultformat'] = version
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **extraargs)
    if store and readonly:
        # markers exist on disk but the feature is off: warn loudly
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
735 735
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # NOTE: sorts the caller's list in place (descending)
    versions.sort(reverse=True)
    # walk from newest to oldest and keep the first format we support
    for candidate in versions:
        if candidate in formats:
            return candidate
    return None
747 747
748 748 # arbitrary picked to fit into 8K limit from HTTP server
749 749 # you have to take in account:
750 750 # - the version header
751 751 # - the base85 encoding
752 752 _maxpayload = 5300
753 753
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            # current chunk would exceed the payload limit: start a new one
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    # NOTE(review): chunks are numbered in reverse insertion order —
    # presumably deliberate for exchange ordering; confirm before relying
    # on 'dumpN' numbering.
    for idx, part in enumerate(reversed(parts)):
        # each chunk carries its own version header so it decodes standalone
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
774 774
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if store:
        return _pushkeyescape(sorted(store))
    return {}
780 780
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    ``key`` must look like ``dump<N>``; ``new`` is a base85-encoded binary
    marker stream. Returns True on success, False when the key or old value
    is unexpected.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        # the obsstore is append-only: no previous value is ever expected
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    # take the repo lock before opening a transaction; both are released in
    # reverse order via the nested finally blocks
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()
802 802
# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Deprecated: use obsutil.allprecursors instead."""
    msg = 'obsolete.allprecursors moved to obsutil.allprecursors'
    util.nouideprecwarn(msg, '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Deprecated: use obsutil.allsuccessors instead."""
    msg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
    util.nouideprecwarn(msg, '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)

def marker(repo, data):
    """Deprecated: use obsutil.marker instead."""
    msg = 'obsolete.marker moved to obsutil.marker'
    repo.ui.deprecwarn(msg, '4.3')
    return obsutil.marker(repo, data)

def getmarkers(repo, nodes=None, exclusive=False):
    """Deprecated: use obsutil.getmarkers instead."""
    msg = 'obsolete.getmarkers moved to obsutil.getmarkers'
    repo.ui.deprecwarn(msg, '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)

def exclusivemarkers(repo, nodes):
    """Deprecated: use obsutil.exclusivemarkers instead."""
    msg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
    repo.ui.deprecwarn(msg, '4.3')
    return obsutil.exclusivemarkers(repo, nodes)

def foreground(repo, nodes):
    """Deprecated: use obsutil.foreground instead."""
    msg = 'obsolete.foreground moved to obsutil.foreground'
    repo.ui.deprecwarn(msg, '4.3')
    return obsutil.foreground(repo, nodes)

def successorssets(repo, initialnode, cache=None):
    """Deprecated: use obsutil.successorssets instead."""
    msg = 'obsolete.successorssets moved to obsutil.successorssets'
    repo.ui.deprecwarn(msg, '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)
838 838
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator registering a function as the computer of volatile set *name*."""
    def decorator(func):
        previous = cachefuncs.get(name)
        if previous is not None:
            # double registration is a programming error, not a config issue
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, previous))
        cachefuncs[name] = func
        return func
    return decorator
850 850
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    caches = repo.obsstore.caches
    if name not in caches:
        # first access: compute the set and memoize it on the obsstore
        caches[name] = cachefuncs[name](repo)
    return caches[name]
861 861
862 862 # To be simple we need to invalidate obsolescence cache when:
863 863 #
864 864 # - new changeset is added:
865 865 # - public phase is changed
866 866 # - obsolescence marker are added
867 867 # - strip is used a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This remove all cache in obsstore is the obsstore already exist on the
    repo.

    (We could be smarter here given the exact event that trigger the cache
    clearing)"""
    # nothing to do unless an obsstore has already been loaded on this repo
    if 'obsstore' not in repo._filecache:
        return
    repo.obsstore.caches.clear()
879 879
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    mutablephases = (phases.draft, phases.secret)
    return repo._phasecache.getrevset(repo, mutablephases)
883 883
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    # a mutable rev is obsolete iff a marker uses its node as precursor
    tonode = repo.changelog.node
    hasmarker = repo.obsstore.successors.__contains__
    return {r for r in _mutablerevs(repo) if hasmarker(tonode(r))}
892 892
@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    parentrevs = repo.changelog.parentrevs
    obsolete = getrevs(repo, 'obsolete')
    # candidates: mutable revisions that are not themselves obsolete
    candidates = _mutablerevs(repo) - obsolete
    unstable = set()
    # A rev is unstable if one of its parents is obsolete or unstable;
    # visiting in growing rev order guarantees parents are decided first.
    for rev in sorted(candidates):
        if any(p in obsolete or p in unstable for p in parentrevs(rev)):
            unstable.add(rev)
    return unstable
909 909
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # an obsolete rev is "suspended" if some unstable rev descends from it
    ancestorsofunstable = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return {r for r in getrevs(repo, 'obsolete') if r in ancestorsofunstable}
915 915
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    # extinct == obsolete minus the suspended ones
    obsolete = getrevs(repo, 'obsolete')
    return obsolete - getrevs(repo, 'suspended')
920 920
921 921
@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # bind lookups to locals to avoid attribute lookup in the loop
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    torev = repo.changelog.nodemap.get
    # we only evaluate mutable, non-obsolete revisions
    for ctx in repo.set('(not public()) and (not obsolete())'):
        node = ctx.node()
        # (future) A cache of precursors may worth if split is very common
        for pnode in obsutil.allprecursors(repo.obsstore, [node],
                                           ignoreflags=bumpedfix):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if prev is not None and phase(repo, prev) <= public:
                # a public precursor exists: this rev is bumped
                bumped.add(ctx.rev())
                break  # Next draft!
    return bumped
944 944
@cachefor('divergent')
def _computedivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        # walk precursor markers transitively from this rev's node
        pending = set(obsstore.precursors.get(ctx.node(), ()))
        visited = set()
        while pending:
            # each marker is a tuple whose first item is the precursor node
            prec = pending.pop()[0]
            if prec in visited:
                continue  # emergency cycle hanging prevention
            visited.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            nonempty = [s for s in newermap[prec] if s]
            if len(nonempty) > 1:
                # several live successors sets compete: divergence
                divergent.add(ctx.rev())
                break
            pending.update(obsstore.precursors.get(prec, ()))
    return divergent
969 969
970 970
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    metadata.setdefault('user', repo.ui.username())
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        # Creating a marker invalidates the hidden cache, which would make
        # prec.parents() below recompute it each iteration (n^2 behavior).
        # So first collect all marker arguments, then create the markers.
        markerargs = []
        for rel in relations:
            prec, sucs = rel[0], rel[1]
            localmetadata = metadata.copy()
            if len(rel) > 2:
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            # pruning marker (no successors): record the parents instead
            npare = tuple(p.node() for p in prec.parents()) if not nsucs \
                    else None
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for nprec, nsucs, npare, localmetadata in markerargs:
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        # new markers can change visibility: drop the filtered-rev caches
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
General Comments 0
You need to be logged in to leave comments. Login now