obsolete: replace references to 'sopener' with 'svfs'
Siddharth Agarwal
r25669:8d948618 default
@@ -1,1253 +1,1253 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 build new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "precursor" and possible
24 24 replacements are called "successors". Markers that use changeset X as
25 25 a precursor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "precursor markers of Y" because they hold
28 28 information about the precursors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into B and
51 51 C, we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file located in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. The marker format depends on the
67 67 version. See the comment associated with each format for details.
68 68
69 69 """
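For orientation, a sketch of peeking at the version header described above; the path is illustrative and assumes a repository's store directory:

    import struct

    def obsstore_version(path='.hg/store/obsstore'):
        # read the single version byte at the start of the file
        with open(path, 'rb') as f:
            data = f.read(1)
        if not data:
            return None  # store not yet created or empty
        return struct.unpack('>B', data)[0]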
70 70 import struct
71 71 import util, base85, node, parsers
72 72 import phases
73 73 from i18n import _
74 74
75 75 _pack = struct.pack
76 76 _unpack = struct.unpack
77 77 _calcsize = struct.calcsize
78 78 propertycache = util.propertycache
79 79
80 80 # the obsolete feature is not mature enough to be enabled by default.
81 81 # you have to rely on a third party extension to enable this.
82 82 _enabled = False
83 83
84 84 # Options for obsolescence
85 85 createmarkersopt = 'createmarkers'
86 86 allowunstableopt = 'allowunstable'
87 87 exchangeopt = 'exchange'
88 88
89 89 ### obsolescence marker flag
90 90
91 91 ## bumpedfix flag
92 92 #
93 93 # When a changeset A' succeeds a changeset A which became public, we call A'
94 94 # "bumped" because it is a successor of a public changeset
95 95 #
96 96 # o A' (bumped)
97 97 # |`:
98 98 # | o A
99 99 # |/
100 100 # o Z
101 101 #
102 102 # The way to solve this situation is to create a new changeset Ad as a child
103 103 # of A. This changeset has the same content as A'. So the diff from A to A'
104 104 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
105 105 #
106 106 # o Ad
107 107 # |`:
108 108 # | x A'
109 109 # |'|
110 110 # o | A
111 111 # |/
112 112 # o Z
113 113 #
114 114 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
115 115 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
116 116 # This flag means that the successor expresses the changes between the public
117 117 # and bumped versions and fixes the situation, breaking the transitivity of
118 118 # "bumped" here.
119 119 bumpedfix = 1
120 120 usingsha256 = 2
121 121
122 122 ## Parsing and writing of version "0"
123 123 #
124 124 # The header is followed by the markers. Each marker is made of:
125 125 #
126 126 # - 1 uint8 : number of new changesets "N", can be zero.
127 127 #
128 128 # - 1 uint32: metadata size "M" in bytes.
129 129 #
130 130 # - 1 byte: a bit field. It is reserved for flags used in common
131 131 # obsolete marker operations, to avoid repeated decoding of metadata
132 132 # entries.
133 133 #
134 134 # - 20 bytes: obsoleted changeset identifier.
135 135 #
136 136 # - N*20 bytes: new changesets identifiers.
137 137 #
138 138 # - M bytes: metadata as a sequence of nul-terminated strings. Each
139 139 # string contains a key and a value, separated by a colon ':', without
140 140 # additional encoding. Keys cannot contain '\0' or ':' and values
141 141 # cannot contain '\0'.
142 142 _fm0version = 0
143 143 _fm0fixed = '>BIB20s'
144 144 _fm0node = '20s'
145 145 _fm0fsize = _calcsize(_fm0fixed)
146 146 _fm0fnodesize = _calcsize(_fm0node)
147 147
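As a sanity check on the v0 fixed part above (uint8 successor count, uint32 metadata size, uint8 flags, 20-byte node), assuming standard struct semantics where '>' disables padding:

    import struct
    assert struct.calcsize('>BIB20s') == 1 + 4 + 1 + 20  # _fm0fsize == 26
    assert struct.calcsize('20s') == 20                  # _fm0fnodesize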
148 148 def _fm0readmarkers(data, off):
149 149 # Loop on markers
150 150 l = len(data)
151 151 while off + _fm0fsize <= l:
152 152 # read fixed part
153 153 cur = data[off:off + _fm0fsize]
154 154 off += _fm0fsize
155 155 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
156 156 # read replacement
157 157 sucs = ()
158 158 if numsuc:
159 159 s = (_fm0fnodesize * numsuc)
160 160 cur = data[off:off + s]
161 161 sucs = _unpack(_fm0node * numsuc, cur)
162 162 off += s
163 163 # read metadata
164 164 # (metadata will be decoded on demand)
165 165 metadata = data[off:off + mdsize]
166 166 if len(metadata) != mdsize:
167 167 raise util.Abort(_('parsing obsolete marker: metadata is too '
168 168 'short, %d bytes expected, got %d')
169 169 % (mdsize, len(metadata)))
170 170 off += mdsize
171 171 metadata = _fm0decodemeta(metadata)
172 172 try:
173 173 when, offset = metadata.pop('date', '0 0').split(' ')
174 174 date = float(when), int(offset)
175 175 except ValueError:
176 176 date = (0., 0)
177 177 parents = None
178 178 if 'p2' in metadata:
179 179 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
180 180 elif 'p1' in metadata:
181 181 parents = (metadata.pop('p1', None),)
182 182 elif 'p0' in metadata:
183 183 parents = ()
184 184 if parents is not None:
185 185 try:
186 186 parents = tuple(node.bin(p) for p in parents)
187 187 # if parent content is not a nodeid, drop the data
188 188 for p in parents:
189 189 if len(p) != 20:
190 190 parents = None
191 191 break
192 192 except TypeError:
193 193 # if content cannot be translated to nodeid drop the data.
194 194 parents = None
195 195
196 196 metadata = tuple(sorted(metadata.iteritems()))
197 197
198 198 yield (pre, sucs, flags, metadata, date, parents)
199 199
200 200 def _fm0encodeonemarker(marker):
201 201 pre, sucs, flags, metadata, date, parents = marker
202 202 if flags & usingsha256:
203 203 raise util.Abort(_('cannot handle sha256 with old obsstore format'))
204 204 metadata = dict(metadata)
205 205 time, tz = date
206 206 metadata['date'] = '%r %i' % (time, tz)
207 207 if parents is not None:
208 208 if not parents:
209 209 # mark that we explicitly recorded no parents
210 210 metadata['p0'] = ''
211 211 for i, p in enumerate(parents):
212 212 metadata['p%i' % (i + 1)] = node.hex(p)
213 213 metadata = _fm0encodemeta(metadata)
214 214 numsuc = len(sucs)
215 215 format = _fm0fixed + (_fm0node * numsuc)
216 216 data = [numsuc, len(metadata), flags, pre]
217 217 data.extend(sucs)
218 218 return _pack(format, *data) + metadata
219 219
220 220 def _fm0encodemeta(meta):
221 221 """Return encoded metadata string to string mapping.
222 222
223 223 Assume no ':' in keys and no '\0' in keys or values."""
224 224 for key, value in meta.iteritems():
225 225 if ':' in key or '\0' in key:
226 226 raise ValueError("':' and '\0' are forbidden in metadata keys")
227 227 if '\0' in value:
228 228 raise ValueError("'\0' is forbidden in metadata values")
229 229 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
230 230
231 231 def _fm0decodemeta(data):
232 232 """Return string to string dictionary from encoded version."""
233 233 d = {}
234 234 for l in data.split('\0'):
235 235 if l:
236 236 key, value = l.split(':')
237 237 d[key] = value
238 238 return d
239 239
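A self-contained round-trip sketch of the v0 metadata encoding; it mirrors the two helpers above, except that split(':', 1) is used so that a ':' inside a value round-trips (the format comment only forbids ':' in keys, while _fm0decodemeta's plain split would raise on it):

    def encodemeta(meta):
        return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

    def decodemeta(data):
        d = {}
        for item in data.split('\0'):
            if item:
                key, value = item.split(':', 1)
                d[key] = value
        return d

    meta = {'user': 'alice', 'note': 'a:colon:is:fine'}
    assert decodemeta(encodemeta(meta)) == meta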
240 240 ## Parsing and writing of version "1"
241 241 #
242 242 # The header is followed by the markers. Each marker is made of:
243 243 #
244 244 # - uint32: total size of the marker (including this field)
245 245 #
246 246 # - float64: date in seconds since epoch
247 247 #
248 248 # - int16: timezone offset in minutes
249 249 #
250 250 # - uint16: a bit field. It is reserved for flags used in common
251 251 # obsolete marker operations, to avoid repeated decoding of metadata
252 252 # entries.
253 253 #
254 254 # - uint8: number of successors "N", can be zero.
255 255 #
256 256 # - uint8: number of parents "P", can be zero.
257 257 #
258 258 # 0: parents data stored but no parent,
259 259 # 1: one parent stored,
260 260 # 2: two parents stored,
261 261 # 3: no parent data stored
262 262 #
263 263 # - uint8: number of metadata entries M
264 264 #
265 265 # - 20 or 32 bytes: precursor changeset identifier.
266 266 #
267 267 # - N*(20 or 32) bytes: successor changeset identifiers.
268 268 #
269 269 # - P*(20 or 32) bytes: parents of the precursor changeset.
270 270 #
271 271 # - M*(uint8, uint8): size of all metadata entries (key and value)
272 272 #
273 273 # - remaining bytes: the metadata, each (key, value) pair after the other.
274 274 _fm1version = 1
275 275 _fm1fixed = '>IdhHBBB20s'
276 276 _fm1nodesha1 = '20s'
277 277 _fm1nodesha256 = '32s'
278 278 _fm1nodesha1size = _calcsize(_fm1nodesha1)
279 279 _fm1nodesha256size = _calcsize(_fm1nodesha256)
280 280 _fm1fsize = _calcsize(_fm1fixed)
281 281 _fm1parentnone = 3
282 282 _fm1parentshift = 14
283 283 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
284 284 _fm1metapair = 'BB'
285 285 _fm1metapairsize = _calcsize('BB')
286 286
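A quick consistency check of the v1 fixed part (uint32 total size, float64 date, int16 timezone, uint16 flags, three uint8 counters, 20-byte sha1 precursor) and of the parent-count bits defined above:

    import struct
    assert struct.calcsize('>IdhHBBB20s') == 4 + 8 + 2 + 2 + 3 + 20  # _fm1fsize == 39
    assert _fm1parentmask == 0xc000  # top two bits of a 16-bit field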
287 287 def _fm1purereadmarkers(data, off):
288 288 # make some global constants local for performance
289 289 noneflag = _fm1parentnone
290 290 sha2flag = usingsha256
291 291 sha1size = _fm1nodesha1size
292 292 sha2size = _fm1nodesha256size
293 293 sha1fmt = _fm1nodesha1
294 294 sha2fmt = _fm1nodesha256
295 295 metasize = _fm1metapairsize
296 296 metafmt = _fm1metapair
297 297 fsize = _fm1fsize
298 298 unpack = _unpack
299 299
300 300 # Loop on markers
301 301 stop = len(data) - _fm1fsize
302 302 ufixed = struct.Struct(_fm1fixed).unpack
303 303
304 304 while off <= stop:
305 305 # read fixed part
306 306 o1 = off + fsize
307 307 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
308 308
309 309 if flags & sha2flag:
310 310 # FIXME: prec was read as a SHA1, needs to be amended
311 311
312 312 # read 0 or more successors
313 313 if numsuc == 1:
314 314 o2 = o1 + sha2size
315 315 sucs = (data[o1:o2],)
316 316 else:
317 317 o2 = o1 + sha2size * numsuc
318 318 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
319 319
320 320 # read parents
321 321 if numpar == noneflag:
322 322 o3 = o2
323 323 parents = None
324 324 elif numpar == 1:
325 325 o3 = o2 + sha2size
326 326 parents = (data[o2:o3],)
327 327 else:
328 328 o3 = o2 + sha2size * numpar
329 329 parents = unpack(sha2fmt * numpar, data[o2:o3])
330 330 else:
331 331 # read 0 or more successors
332 332 if numsuc == 1:
333 333 o2 = o1 + sha1size
334 334 sucs = (data[o1:o2],)
335 335 else:
336 336 o2 = o1 + sha1size * numsuc
337 337 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
338 338
339 339 # read parents
340 340 if numpar == noneflag:
341 341 o3 = o2
342 342 parents = None
343 343 elif numpar == 1:
344 344 o3 = o2 + sha1size
345 345 parents = (data[o2:o3],)
346 346 else:
347 347 o3 = o2 + sha1size * numpar
348 348 parents = unpack(sha1fmt * numpar, data[o2:o3])
349 349
350 350 # read metadata
351 351 off = o3 + metasize * nummeta
352 352 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
353 353 metadata = []
354 354 for idx in xrange(0, len(metapairsize), 2):
355 355 o1 = off + metapairsize[idx]
356 356 o2 = o1 + metapairsize[idx + 1]
357 357 metadata.append((data[off:o1], data[o1:o2]))
358 358 off = o2
359 359
360 360 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
361 361
362 362 def _fm1encodeonemarker(marker):
363 363 pre, sucs, flags, metadata, date, parents = marker
364 364 # determine node size
365 365 _fm1node = _fm1nodesha1
366 366 if flags & usingsha256:
367 367 _fm1node = _fm1nodesha256
368 368 numsuc = len(sucs)
369 369 numextranodes = numsuc
370 370 if parents is None:
371 371 numpar = _fm1parentnone
372 372 else:
373 373 numpar = len(parents)
374 374 numextranodes += numpar
375 375 formatnodes = _fm1node * numextranodes
376 376 formatmeta = _fm1metapair * len(metadata)
377 377 format = _fm1fixed + formatnodes + formatmeta
378 378 # tz is stored in minutes so we divide by 60
379 379 tz = date[1]//60
380 380 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
381 381 data.extend(sucs)
382 382 if parents is not None:
383 383 data.extend(parents)
384 384 totalsize = _calcsize(format)
385 385 for key, value in metadata:
386 386 lk = len(key)
387 387 lv = len(value)
388 388 data.append(lk)
389 389 data.append(lv)
390 390 totalsize += lk + lv
391 391 data[0] = totalsize
392 392 data = [_pack(format, *data)]
393 393 for key, value in metadata:
394 394 data.append(key)
395 395 data.append(value)
396 396 return ''.join(data)
397 397
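A hedged round-trip check of the two v1 codecs above (assumes this module is importable as mercurial.obsolete and runs under Python 2, like the code itself; the node values are illustrative 20-byte strings):

    from mercurial import obsolete

    prec = '\x11' * 20
    mark = (prec, ('\x22' * 20,), 0, (('user', 'alice'),), (0.0, 0), None)
    raw = obsolete._fm1encodeonemarker(mark)
    assert list(obsolete._fm1purereadmarkers(raw, 0)) == [mark]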
398 398 def _fm1readmarkers(data, off):
399 399 native = getattr(parsers, 'fm1readmarkers', None)
400 400 if not native:
401 401 return _fm1purereadmarkers(data, off)
402 402 stop = len(data) - _fm1fsize
403 403 return native(data, off, stop)
404 404
405 405 # mapping to read/write various marker formats
406 406 # <version> -> (decoder, encoder)
407 407 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
408 408 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
409 409
410 410 @util.nogc
411 411 def _readmarkers(data):
412 412 """Read and enumerate markers from raw data"""
413 413 off = 0
414 414 diskversion = _unpack('>B', data[off:off + 1])[0]
415 415 off += 1
416 416 if diskversion not in formats:
417 417 raise util.Abort(_('parsing obsolete marker: unknown version %r')
418 418 % diskversion)
419 419 return diskversion, formats[diskversion][0](data, off)
420 420
421 421 def encodemarkers(markers, addheader=False, version=_fm0version):
422 422 # Kept separate from flushmarkers(), it will be reused for
423 423 # markers exchange.
424 424 encodeone = formats[version][1]
425 425 if addheader:
426 426 yield _pack('>B', version)
427 427 for marker in markers:
428 428 yield encodeone(marker)
429 429
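Usage sketch: producing a complete, versioned binary stream from an iterable of marker tuples (here `markers` stands for any such iterable):

    stream = ''.join(encodemarkers(markers, addheader=True, version=_fm1version))
    # the first byte of `stream` is the version header expected by _readmarkers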
430 430
431 431 class marker(object):
432 432 """Wrap obsolete marker raw data"""
433 433
434 434 def __init__(self, repo, data):
435 435 # the repo argument will be used to create changectxs in a later version
436 436 self._repo = repo
437 437 self._data = data
438 438 self._decodedmeta = None
439 439
440 440 def __hash__(self):
441 441 return hash(self._data)
442 442
443 443 def __eq__(self, other):
444 444 if type(other) != type(self):
445 445 return False
446 446 return self._data == other._data
447 447
448 448 def precnode(self):
449 449 """Precursor changeset node identifier"""
450 450 return self._data[0]
451 451
452 452 def succnodes(self):
453 453 """List of successor changesets node identifiers"""
454 454 return self._data[1]
455 455
456 456 def parentnodes(self):
457 457 """Parents of the precursors (None if not recorded)"""
458 458 return self._data[5]
459 459
460 460 def metadata(self):
461 461 """Decoded metadata dictionary"""
462 462 return dict(self._data[3])
463 463
464 464 def date(self):
465 465 """Creation date as (unixtime, offset)"""
466 466 return self._data[4]
467 467
468 468 def flags(self):
469 469 """The flags field of the marker"""
470 470 return self._data[2]
471 471
472 472 @util.nogc
473 473 def _addsuccessors(successors, markers):
474 474 for mark in markers:
475 475 successors.setdefault(mark[0], set()).add(mark)
476 476
477 477 @util.nogc
478 478 def _addprecursors(precursors, markers):
479 479 for mark in markers:
480 480 for suc in mark[1]:
481 481 precursors.setdefault(suc, set()).add(mark)
482 482
483 483 @util.nogc
484 484 def _addchildren(children, markers):
485 485 for mark in markers:
486 486 parents = mark[5]
487 487 if parents is not None:
488 488 for p in parents:
489 489 children.setdefault(p, set()).add(mark)
490 490
491 491 def _checkinvalidmarkers(markers):
492 492 """search for markers with invalid data and raise an error if needed
493 493
494 494 Exists as a separate function to allow the evolve extension a more
495 495 subtle handling.
496 496 """
497 497 for mark in markers:
498 498 if node.nullid in mark[1]:
499 499 raise util.Abort(_('bad obsolescence marker detected: '
500 500 'invalid successors nullid'))
501 501
502 502 class obsstore(object):
503 503 """Store obsolete markers
504 504
505 505 Markers can be accessed with several mappings:
506 506 - precursors[x] -> set(markers on precursors edges of x)
507 507 - successors[x] -> set(markers on successors edges of x)
508 508 - children[x] -> set(markers on precursors edges of children(x))
509 509 """
510 510
511 511 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
512 512 # prec: nodeid, precursor changeset
513 513 # succs: tuple of nodeid, successor changesets (0-N length)
514 514 # flag: integer, flag field carrying modifier for the markers (see doc)
515 515 # meta: binary blob, encoded metadata dictionary
516 516 # date: (float, int) tuple, date of marker creation
517 517 # parents: (tuple of nodeid) or None, parents of precursors
518 518 # None is used when no data has been recorded
519 519
520 def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
520 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
521 521 # caches for various obsolescence related computations
522 522 self.caches = {}
523 523 self._all = []
524 self.sopener = sopener
525 data = sopener.tryread('obsstore')
524 self.svfs = svfs
525 data = svfs.tryread('obsstore')
526 526 self._version = defaultformat
527 527 self._readonly = readonly
528 528 if data:
529 529 self._version, markers = _readmarkers(data)
530 530 self._addmarkers(markers)
531 531
532 532 def __iter__(self):
533 533 return iter(self._all)
534 534
535 535 def __len__(self):
536 536 return len(self._all)
537 537
538 538 def __nonzero__(self):
539 539 return bool(self._all)
540 540
541 541 def create(self, transaction, prec, succs=(), flag=0, parents=None,
542 542 date=None, metadata=None):
543 543 """obsolete: add a new obsolete marker
544 544
545 545 * ensure it is hashable
546 546 * check mandatory metadata
547 547 * encode metadata
548 548
549 549 If you are a human writing code that creates markers, you want to use the
550 550 `createmarkers` function in this module instead.
551 551
552 552 return True if a new marker has been added, False if the marker
553 553 already existed (no-op).
554 554 """
555 555 if metadata is None:
556 556 metadata = {}
557 557 if date is None:
558 558 if 'date' in metadata:
559 559 # as a courtesy for out-of-tree extensions
560 560 date = util.parsedate(metadata.pop('date'))
561 561 else:
562 562 date = util.makedate()
563 563 if len(prec) != 20:
564 564 raise ValueError(prec)
565 565 for succ in succs:
566 566 if len(succ) != 20:
567 567 raise ValueError(succ)
568 568 if prec in succs:
569 569 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
570 570
571 571 metadata = tuple(sorted(metadata.iteritems()))
572 572
573 573 marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
574 574 return bool(self.add(transaction, [marker]))
575 575
576 576 def add(self, transaction, markers):
577 577 """Add new markers to the store
578 578
579 579 Take care of filtering out duplicates.
580 580 Return the number of new markers."""
581 581 if self._readonly:
582 582 raise util.Abort('creating obsolete markers is not enabled on this '
583 583 'repo')
584 584 known = set(self._all)
585 585 new = []
586 586 for m in markers:
587 587 if m not in known:
588 588 known.add(m)
589 589 new.append(m)
590 590 if new:
591 f = self.sopener('obsstore', 'ab')
591 f = self.svfs('obsstore', 'ab')
592 592 try:
593 593 offset = f.tell()
594 594 transaction.add('obsstore', offset)
595 595 # offset == 0: new file - add the version header
596 596 for bytes in encodemarkers(new, offset == 0, self._version):
597 597 f.write(bytes)
598 598 finally:
599 599 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
600 600 # call 'filecacheentry.refresh()' here
601 601 f.close()
602 602 self._addmarkers(new)
603 603 # new markers *may* have changed several sets. invalidate the cache.
604 604 self.caches.clear()
605 605 # records the number of new markers for the transaction hooks
606 606 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
607 607 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
608 608 return len(new)
609 609
610 610 def mergemarkers(self, transaction, data):
611 611 """merge a binary stream of markers inside the obsstore
612 612
613 613 Returns the number of new markers added."""
614 614 version, markers = _readmarkers(data)
615 615 return self.add(transaction, markers)
616 616
617 617 @propertycache
618 618 def successors(self):
619 619 successors = {}
620 620 _addsuccessors(successors, self._all)
621 621 return successors
622 622
623 623 @propertycache
624 624 def precursors(self):
625 625 precursors = {}
626 626 _addprecursors(precursors, self._all)
627 627 return precursors
628 628
629 629 @propertycache
630 630 def children(self):
631 631 children = {}
632 632 _addchildren(children, self._all)
633 633 return children
634 634
635 635 def _cached(self, attr):
636 636 return attr in self.__dict__
637 637
638 638 def _addmarkers(self, markers):
639 639 markers = list(markers) # to allow repeated iteration
640 640 self._all.extend(markers)
641 641 if self._cached('successors'):
642 642 _addsuccessors(self.successors, markers)
643 643 if self._cached('precursors'):
644 644 _addprecursors(self.precursors, markers)
645 645 if self._cached('children'):
646 646 _addchildren(self.children, markers)
647 647 _checkinvalidmarkers(markers)
648 648
649 649 def relevantmarkers(self, nodes):
650 650 """return a set of all obsolescence markers relevant to a set of nodes.
651 651
652 652 "relevant" to a set of nodes means:
653 653
654 654 - markers that use this changeset as a successor
655 655 - prune markers of direct children of this changeset
656 656 - recursive application of the two rules on precursors of these markers
657 657
658 658 It is a set so you cannot rely on order."""
659 659
660 660 pendingnodes = set(nodes)
661 661 seenmarkers = set()
662 662 seennodes = set(pendingnodes)
663 663 precursorsmarkers = self.precursors
664 664 children = self.children
665 665 while pendingnodes:
666 666 direct = set()
667 667 for current in pendingnodes:
668 668 direct.update(precursorsmarkers.get(current, ()))
669 669 pruned = [m for m in children.get(current, ()) if not m[1]]
670 670 direct.update(pruned)
671 671 direct -= seenmarkers
672 672 pendingnodes = set([m[0] for m in direct])
673 673 seenmarkers |= direct
674 674 pendingnodes -= seennodes
675 675 seennodes |= pendingnodes
676 676 return seenmarkers
677 677
678 678 def commonversion(versions):
679 679 """Return the newest version listed in both versions and our local formats.
680 680
681 681 Returns None if no common version exists.
682 682 """
683 683 versions.sort(reverse=True)
684 684 # search for highest version known on both side
685 685 for v in versions:
686 686 if v in formats:
687 687 return v
688 688 return None
689 689
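For example, with the two formats registered above, a peer advertising versions [0, 1, 2] negotiates down to 1; note the list is sorted in place as a side effect:

    assert commonversion([0, 1, 2]) == 1   # 2 is unknown locally
    assert commonversion([5, 7]) is None   # no common version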
690 690 # arbitrarily picked to fit into the 8K limit of HTTP servers
691 691 # you have to take into account:
692 692 # - the version header
693 693 # - the base85 encoding
694 694 _maxpayload = 5300
695 695
696 696 def _pushkeyescape(markers):
697 697 """encode markers into a dict suitable for pushkey exchange
698 698
699 699 - binary data is base85 encoded
700 700 - split into chunks smaller than 5300 bytes"""
701 701 keys = {}
702 702 parts = []
703 703 currentlen = _maxpayload * 2 # ensure we create a new part
704 704 for marker in markers:
705 705 nextdata = _fm0encodeonemarker(marker)
706 706 if (len(nextdata) + currentlen > _maxpayload):
707 707 currentpart = []
708 708 currentlen = 0
709 709 parts.append(currentpart)
710 710 currentpart.append(nextdata)
711 711 currentlen += len(nextdata)
712 712 for idx, part in enumerate(reversed(parts)):
713 713 data = ''.join([_pack('>B', _fm0version)] + part)
714 714 keys['dump%i' % idx] = base85.b85encode(data)
715 715 return keys
716 716
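The resulting dictionary maps 'dump0', 'dump1', ... to base85 payloads, each built from at most ~5300 bytes of raw marker data. A sketch, assuming a repo with markers:

    keys = _pushkeyescape(sorted(repo.obsstore))
    # e.g. {'dump0': '...base85...'}; each value fits the 8K pushkey budget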
717 717 def listmarkers(repo):
718 718 """List markers over pushkey"""
719 719 if not repo.obsstore:
720 720 return {}
721 721 return _pushkeyescape(sorted(repo.obsstore))
722 722
723 723 def pushmarker(repo, key, old, new):
724 724 """Push markers over pushkey"""
725 725 if not key.startswith('dump'):
726 726 repo.ui.warn(_('unknown key: %r') % key)
727 727 return 0
728 728 if old:
729 729 repo.ui.warn(_('unexpected old value for %r') % key)
730 730 return 0
731 731 data = base85.b85decode(new)
732 732 lock = repo.lock()
733 733 try:
734 734 tr = repo.transaction('pushkey: obsolete markers')
735 735 try:
736 736 repo.obsstore.mergemarkers(tr, data)
737 737 tr.close()
738 738 return 1
739 739 finally:
740 740 tr.release()
741 741 finally:
742 742 lock.release()
743 743
744 744 def getmarkers(repo, nodes=None):
745 745 """returns markers known in a repository
746 746
747 747 If <nodes> is specified, only markers "relevant" to those nodes are
748 748 returned"""
749 749 if nodes is None:
750 750 rawmarkers = repo.obsstore
751 751 else:
752 752 rawmarkers = repo.obsstore.relevantmarkers(nodes)
753 753
754 754 for markerdata in rawmarkers:
755 755 yield marker(repo, markerdata)
756 756
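Typical consumption of the wrapper objects yielded above (sketch, assuming a `repo`; node.short abbreviates binary ids):

    for m in getmarkers(repo):
        print '%s -> %s' % (node.short(m.precnode()),
                            [node.short(s) for s in m.succnodes()])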
757 757 def relevantmarkers(repo, node):
758 758 """all obsolete markers relevant to some revision"""
759 759 for markerdata in repo.obsstore.relevantmarkers(node):
760 760 yield marker(repo, markerdata)
761 761
762 762
763 763 def precursormarkers(ctx):
764 764 """obsolete marker marking this changeset as a successor"""
765 765 for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
766 766 yield marker(ctx.repo(), data)
767 767
768 768 def successormarkers(ctx):
769 769 """obsolete marker making this changeset obsolete"""
770 770 for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
771 771 yield marker(ctx.repo(), data)
772 772
773 773 def allsuccessors(obsstore, nodes, ignoreflags=0):
774 774 """Yield node for every successor of <nodes>.
775 775
776 776 Some successors may be unknown locally.
777 777
778 778 This is a linear yield unsuited to detecting split changesets. It includes
779 779 initial nodes too."""
780 780 remaining = set(nodes)
781 781 seen = set(remaining)
782 782 while remaining:
783 783 current = remaining.pop()
784 784 yield current
785 785 for mark in obsstore.successors.get(current, ()):
786 786 # ignore marker flagged with specified flag
787 787 if mark[2] & ignoreflags:
788 788 continue
789 789 for suc in mark[1]:
790 790 if suc not in seen:
791 791 seen.add(suc)
792 792 remaining.add(suc)
793 793
794 794 def allprecursors(obsstore, nodes, ignoreflags=0):
795 795 """Yield node for every precursor of <nodes>.
796 796
797 797 Some precursors may be unknown locally.
798 798
799 799 This is a linear yield unsuited to detecting folded changesets. It includes
800 800 initial nodes too."""
801 801
802 802 remaining = set(nodes)
803 803 seen = set(remaining)
804 804 while remaining:
805 805 current = remaining.pop()
806 806 yield current
807 807 for mark in obsstore.precursors.get(current, ()):
808 808 # ignore marker flagged with specified flag
809 809 if mark[2] & ignoreflags:
810 810 continue
811 811 suc = mark[0]
812 812 if suc not in seen:
813 813 seen.add(suc)
814 814 remaining.add(suc)
815 815
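Usage sketch for the two traversals above, e.g. collecting every known rewrite of a binary node `n` while skipping bumped-fix markers (assumes a `repo`):

    latest = set(allsuccessors(repo.obsstore, [n], ignoreflags=bumpedfix))
    oldest = set(allprecursors(repo.obsstore, [n]))
    # per the docstrings, both sets include n itself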
816 816 def foreground(repo, nodes):
817 817 """return all nodes in the "foreground" of other nodes
818 818
819 819 The foreground of a revision is anything reachable using parent -> children
820 820 or precursor -> successor relation. It is very similar to "descendant" but
821 821 augmented with obsolescence information.
822 822
823 823 Beware that obsolescence cycles may arise in complex situations.
824 824 """
825 825 repo = repo.unfiltered()
826 826 foreground = set(repo.set('%ln::', nodes))
827 827 if repo.obsstore:
828 828 # We only need this complicated logic if there is obsolescence
829 829 # XXX will probably deserve an optimised revset.
830 830 nm = repo.changelog.nodemap
831 831 plen = -1
832 832 # compute the whole set of successors or descendants
833 833 while len(foreground) != plen:
834 834 plen = len(foreground)
835 835 succs = set(c.node() for c in foreground)
836 836 mutable = [c.node() for c in foreground if c.mutable()]
837 837 succs.update(allsuccessors(repo.obsstore, mutable))
838 838 known = (n for n in succs if n in nm)
839 839 foreground = set(repo.set('%ln::', known))
840 840 return set(c.node() for c in foreground)
841 841
842 842
843 843 def successorssets(repo, initialnode, cache=None):
844 844 """Return all sets of successors of the initial node
845 845
846 846 The successors set of a changeset A is a group of revisions that succeed
847 847 A. It succeeds A as a consistent whole, each revision being only a partial
848 848 replacement. The successors set contains non-obsolete changesets only.
849 849
850 850 This function returns the full list of successor sets which is why it
851 851 returns a list of tuples and not just a single tuple. Each tuple is a valid
852 852 successors set. Note that (A,) may be a valid successors set for changeset A
853 853 (see below).
854 854
855 855 In most cases, a changeset A will have a single element (e.g. the changeset
856 856 A is replaced by A') in its successors set. Though, it is also common for a
857 857 changeset A to have no elements in its successor set (e.g. the changeset
858 858 has been pruned). Therefore, the returned list of successors sets will be
859 859 [(A',)] or [], respectively.
860 860
861 861 When a changeset A is split into A' and B', however, it will result in a
862 862 successors set containing more than a single element, i.e. [(A',B')].
863 863 Divergent changesets will result in multiple successors sets, i.e. [(A',),
864 864 (A'')].
865 865
866 866 If a changeset A is not obsolete, then it will conceptually have no
867 867 successors set. To distinguish this from a pruned changeset, the successor
868 868 set will only contain itself, i.e. [(A,)].
869 869
870 870 Finally, successors unknown locally are considered to be pruned (obsoleted
871 871 without any successors).
872 872
873 873 The optional `cache` parameter is a dictionary that may contain precomputed
874 874 successors sets. It is meant to reuse the computation of a previous call to
875 875 `successorssets` when multiple calls are made at the same time. The cache
876 876 dictionary is updated in place. The caller is responsible for its life
877 877 span. Code that makes multiple calls to `successorssets` *must* use this
878 878 cache mechanism or suffer terrible performance.
879 879
880 880 """
881 881
882 882 succmarkers = repo.obsstore.successors
883 883
884 884 # Stack of nodes we search successors sets for
885 885 toproceed = [initialnode]
886 886 # set version of above list for fast loop detection
887 887 # element added to "toproceed" must be added here
888 888 stackedset = set(toproceed)
889 889 if cache is None:
890 890 cache = {}
891 891
892 892 # This while loop is the flattened version of a recursive search for
893 893 # successors sets
894 894 #
895 895 # def successorssets(x):
896 896 # successors = directsuccessors(x)
897 897 # ss = [[]]
898 898 # for succ in directsuccessors(x):
899 899 # # product as in itertools cartesian product
900 900 # ss = product(ss, successorssets(succ))
901 901 # return ss
902 902 #
903 903 # But we cannot use plain recursive calls here:
904 904 # - that would blow the python call stack
905 905 # - obsolescence markers may have cycles, we need to handle them.
906 906 #
907 907 # The `toproceed` list acts as our call stack. Every node we search
908 908 # successors sets for is stacked there.
909 909 #
910 910 # The `stackedset` is the set version of this stack, used to check if a
911 911 # node is already stacked. This check is used to detect cycles and prevent
912 912 # infinite loops.
913 913 #
914 914 # successors sets of all nodes are stored in the `cache` dictionary.
915 915 #
916 916 # After this while loop ends we use the cache to return the successors sets
917 917 # for the node requested by the caller.
918 918 while toproceed:
919 919 # Every iteration tries to compute the successors sets of the topmost
920 920 # node of the stack: CURRENT.
921 921 #
922 922 # There are four possible outcomes:
923 923 #
924 924 # 1) We already know the successors sets of CURRENT:
925 925 # -> mission accomplished, pop it from the stack.
926 926 # 2) Node is not obsolete:
927 927 # -> the node is its own successors sets. Add it to the cache.
928 928 # 3) We do not know successors set of direct successors of CURRENT:
929 929 # -> We add those successors to the stack.
930 930 # 4) We know successors sets of all direct successors of CURRENT:
931 931 # -> We can compute CURRENT successors set and add it to the
932 932 # cache.
933 933 #
934 934 current = toproceed[-1]
935 935 if current in cache:
936 936 # case (1): We already know the successors sets
937 937 stackedset.remove(toproceed.pop())
938 938 elif current not in succmarkers:
939 939 # case (2): The node is not obsolete.
940 940 if current in repo:
941 941 # We have a valid last successor.
942 942 cache[current] = [(current,)]
943 943 else:
944 944 # Final obsolete version is unknown locally.
945 945 # Do not count that as a valid successor
946 946 cache[current] = []
947 947 else:
948 948 # cases (3) and (4)
949 949 #
950 950 # We proceed in two phases. Phase 1 aims to distinguish case (3)
951 951 # from case (4):
952 952 #
953 953 # For each direct successors of CURRENT, we check whether its
954 954 # successors sets are known. If they are not, we stack the
955 955 # unknown node and proceed to the next iteration of the while
956 956 # loop. (case 3)
957 957 #
958 958 # During this step, we may detect obsolescence cycles: a node
959 959 # with unknown successors sets but already in the call stack.
960 960 # In such a situation, we arbitrarily set the successors sets of
961 961 # the node to nothing (node pruned) to break the cycle.
962 962 #
963 963 # If no break was encountered we proceed to phase 2.
964 964 #
965 965 # Phase 2 computes successors sets of CURRENT (case 4); see details
966 966 # in phase 2 itself.
967 967 #
968 968 # Note the two levels of iteration in each phase.
969 969 # - The first one handles obsolescence markers using CURRENT as
970 970 # precursor (successors markers of CURRENT).
971 971 #
972 972 # Having multiple entries here means divergence.
973 973 #
974 974 # - The second one handles successors defined in each marker.
975 975 #
976 976 # Having none means pruned node, multiple successors means split,
977 977 # a single successor is a standard replacement.
978 978 #
979 979 for mark in sorted(succmarkers[current]):
980 980 for suc in mark[1]:
981 981 if suc not in cache:
982 982 if suc in stackedset:
983 983 # cycle breaking
984 984 cache[suc] = []
985 985 else:
986 986 # case (3) If we have not computed successors sets
987 987 # of one of those successors we add it to the
988 988 # `toproceed` stack and stop all work for this
989 989 # iteration.
990 990 toproceed.append(suc)
991 991 stackedset.add(suc)
992 992 break
993 993 else:
994 994 continue
995 995 break
996 996 else:
997 997 # case (4): we know all successors sets of all direct
998 998 # successors
999 999 #
1000 1000 # Successors set contributed by each marker depends on the
1001 1001 # successors sets of all its "successors" nodes.
1002 1002 #
1003 1003 # Each different marker is a divergence in the obsolescence
1004 1004 # history. It contributes successors sets distinct from other
1005 1005 # markers.
1006 1006 #
1007 1007 # Within a marker, a successor may have divergent successors
1008 1008 # sets. In such a case, the marker will contribute multiple
1009 1009 # divergent successors sets. If multiple successors have
1010 1010 # divergent successors sets, a Cartesian product is used.
1011 1011 #
1012 1012 # At the end we post-process successors sets to remove
1013 1013 # duplicated entries and successors sets that are strict subsets of
1014 1014 # another one.
1015 1015 succssets = []
1016 1016 for mark in sorted(succmarkers[current]):
1017 1017 # successors sets contributed by this marker
1018 1018 markss = [[]]
1019 1019 for suc in mark[1]:
1020 1020 # Cartesian product with previous successors
1021 1021 productresult = []
1022 1022 for prefix in markss:
1023 1023 for suffix in cache[suc]:
1024 1024 newss = list(prefix)
1025 1025 for part in suffix:
1026 1026 # do not duplicate entries in a successors set;
1027 1027 # first entry wins.
1028 1028 if part not in newss:
1029 1029 newss.append(part)
1030 1030 productresult.append(newss)
1031 1031 markss = productresult
1032 1032 succssets.extend(markss)
1033 1033 # remove duplicates and subsets
1034 1034 seen = []
1035 1035 final = []
1036 1036 candidate = sorted(((set(s), s) for s in succssets if s),
1037 1037 key=lambda x: len(x[1]), reverse=True)
1038 1038 for setversion, listversion in candidate:
1039 1039 for seenset in seen:
1040 1040 if setversion.issubset(seenset):
1041 1041 break
1042 1042 else:
1043 1043 final.append(listversion)
1044 1044 seen.append(setversion)
1045 1045 final.reverse() # put small successors set first
1046 1046 cache[current] = final
1047 1047 return cache[initialnode]
1048 1048
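As the docstring insists, batched calls should share one cache dictionary (sketch, assuming a `repo` and an iterable of binary `nodes`):

    cache = {}
    for n in nodes:
        for succset in successorssets(repo, n, cache):
            pass  # each succset is a tuple of binary node ids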
1049 1049 def _knownrevs(repo, nodes):
1050 1050 """yield revision numbers of known nodes passed in parameters
1051 1051
1052 1052 Unknown revisions are silently ignored."""
1053 1053 torev = repo.changelog.nodemap.get
1054 1054 for n in nodes:
1055 1055 rev = torev(n)
1056 1056 if rev is not None:
1057 1057 yield rev
1058 1058
1059 1059 # mapping of 'set-name' -> <function to compute this set>
1060 1060 cachefuncs = {}
1061 1061 def cachefor(name):
1062 1062 """Decorator to register a function as computing the cache for a set"""
1063 1063 def decorator(func):
1064 1064 assert name not in cachefuncs
1065 1065 cachefuncs[name] = func
1066 1066 return func
1067 1067 return decorator
1068 1068
1069 1069 def getrevs(repo, name):
1070 1070 """Return the set of revisions that belong to the <name> set
1071 1071
1072 1072 Such access may compute the set and cache it for future use"""
1073 1073 repo = repo.unfiltered()
1074 1074 if not repo.obsstore:
1075 1075 return frozenset()
1076 1076 if name not in repo.obsstore.caches:
1077 1077 repo.obsstore.caches[name] = cachefuncs[name](repo)
1078 1078 return repo.obsstore.caches[name]
1079 1079
1080 1080 # To be simple we need to invalidate obsolescence cache when:
1081 1081 #
1082 1082 # - a new changeset is added
1083 1083 # - the public phase is changed
1084 1084 # - obsolescence markers are added
1085 1085 # - strip is used on a repo
1086 1086 def clearobscaches(repo):
1087 1087 """Remove all obsolescence related cache from a repo
1088 1088
1089 1089 This removes all caches in the obsstore if the obsstore already exists on
1090 1090 the repo.
1091 1091
1092 1092 (We could be smarter here given the exact event that triggers the cache
1093 1093 clearing)"""
1094 1094 # only clear the cache if there is obsstore data in this repo
1095 1095 if 'obsstore' in repo._filecache:
1096 1096 repo.obsstore.caches.clear()
1097 1097
1098 1098 @cachefor('obsolete')
1099 1099 def _computeobsoleteset(repo):
1100 1100 """the set of obsolete revisions"""
1101 1101 obs = set()
1102 1102 getrev = repo.changelog.nodemap.get
1103 1103 getphase = repo._phasecache.phase
1104 1104 for n in repo.obsstore.successors:
1105 1105 rev = getrev(n)
1106 1106 if rev is not None and getphase(repo, rev):
1107 1107 obs.add(rev)
1108 1108 return obs
1109 1109
1110 1110 @cachefor('unstable')
1111 1111 def _computeunstableset(repo):
1112 1112 """the set of non obsolete revisions with obsolete parents"""
1113 1113 revs = [(ctx.rev(), ctx) for ctx in
1114 1114 repo.set('(not public()) and (not obsolete())')]
1115 1115 revs.sort(key=lambda x:x[0])
1116 1116 unstable = set()
1117 1117 for rev, ctx in revs:
1118 1118 # A rev is unstable if one of its parents is obsolete or unstable
1119 1119 # this works since we traverse following growing rev order
1120 1120 if any((x.obsolete() or (x.rev() in unstable))
1121 1121 for x in ctx.parents()):
1122 1122 unstable.add(rev)
1123 1123 return unstable
1124 1124
1125 1125 @cachefor('suspended')
1126 1126 def _computesuspendedset(repo):
1127 1127 """the set of obsolete parents with non obsolete descendants"""
1128 1128 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
1129 1129 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
1130 1130
1131 1131 @cachefor('extinct')
1132 1132 def _computeextinctset(repo):
1133 1133 """the set of obsolete parents without non obsolete descendants"""
1134 1134 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
1135 1135
1136 1136
1137 1137 @cachefor('bumped')
1138 1138 def _computebumpedset(repo):
1139 1139 """the set of revs trying to obsolete public revisions"""
1140 1140 bumped = set()
1141 1141 # util function (avoid attribute lookup in the loop)
1142 1142 phase = repo._phasecache.phase # would be faster to grab the full list
1143 1143 public = phases.public
1144 1144 cl = repo.changelog
1145 1145 torev = cl.nodemap.get
1146 1146 for ctx in repo.set('(not public()) and (not obsolete())'):
1147 1147 rev = ctx.rev()
1148 1148 # We only evaluate mutable, non-obsolete revisions
1149 1149 node = ctx.node()
1150 1150 # (future) A cache of precursors may be worthwhile if splits are very common
1151 1151 for pnode in allprecursors(repo.obsstore, [node],
1152 1152 ignoreflags=bumpedfix):
1153 1153 prev = torev(pnode) # unfiltered! but so is phasecache
1154 1154 if (prev is not None) and (phase(repo, prev) <= public):
1155 1155 # we have a public precursor
1156 1156 bumped.add(rev)
1157 1157 break # Next draft!
1158 1158 return bumped
1159 1159
1160 1160 @cachefor('divergent')
1161 1161 def _computedivergentset(repo):
1162 1162 """the set of revs that compete to be the final successor of some revision.
1163 1163 """
1164 1164 divergent = set()
1165 1165 obsstore = repo.obsstore
1166 1166 newermap = {}
1167 1167 for ctx in repo.set('(not public()) - obsolete()'):
1168 1168 mark = obsstore.precursors.get(ctx.node(), ())
1169 1169 toprocess = set(mark)
1170 1170 seen = set()
1171 1171 while toprocess:
1172 1172 prec = toprocess.pop()[0]
1173 1173 if prec in seen:
1174 1174 continue # emergency cycle hanging prevention
1175 1175 seen.add(prec)
1176 1176 if prec not in newermap:
1177 1177 successorssets(repo, prec, newermap)
1178 1178 newer = [n for n in newermap[prec] if n]
1179 1179 if len(newer) > 1:
1180 1180 divergent.add(ctx.rev())
1181 1181 break
1182 1182 toprocess.update(obsstore.precursors.get(prec, ()))
1183 1183 return divergent
1184 1184
1185 1185
1186 1186 def createmarkers(repo, relations, flag=0, date=None, metadata=None):
1187 1187 """Add obsolete markers between changesets in a repo
1188 1188
1189 1189 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1190 1190 tuples. `old` and `news` are changectxs. metadata is an optional dictionary
1191 1191 containing metadata for this marker only. It is merged with the global
1192 1192 metadata specified through the `metadata` argument of this function.
1193 1193
1194 1194 Trying to obsolete a public changeset will raise an exception.
1195 1195
1196 1196 Current user and date are used except if specified otherwise in the
1197 1197 metadata attribute.
1198 1198
1199 1199 This function operates within a transaction of its own, but does
1200 1200 not take any lock on the repo.
1201 1201 """
1202 1202 # prepare metadata
1203 1203 if metadata is None:
1204 1204 metadata = {}
1205 1205 if 'user' not in metadata:
1206 1206 metadata['user'] = repo.ui.username()
1207 1207 tr = repo.transaction('add-obsolescence-marker')
1208 1208 try:
1209 1209 for rel in relations:
1210 1210 prec = rel[0]
1211 1211 sucs = rel[1]
1212 1212 localmetadata = metadata.copy()
1213 1213 if 2 < len(rel):
1214 1214 localmetadata.update(rel[2])
1215 1215
1216 1216 if not prec.mutable():
1217 1217 raise util.Abort("cannot obsolete public changeset: %s"
1218 1218 % prec,
1219 1219 hint='see "hg help phases" for details')
1220 1220 nprec = prec.node()
1221 1221 nsucs = tuple(s.node() for s in sucs)
1222 1222 npare = None
1223 1223 if not nsucs:
1224 1224 npare = tuple(p.node() for p in prec.parents())
1225 1225 if nprec in nsucs:
1226 1226 raise util.Abort("changeset %s cannot obsolete itself" % prec)
1227 1227 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1228 1228 date=date, metadata=localmetadata)
1229 1229 repo.filteredrevcache.clear()
1230 1230 tr.close()
1231 1231 finally:
1232 1232 tr.release()
1233 1233
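Usage sketch, assuming `old` and `new` are changectx objects and the caller already holds the repo lock (the function opens its own transaction but takes no lock):

    createmarkers(repo, [(old, (new,))])  # plain rewrite: old -> new
    createmarkers(repo, [(old, ())])      # prune old from the graph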
1234 1234 def isenabled(repo, option):
1235 1235 """Returns True if the given repository has the given obsolete option
1236 1236 enabled.
1237 1237 """
1238 1238 result = set(repo.ui.configlist('experimental', 'evolution'))
1239 1239 if 'all' in result:
1240 1240 return True
1241 1241
1242 1242 # For migration purposes, temporarily return true if the config hasn't been
1243 1243 # set but _enabled is true.
1244 1244 if len(result) == 0 and _enabled:
1245 1245 return True
1246 1246
1247 1247 # createmarkers must be enabled if other options are enabled
1248 1248 if ((allowunstableopt in result or exchangeopt in result) and
1249 1249 not createmarkersopt in result):
1250 1250 raise util.Abort(_("'createmarkers' obsolete option must be enabled "
1251 1251 "if other obsolete options are enabled"))
1252 1252
1253 1253 return option in result
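Callers gate behavior on the option constants defined at the top of this file; the underlying set comes from the `experimental.evolution` config list, where 'all' enables everything:

    if isenabled(repo, createmarkersopt):
        pass  # safe to record new markers here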