##// END OF EJS Templates
obsolete: allow multiple predecessors in createmarkers...
Boris Feld -
r39958:6335c0de default
parent child Browse files
Show More
@@ -1,1034 +1,1040 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depend of the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 encoding,
78 78 error,
79 79 node,
80 80 obsutil,
81 81 phases,
82 82 policy,
83 83 pycompat,
84 84 util,
85 85 )
86 86 from .utils import dateutil
87 87
parsers = policy.importmod(r'parsers')

# Short aliases for the struct helpers; these are used inside tight parsing
# loops, where module-local names avoid repeated attribute lookups.
_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Option names for the obsolescence feature (config: experimental.evolution.*)
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'
103 103
def _getoptionvalue(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.

    Lookup order:
    1. the explicit boolean 'experimental.evolution.<option>'
    2. the generic boolean 'experimental.evolution'
    3. the legacy list-valued 'experimental.evolution' (e.g. 'all' or a
       list of option names), reached when the boolean read fails
    """
    configkey = 'evolution.%s' % option
    newconfig = repo.ui.configbool('experimental', configkey)

    # Return the value only if defined
    if newconfig is not None:
        return newconfig

    # Fallback on generic option
    try:
        return repo.ui.configbool('experimental', 'evolution')
    except (error.ConfigError, AttributeError):
        # Fallback on old-fashion config
        # inconsistent config: experimental.evolution
        result = set(repo.ui.configlist('experimental', 'evolution'))

        if 'all' in result:
            return True

        # For migration purposes, temporarily return true if the config hasn't
        # been set but _enabled is true.
        if len(result) == 0 and _enabled:
            return True

        # Temporary hack for next check
        newconfig = repo.ui.config('experimental', 'evolution.createmarkers')
        if newconfig:
            result.add('createmarkers')

        return option in result
137 137
def getoptions(repo):
    """Return a dict mapping each obsolescence option name to its state."""
    options = {
        createmarkersopt: _getoptionvalue(repo, createmarkersopt),
        allowunstableopt: _getoptionvalue(repo, allowunstableopt),
        exchangeopt: _getoptionvalue(repo, exchangeopt),
    }

    # every other obsolescence feature depends on marker creation
    if not options[createmarkersopt]:
        if options[allowunstableopt] or options[exchangeopt]:
            raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                                "if other obsolete options are enabled"))

    return options
155 155
def isenabled(repo, option):
    """Return True when the obsolescence feature ``option`` is enabled."""
    state = getoptions(repo)
    return state[option]
161 161
# Creating aliases for marker flags because the third-party evolve extension
# looks for bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix
usingsha256 = obsutil.usingsha256
166 166
167 167 ## Parsing and writing of version "0"
168 168 #
169 169 # The header is followed by the markers. Each marker is made of:
170 170 #
171 171 # - 1 uint8 : number of new changesets "N", can be zero.
172 172 #
173 173 # - 1 uint32: metadata size "M" in bytes.
174 174 #
175 175 # - 1 byte: a bit field. It is reserved for flags used in common
176 176 # obsolete marker operations, to avoid repeated decoding of metadata
177 177 # entries.
178 178 #
179 179 # - 20 bytes: obsoleted changeset identifier.
180 180 #
181 181 # - N*20 bytes: new changesets identifiers.
182 182 #
183 183 # - M bytes: metadata as a sequence of nul-terminated strings. Each
184 184 # string contains a key and a value, separated by a colon ':', without
185 185 # additional encoding. Keys cannot contain '\0' or ':' and values
186 186 # cannot contain '\0'.
_fm0version = 0
# fixed part of a fm0 marker: numsuc (uint8), metadata size (uint32),
# flags (uint8), obsoleted node (20 bytes) -- see format comment above
_fm0fixed = '>BIB20s'
_fm0node = '20s'  # one 20-byte node identifier
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
192 192
def _fm0readmarkers(data, off, stop):
    """Yield fm0 markers decoded from ``data[off:stop]``.

    Each yielded marker is a (prec, sucs, flags, metadata, date, parents)
    tuple, matching ``obsstore.fields``.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement (successor nodes)
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        # fm0 smuggles the date inside the metadata as 'when offset'
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        # fm0 also smuggles parents through metadata keys: 'p1'/'p2' hold the
        # parent nodes, bare 'p0' means "explicitly recorded no parents"
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        # remaining metadata entries, as a sorted, hashable tuple of pairs
        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
243 243
def _fm0encodeonemarker(marker):
    """Return the fm0 binary encoding of one marker tuple."""
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        # fm0 only has room for 20-byte nodes
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    # the date travels through the metadata dict ('%r' keeps the float's repr)
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
263 263
264 264 def _fm0encodemeta(meta):
265 265 """Return encoded metadata string to string mapping.
266 266
267 267 Assume no ':' in key and no '\0' in both key and value."""
268 268 for key, value in meta.iteritems():
269 269 if ':' in key or '\0' in key:
270 270 raise ValueError("':' and '\0' are forbidden in metadata key'")
271 271 if '\0' in value:
272 272 raise ValueError("':' is forbidden in metadata value'")
273 273 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
274 274
275 275 def _fm0decodemeta(data):
276 276 """Return string to string dictionary from encoded version."""
277 277 d = {}
278 278 for l in data.split('\0'):
279 279 if l:
280 280 key, value = l.split(':')
281 281 d[key] = value
282 282 return d
283 283
284 284 ## Parsing and writing of version "1"
285 285 #
286 286 # The header is followed by the markers. Each marker is made of:
287 287 #
288 288 # - uint32: total size of the marker (including this field)
289 289 #
290 290 # - float64: date in seconds since epoch
291 291 #
292 292 # - int16: timezone offset in minutes
293 293 #
294 294 # - uint16: a bit field. It is reserved for flags used in common
295 295 # obsolete marker operations, to avoid repeated decoding of metadata
296 296 # entries.
297 297 #
298 298 # - uint8: number of successors "N", can be zero.
299 299 #
300 300 # - uint8: number of parents "P", can be zero.
301 301 #
302 302 # 0: parents data stored but no parent,
303 303 # 1: one parent stored,
304 304 # 2: two parents stored,
305 305 # 3: no parent data stored
306 306 #
307 307 # - uint8: number of metadata entries M
308 308 #
309 309 # - 20 or 32 bytes: predecessor changeset identifier.
310 310 #
311 311 # - N*(20 or 32) bytes: successors changesets identifiers.
312 312 #
313 313 # - P*(20 or 32) bytes: parents of the predecessors changesets.
314 314 #
315 315 # - M*(uint8, uint8): size of all metadata entries (key and value)
316 316 #
317 317 # - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
# fixed part of a fm1 marker: total size (uint32), seconds since epoch
# (float64), tz offset in minutes (int16), flags (uint16), numsuc (uint8),
# numpar (uint8), nummeta (uint8), predecessor node (20 bytes)
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3  # value of the parent-count field meaning "no parent data"
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'  # (key length, value length), one pair per metadata entry
_fm1metapairsize = _calcsize(_fm1metapair)
330 330
def _fm1purereadmarkers(data, off, stop):
    """Pure-python fm1 decoder: yield marker tuples from ``data[off:stop]``.

    Fallback for when the C implementation in ``parsers`` is unavailable.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part; 't' is the total marker size field (the offsets
        # below are advanced from the individual field sizes instead)
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                # fast path: slice directly instead of building a format string
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents (noneflag means no parent data was recorded)
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (key size, value size) pairs, then the
        # concatenated key/value payloads
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in pycompat.xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes; markers carry it in seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
404 404
def _fm1encodeonemarker(marker):
    """Return the fm1 binary encoding of one marker tuple."""
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size (sha256 markers use 32-byte nodes)
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    # data[0] is a placeholder for the total size, filled in once known
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        # metadata sizes are stored as uint8, hence the 255-byte hard limit
        if lk > 255:
            msg = ('obsstore metadata key cannot be longer than 255 bytes'
                   ' (key "%s" is %u bytes)') % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = ('obsstore metadata value cannot be longer than 255 bytes'
                   ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    # metadata payloads follow the fixed/struct-packed part verbatim
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
448 448
def _fm1readmarkers(data, off, stop):
    """Decode fm1 markers, preferring the C implementation when available."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, stop)
    # pure-python fallback
    return _fm1purereadmarkers(data, off, stop)
454 454
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
# decoder: (data, off, stop) -> iterator of marker tuples
# encoder: one marker tuple -> encoded string
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
459 459
def _readmarkerversion(data):
    """Return the format version stored in the first byte of ``data``."""
    return _unpack('>B', data[0:1])[0]
462 462
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Return (version, iterator of markers) decoded from raw obsstore data."""
    diskversion = _readmarkerversion(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    if not off:
        off = 1  # by default, skip the one-byte version header
    if stop is None:
        stop = len(data)
    decoder = formats[diskversion][0]
    return diskversion, decoder(data, off, stop)
475 475
def encodeheader(version=_fm0version):
    """Return the obsstore file header: a single version byte."""
    return _pack('>B', version)
478 478
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield the binary encoding of ``markers`` for the given format version.

    Kept separate from flushmarkers(), it will be reused for markers
    exchange.
    """
    encode = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encode(marker)
487 487
@util.nogc
def _addsuccessors(successors, markers):
    """Index ``markers`` into ``successors``, keyed by predecessor node."""
    for mark in markers:
        prec = mark[0]
        if prec not in successors:
            successors[prec] = set()
        successors[prec].add(mark)
492 492
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index ``markers`` into ``predecessors``, keyed by each successor node."""
    for mark in markers:
        for suc in mark[1]:
            bucket = predecessors.setdefault(suc, set())
            bucket.add(mark)
498 498
@util.nogc
def _addchildren(children, markers):
    """Index ``markers`` into ``children``, keyed by each recorded parent."""
    for mark in markers:
        parents = mark[5]
        if parents is None:
            # this marker recorded no parent information
            continue
        for p in parents:
            children.setdefault(p, set()).add(mark)
506 506
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    if any(node.nullid in mark[1] for mark in markers):
        raise error.Abort(_('bad obsolescence marker detected: '
                            'invalid successors nullid'))
517 517
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with several mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, predecessor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob in UTF-8, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    #          None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        # format version used when writing a brand new obsstore file; an
        # existing file keeps whatever version its header declares
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        """Iterate over every marker in the store."""
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # Cheap emptiness check: when markers are not loaded yet, a stat()
        # of the obsstore file is enough (size > 1 skips the version byte).
        if not self._cached(r'_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__  # python 3 truth-testing protocol

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        # nodes must be full 20-byte binary identifiers
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # sorted tuple so the marker is hashable and deterministic
        metadata = tuple(sorted(metadata.iteritems()))
        for k, v in metadata:
            try:
                # might be better to reject non-ASCII keys
                k.decode('utf-8')
                v.decode('utf-8')
            except UnicodeDecodeError:
                raise error.ProgrammingError(
                    'obsstore metadata must be valid UTF-8 sequence '
                    '(key = %r, value = %r)'
                    % (pycompat.bytestr(k), pycompat.bytestr(v)))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            # skip markers already stored and duplicates within this batch
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()'  here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw bytes of the on-disk obsstore file (empty string if missing)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            # no on-disk data yet: fall back to the requested default
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of every marker in the store, in on-disk order
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # predecessor node -> set of markers rewriting it
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        # successor node -> set of markers producing it
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        # recorded parent node -> set of markers about its children
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the named propertycache has already been computed
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        """Update the in-memory state with freshly written markers."""
        markers = list(markers) # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        # only refresh the index mappings that were computed already; the
        # others will pick the new markers up on first access
        if self._cached(r'successors'):
            _addsuccessors(self.successors, markers)
        if self._cached(r'predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached(r'children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers are markers with an empty successor list
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # recurse on the predecessors of the markers found at this level
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
749 749
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # developer config: format.obsstore-version
    requested = ui.configint('format', 'obsstore-version')
    kwargs = {}
    if requested is not None:
        kwargs[r'defaultformat'] = requested
    # markers stay read-only unless creation is explicitly enabled
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
765 765
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # highest version first; note: this sorts the caller's list in place
    versions.sort(reverse=True)
    return next((v for v in versions if v in formats), None)
777 777
# arbitrarily picked to fit into the 8K limit from the HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
783 783
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            # start a new chunk; a single oversized marker still gets a part
            # of its own since the limit is checked before appending
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    # NOTE(review): the parts are enumerated in *reversed* order before being
    # named 'dump0', 'dump1', ... -- presumably deliberate for exchange
    # ordering; confirm before changing.
    for idx, part in enumerate(reversed(parts)):
        # each chunk is self-contained: version header + markers
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
804 804
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
810 810
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    ``key`` is expected to look like 'dumpN' (as produced by
    _pushkeyescape) and ``new`` carries base85-encoded marker data.
    Returns False (after a warning) on an unexpected key or old value,
    True once the markers have been merged into the local obsstore.
    """
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, data)
        # new markers can change obsolete/orphan/... sets
        repo.invalidatevolatilesets()
        return True
824 824
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}

def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        existing = cachefuncs.get(name)
        if existing is not None:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, existing))
        cachefuncs[name] = func
        return func
    return decorator
836 836
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    store = repo.obsstore
    if not store:
        return frozenset()
    caches = store.caches
    if name not in caches:
        caches[name] = cachefuncs[name](repo)
    return caches[name]
847 847
# To be simple we need to invalidate the obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Drop every obsolescence-related cache from ``repo``.

    Caches are only cleared when an obsstore already exists for the
    repository.

    (We could be smarter here given the exact event that triggered the
    cache clearing.)"""
    if 'obsstore' not in repo._filecache:
        # no obsstore instantiated for this repo: nothing to clear
        return
    repo.obsstore.caches.clear()
865 865
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    # delegate to the phase cache (phases.mutablephases covers them all)
    return repo._phasecache.getrevset(repo, phases.mutablephases)
869 869
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    tonode = repo.changelog.node
    successors = repo.obsstore.successors
    # a mutable revision is obsolete when some marker uses it as predecessor
    return {r for r in _mutablerevs(repo) if tonode(r) in successors}
878 878
@cachefor('orphan')
def _computeorphanset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    parentrevs = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, 'obsolete')
    candidates = mutable - obsolete
    unstable = set()
    # ascending order guarantees a parent is classified before its children
    for rev in sorted(candidates):
        # a rev is unstable if one of its parents is obsolete or unstable
        if any(p in obsolete or p in unstable for p in parentrevs(rev)):
            unstable.add(rev)
    return unstable
895 895
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # obsolete revisions that still have an orphan descendant are "suspended"
    withorphans = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return {rev for rev in getrevs(repo, 'obsolete') if rev in withorphans}
901 901
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    # extinct = obsolete revisions that are not keeping any orphan alive
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
906 906
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions

    A mutable, non-obsolete revision is phase-divergent ("bumped") when any
    of its predecessors (transitively, ignoring bumpedfix markers) is a
    public changeset."""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    for rev in repo.revs('(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revision
        node = tonode(rev)
        # (future) A cache of predecessors may worth if split is very common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            # prev is None when the predecessor node is unknown locally
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break # Next draft!
    return bumped
929 929
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.

    A mutable, non-obsolete revision is content-divergent when one of its
    (transitive) predecessors has more than one set of living successors.
    """
    divergent = set()
    obsstore = repo.obsstore
    # cache shared across revisions: predecessor node -> successors sets
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs('(not public()) - obsolete()'):
        node = tonode(rev)
        # markers whose successors include `node`
        mark = obsstore.predecessors.get(node, ())
        # worklist of markers to walk back through the predecessor graph
        toprocess = set(mark)
        seen = set()
        while toprocess:
            # each marker's first element is its predecessor node
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            # drop empty sets (pruned branches); more than one non-empty
            # set means the predecessor has competing live successors
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(rev)
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent
956 956
def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.
    Any string values in metadata must be UTF-8 bytes.

    For backward compatibility, `old` may also be a single changectx instead
    of a tuple, in which case it is treated as a one-element tuple.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username()
        metadata['user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool('experimental',
                                        'evolution.effect-flags')

    with repo.transaction('add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all caller are migrated
                predecessors = (predecessors,)
            # A "fold" (several predecessors) must target exactly one
            # successor, otherwise the history graph would be ambiguous.
            if 1 < len(predecessors) and len(rel[1]) != 1:
                msg = 'Fold markers can only have 1 successors, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            for prec in predecessors:
                sucs = rel[1]
                # per-marker metadata overrides the global metadata
                localmetadata = metadata.copy()
                if 2 < len(rel):
                    localmetadata.update(rel[2])

                if not prec.mutable():
                    raise error.Abort(_("cannot obsolete public changeset: %s")
                                      % prec,
                                      hint="see 'hg help phases' for details")
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    # prune marker: record the parents so the prune point can
                    # be located even after the changeset is gone
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(_("changeset %s cannot obsolete itself")
                                      % prec)

                # Effect flag can be different by relation
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above. Resulting in n^2 behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
    repo.filteredrevcache.clear()
General Comments 0
You need to be logged in to leave comments. Login now