##// END OF EJS Templates
obsolete: remove two unused constants...
Augie Fackler -
r50071:3cd3aaba default
parent child Browse files
Show More
@@ -1,1150 +1,1148 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and news changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successors are call "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A in to A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. Marker format depend of the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70
71 71 import errno
72 72 import struct
73 73
74 74 from .i18n import _
75 75 from .pycompat import getattr
76 76 from .node import (
77 77 bin,
78 78 hex,
79 79 )
80 80 from . import (
81 81 encoding,
82 82 error,
83 83 obsutil,
84 84 phases,
85 85 policy,
86 86 pycompat,
87 87 util,
88 88 )
89 89 from .utils import (
90 90 dateutil,
91 91 hashutil,
92 92 )
93 93
parsers = policy.importmod('parsers')

# Short local aliases: the struct helpers are used heavily by the binary
# marker codecs below, and propertycache backs the obsstore lazy attributes.
_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# Options for obsolescence (config keys under 'experimental.evolution.*')
createmarkersopt = b'createmarkers'
allowunstableopt = b'allowunstable'
allowdivergenceopt = b'allowdivergence'
exchangeopt = b'exchange'
107 107
108 108 def _getoptionvalue(repo, option):
109 109 """Returns True if the given repository has the given obsolete option
110 110 enabled.
111 111 """
112 112 configkey = b'evolution.%s' % option
113 113 newconfig = repo.ui.configbool(b'experimental', configkey)
114 114
115 115 # Return the value only if defined
116 116 if newconfig is not None:
117 117 return newconfig
118 118
119 119 # Fallback on generic option
120 120 try:
121 121 return repo.ui.configbool(b'experimental', b'evolution')
122 122 except (error.ConfigError, AttributeError):
123 123 # Fallback on old-fashion config
124 124 # inconsistent config: experimental.evolution
125 125 result = set(repo.ui.configlist(b'experimental', b'evolution'))
126 126
127 127 if b'all' in result:
128 128 return True
129 129
130 130 # Temporary hack for next check
131 131 newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
132 132 if newconfig:
133 133 result.add(b'createmarkers')
134 134
135 135 return option in result
136 136
137 137
def getoptions(repo):
    """Returns dicts showing state of obsolescence features."""

    createmarkers = _getoptionvalue(repo, createmarkersopt)
    if not createmarkers:
        # if we cannot create obsolescence markers, we shouldn't exchange them
        # or perform operations that lead to instability or divergence
        unstable = divergence = exchange = False
    else:
        unstable = _getoptionvalue(repo, allowunstableopt)
        divergence = _getoptionvalue(repo, allowdivergenceopt)
        exchange = _getoptionvalue(repo, exchangeopt)

    return {
        createmarkersopt: createmarkers,
        allowunstableopt: unstable,
        allowdivergenceopt: divergence,
        exchangeopt: exchange,
    }
159 159
160 160
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    options = getoptions(repo)
    return options[option]
166 166
167 167
# Creating aliases for marker flags because evolve extension looks for
# bumpedfix in obsolete.py
bumpedfix = obsutil.bumpedfix
usingsha256 = obsutil.usingsha256

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
# obsolete marker operations, to avoid repeated decoding of metadata
# entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
# string contains a key and a value, separated by a colon ':', without
# additional encoding. Keys cannot contain '\0' or ':' and values
# cannot contain '\0'.
_fm0version = 0
# fixed part layout: uint8 numsuc, uint32 metadata size, uint8 flags,
# 20-byte predecessor node (big-endian)
_fm0fixed = b'>BIB20s'
_fm0node = b'20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)
198 198
199 199
def _fm0readmarkers(data, off, stop):
    """Yield version-0 markers decoded from data[off:stop].

    Each marker is yielded as a
    (prec, sucs, flags, metadata, date, parents) tuple.
    """
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off : off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = _fm0fnodesize * numsuc
            cur = data[off : off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off : off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(
                _(
                    b'parsing obsolete marker: metadata is too '
                    b'short, %d bytes expected, got %d'
                )
                % (mdsize, len(metadata))
            )
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        # version 0 smuggles the date through a 'date' metadata entry
        # shaped as "<float-seconds> <int-offset>"
        try:
            when, offset = metadata.pop(b'date', b'0 0').split(b' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0.0, 0)
        # parents of the predecessor are stored as p1/p2 metadata keys;
        # p0 records "parents known to be empty" explicitly
        parents = None
        if b'p2' in metadata:
            parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
        elif b'p1' in metadata:
            parents = (metadata.pop(b'p1', None),)
        elif b'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.items()))

        yield (pre, sucs, flags, metadata, date, parents)
254 254
255 255
def _fm0encodeonemarker(marker):
    """Encode a single marker using the version-0 binary format.

    Raises error.Abort for sha256 markers, which version 0 cannot store.
    """
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    # version 0 stores date and parents inside the metadata dict
    # (see _fm0readmarkers for the matching decoding)
    metadata[b'date'] = b'%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata[b'p0'] = b''
        for i, p in enumerate(parents, 1):
            metadata[b'p%i' % i] = hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata
275 275
276 276
277 277 def _fm0encodemeta(meta):
278 278 """Return encoded metadata string to string mapping.
279 279
280 280 Assume no ':' in key and no '\0' in both key and value."""
281 281 for key, value in meta.items():
282 282 if b':' in key or b'\0' in key:
283 283 raise ValueError(b"':' and '\0' are forbidden in metadata key'")
284 284 if b'\0' in value:
285 285 raise ValueError(b"':' is forbidden in metadata value'")
286 286 return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])
287 287
288 288
289 289 def _fm0decodemeta(data):
290 290 """Return string to string dictionary from encoded version."""
291 291 d = {}
292 292 for l in data.split(b'\0'):
293 293 if l:
294 294 key, value = l.split(b':', 1)
295 295 d[key] = value
296 296 return d
297 297
298 298
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
# obsolete marker operations, to avoid repeated decoding of metadata
# entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
# 0: parents data stored but no parent,
# 1: one parent stored,
# 2: two parents stored,
# 3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = b'>IdhHBBB'
_fm1nodesha1 = b'20s'
_fm1nodesha256 = b'32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
# NOTE: _fm1parentshift and _fm1parentmask were removed; they were unused
# anywhere in this module.
_fm1metapair = b'BB'
_fm1metapairsize = _calcsize(_fm1metapair)
345 343
346 344
def _fm1purereadmarkers(data, off, stop):
    """Pure-python decoder for version-1 markers in data[off:stop].

    Yields (prec, sucs, flags, metadata, date, parents) tuples.  Used as a
    fallback when the C implementation in 'parsers' is unavailable.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta = ufixed(data[off:o1])

        # the flag field tells whether node ids are sha1 (20 bytes) or
        # sha256 (32 bytes) wide
        if flags & sha2flag:
            nodefmt = sha2fmt
            nodesize = sha2size
        else:
            nodefmt = sha1fmt
            nodesize = sha1size

        (prec,) = unpack(nodefmt, data[o1 : o1 + nodesize])
        o1 += nodesize

        # read 0 or more successors
        if numsuc == 1:
            # fast path for the common single-successor case
            o2 = o1 + nodesize
            sucs = (data[o1:o2],)
        else:
            o2 = o1 + nodesize * numsuc
            sucs = unpack(nodefmt * numsuc, data[o1:o2])

        # read parents
        if numpar == noneflag:
            # special value: no parent data was recorded at all
            o3 = o2
            parents = None
        elif numpar == 1:
            o3 = o2 + nodesize
            parents = (data[o2:o3],)
        else:
            o3 = o2 + nodesize * numpar
            parents = unpack(nodefmt * numpar, data[o2:o3])

        # read metadata: first the (key size, value size) pair table, then
        # the concatenated key/value payloads
        off = o3 + metasize * nummeta
        metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in pycompat.xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes on disk; markers carry seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
408 406
409 407
def _fm1encodeonemarker(marker):
    """Encode a single marker using the version-1 binary format.

    Raises error.ProgrammingError when a metadata key or value exceeds the
    255-byte limit imposed by the single-byte size fields.
    """
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = 1 + numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1] // 60
    # data[0] is a placeholder for the total size, filled in below
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        # key and value sizes are each stored in a single uint8
        if lk > 255:
            msg = (
                b'obsstore metadata key cannot be longer than 255 bytes'
                b' (key "%s" is %u bytes)'
            ) % (key, lk)
            raise error.ProgrammingError(msg)
        if lv > 255:
            msg = (
                b'obsstore metadata value cannot be longer than 255 bytes'
                b' (value "%s" for key "%s" is %u bytes)'
            ) % (value, key, lv)
            raise error.ProgrammingError(msg)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return b''.join(data)
457 455
458 456
def _fm1readmarkers(data, off, stop):
    """Decode version-1 markers, preferring the native C parser."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, stop)
    # fall back to the pure-python implementation
    return _fm1purereadmarkers(data, off, stop)
464 462
465 463
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
# decoder(data, off, stop) yields marker tuples; encoder(marker) -> bytes
formats = {
    _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
    _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
}
472 470
473 471
def _readmarkerversion(data):
    """Return the format version stored in the first byte of *data*."""
    (version,) = _unpack(b'>B', data[0:1])
    return version
476 474
477 475
@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if diskversion not in formats:
        msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    if not off:
        # skip the 1-byte version header by default
        off = 1
    if stop is None:
        stop = len(data)
    return diskversion, formats[diskversion][0](data, off, stop)
490 488
491 489
def encodeheader(version=_fm0version):
    """Serialize the obsstore version header (a single unsigned byte)."""
    header = _pack(b'>B', version)
    return header
494 492
495 493
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield the binary encoding of *markers*, optionally preceded by the
    version header.

    Kept separate from flushmarkers(); it will be reused for markers
    exchange."""
    if addheader:
        yield encodeheader(version)
    encode = formats[version][1]
    for marker in markers:
        yield encode(marker)
504 502
505 503
@util.nogc
def _addsuccessors(successors, markers):
    """Index *markers* into the successors mapping (prec node -> markers)."""
    for marker in markers:
        precnode = marker[0]
        successors.setdefault(precnode, set()).add(marker)
510 508
511 509
@util.nogc
def _addpredecessors(predecessors, markers):
    """Index *markers* into the predecessors mapping (succ node -> markers)."""
    for marker in markers:
        for succnode in marker[1]:
            predecessors.setdefault(succnode, set()).add(marker)
517 515
518 516
@util.nogc
def _addchildren(children, markers):
    """Index *markers* by each recorded parent of their predecessor."""
    for marker in markers:
        parentnodes = marker[5]
        if parentnodes is None:
            # no parent data recorded for this marker
            continue
        for parentnode in parentnodes:
            children.setdefault(parentnode, set()).add(marker)
526 524
527 525
def _checkinvalidmarkers(repo, markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    # a nullid successor is never legitimate; it would mean "replaced by
    # the null changeset"
    nullid = repo.nullid
    for mark in markers:
        if nullid in mark[1]:
            raise error.Abort(
                _(
                    b'bad obsolescence marker detected: '
                    b'invalid successors nullid'
                )
            )
542 540
543 541
class obsstore:
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x)
    """

    fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
    # prec: nodeid, predecessors changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob in UTF-8, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    # None is used when no data has been recorded

    def __init__(self, repo, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        self.repo = repo
        # on-disk format used when creating a brand new obsstore file
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        # iterate over every known marker (triggers parsing on first use)
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # "is there at least one marker?" -- answered without a full parse
        # of the obsstore file when possible
        from . import statichttprepo

        if isinstance(self.repo, statichttprepo.statichttprepository):
            # If repo is accessed via static HTTP, then we can't use os.stat()
            # to just peek at the file size.
            return len(self._data) > 1
        if not self._cached('_all'):
            try:
                # > 1 because a one-byte file holds only the version header
                return self.svfs.stat(b'obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(
        self,
        transaction,
        prec,
        succs=(),
        flag=0,
        parents=None,
        date=None,
        metadata=None,
        ui=None,
    ):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        flag = int(flag)
        if metadata is None:
            metadata = {}
        if date is None:
            if b'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = dateutil.parsedate(metadata.pop(b'date'))
            elif ui is not None:
                date = ui.configdate(b'devel', b'default-date')
                if date is None:
                    date = dateutil.makedate()
            else:
                date = dateutil.makedate()
        # validate node lengths: 32 bytes for sha256 markers, 20 otherwise
        if flag & usingsha256:
            if len(prec) != 32:
                raise ValueError(prec)
            for succ in succs:
                if len(succ) != 32:
                    raise ValueError(succ)
        else:
            if len(prec) != 20:
                raise ValueError(prec)
            for succ in succs:
                if len(succ) != 20:
                    raise ValueError(succ)
        if prec in succs:
            raise ValueError(
                'in-marker cycle with %s' % pycompat.sysstr(hex(prec))
            )

        metadata = tuple(sorted(metadata.items()))
        for k, v in metadata:
            try:
                # might be better to reject non-ASCII keys
                k.decode('utf-8')
                v.decode('utf-8')
            except UnicodeDecodeError:
                raise error.ProgrammingError(
                    b'obsstore metadata must be valid UTF-8 sequence '
                    b'(key = %r, value = %r)'
                    % (pycompat.bytestr(k), pycompat.bytestr(v))
                )

        marker = (bytes(prec), tuple(succs), flag, metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(
                _(b'creating obsolete markers is not enabled on this repo')
            )
        # drop markers already on disk and duplicates within this batch
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs(b'obsstore', b'ab')
            try:
                offset = f.tell()
                transaction.add(b'obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get(b'obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
        transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw on-disk bytes of the obsstore file (empty if missing)
        return self.svfs.tryread(b'obsstore')

    @propertycache
    def _version(self):
        # format version read from disk, or the configured default for a
        # store that does not exist yet
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of every marker, parsed lazily from the raw data
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(self.repo, markers)
        return markers

    @propertycache
    def successors(self):
        # predecessor node -> set of markers (lazily built index)
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def predecessors(self):
        # successor node -> set of markers (lazily built index)
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        # parent-of-predecessor node -> set of markers (lazily built index)
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True if the given propertycache attribute has been computed
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        """Update in-memory state with freshly written markers."""
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        # only update the index mappings that have already been computed
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(self.repo, markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers are those with an empty successor tuple
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            # walk to the predecessors of the newly found markers
            pendingnodes = {m[0] for m in direct}
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
803 801
804 802
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint(b'format', b'obsstore-version')
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    else:
        # rely on the obsstore class default
        pass
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo, repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        # markers exist even though the feature is off: warn the user
        ui.warn(
            _(b'obsolete feature not enabled but %i markers found!\n')
            % len(list(store))
        )
    return store
822 820
823 821
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # sort in place, highest first, then take the first format we support
    versions.sort(reverse=True)
    for candidate in versions:
        if candidate in formats:
            return candidate
    return None
835 833
836 834
# arbitrary picked to fit into 8K limit from HTTP server
# you have to take in account:
# - the version header
# - the base85 encoding
_maxpayload = 5300
842 840
843 841
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    parts = []
    currentpart = None
    currentlen = _maxpayload * 2  # oversized: forces creation of a first part
    for marker in markers:
        encoded = _fm0encodeonemarker(marker)
        if len(encoded) + currentlen > _maxpayload:
            # start a fresh chunk
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(encoded)
        currentlen += len(encoded)
    header = _pack(b'>B', _fm0version)
    keys = {}
    for idx, part in enumerate(reversed(parts)):
        payload = b''.join([header] + part)
        keys[b'dump%i' % idx] = util.b85encode(payload)
    return keys
864 862
865 863
def listmarkers(repo):
    """List markers over pushkey"""
    if repo.obsstore:
        return _pushkeyescape(sorted(repo.obsstore))
    return {}
871 869
872 870
def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    # only 'dump*' keys are valid marker payloads
    if not key.startswith(b'dump'):
        repo.ui.warn(_(b'unknown key: %r') % key)
        return False
    # markers are append-only, so no previous value is ever expected
    if old:
        repo.ui.warn(_(b'unexpected old value for %r') % key)
        return False
    binarydata = util.b85decode(new)
    with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
        repo.obsstore.mergemarkers(tr, binarydata)
        repo.invalidatevolatilesets()
        return True
886 884
887 885
# mapping of 'set-name' -> <function to compute this set>
# populated via the @cachefor decorator below
cachefuncs = {}
890 888
891 889
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""

    def decorator(computer):
        # a set name may only be registered once
        if name in cachefuncs:
            msg = b"duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = computer
        return computer

    return decorator
903 901
904 902
def getrevs(repo, name):
    """Return the set of revision that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    with util.timedcm('getrevs %s', name):
        if not repo.obsstore:
            # no markers: every named set is trivially empty
            return frozenset()
        caches = repo.obsstore.caches
        if name not in caches:
            caches[name] = cachefuncs[name](repo)
        return caches[name]
916 914
917 915
# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related cache from a repo

    This removes all caches in obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if b'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()
935 933
936 934
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    phasecache = repo._phasecache
    return phasecache.getrevset(repo, phases.mutablephases)
940 938
941 939
@cachefor(b'obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    # a mutable revision is obsolete when a marker lists it as predecessor
    isobs = repo.obsstore.successors.__contains__
    return frozenset(r for r in _mutablerevs(repo) if isobs(getnode(r)))
949 947
950 948
@cachefor(b'orphan')
def _computeorphanset(repo):
    """Compute the set of non-obsolete revisions with obsolete parents."""
    parentrevs = repo.changelog.parentrevs
    obsolete = getrevs(repo, b'obsolete')
    candidates = _mutablerevs(repo) - obsolete
    orphans = set()
    # Walk in increasing revision order: a revision is an orphan when one of
    # its parents is obsolete or itself an orphan, and the growing-rev
    # traversal guarantees the parents' status is settled before the child.
    for rev in sorted(candidates):
        if any(p in obsolete or p in orphans for p in parentrevs(rev)):
            orphans.add(rev)
    return frozenset(orphans)
967 965
968 966
@cachefor(b'suspended')
def _computesuspendedset(repo):
    """Compute the set of obsolete revisions with non-obsolete descendants.

    An obsolete revision is "suspended" when it is an ancestor of an orphan
    revision."""
    ancestorsoforphans = repo.changelog.ancestors(getrevs(repo, b'orphan'))
    obsolete = getrevs(repo, b'obsolete')
    return frozenset(r for r in obsolete if r in ancestorsoforphans)
974 972
975 973
@cachefor(b'extinct')
def _computeextinctset(repo):
    """Compute the set of obsolete revisions without non-obsolete descendants.

    Extinct revisions are the obsolete ones that are not suspended."""
    obsolete = getrevs(repo, b'obsolete')
    suspended = getrevs(repo, b'suspended')
    return obsolete - suspended
980 978
981 979
@cachefor(b'phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions

    A mutable, non-obsolete revision is phase-divergent when one of its
    predecessors (markers flagged with `bumpedfix` excluded) is public."""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.index.get_rev
    tonode = cl.node
    obsstore = repo.obsstore
    for rev in repo.revs(b'(not public()) and (not obsolete())'):
        # We only evaluate mutable, non-obsolete revision
        node = tonode(rev)
        # (future) A cache of predecessors may worth if split is very common
        for pnode in obsutil.allpredecessors(
            obsstore, [node], ignoreflags=bumpedfix
        ):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break  # Next draft!
    return frozenset(bumped)
1006 1004
1007 1005
@cachefor(b'contentdivergent')
def _computecontentdivergentset(repo):
    """the set of rev that compete to be the final successors of some revision.

    A revision is content-divergent when one of its (transitive)
    predecessors has more than one non-empty set of successors."""
    divergent = set()
    obsstore = repo.obsstore
    # cache of successors sets, shared across obsutil.successorssets calls
    newermap = {}
    tonode = repo.changelog.node
    for rev in repo.revs(b'(not public()) - obsolete()'):
        node = tonode(rev)
        # markers whose successors include this node
        mark = obsstore.predecessors.get(node, ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            # marker[0] is the predecessor node of the marker
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            # keep only the non-empty successors sets
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                # the predecessor has several surviving successors sets:
                # <rev> competes with other revisions, hence is divergent
                divergent.add(rev)
                break
            # walk further back through the predecessors chain
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return frozenset(divergent)
1033 1031
1034 1032
def makefoldid(relation, user):
    """Build a short identifier for a fold from its participants and user."""
    digest = hashutil.sha1(user)
    for ctx in relation[0] + relation[1]:
        digest.update(b'%d' % ctx.rev())
        digest.update(ctx.node())
    # A fold id only has to be unique among folds competing for the same
    # successors, so a small (8 hex digit) identifier is enough and smaller
    # IDs save space.
    return hex(digest.digest())[:8]
1044 1042
1045 1043
def createmarkers(
    repo, relations, flag=0, date=None, metadata=None, operation=None
):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
    tuple. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.
    Any string values in metadata must be UTF-8 bytes.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if b'user' not in metadata:
        # devel.user.obsmarker overrides the recorded user (for tests)
        luser = (
            repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
        )
        metadata[b'user'] = encoding.fromlocal(luser)

    # Operation metadata handling
    useoperation = repo.ui.configbool(
        b'experimental', b'evolution.track-operation'
    )
    if useoperation and operation:
        metadata[b'operation'] = operation

    # Effect flag metadata handling
    saveeffectflag = repo.ui.configbool(
        b'experimental', b'evolution.effect-flags'
    )

    with repo.transaction(b'add-obsolescence-marker') as tr:
        markerargs = []
        for rel in relations:
            predecessors = rel[0]
            if not isinstance(predecessors, tuple):
                # preserve compat with old API until all caller are migrated
                predecessors = (predecessors,)
            if len(predecessors) > 1 and len(rel[1]) != 1:
                msg = b'Fold markers can only have 1 successors, not %d'
                raise error.ProgrammingError(msg % len(rel[1]))
            foldid = None
            foldsize = len(predecessors)
            if 1 < foldsize:
                # multiple predecessors, one successor: this is a fold
                foldid = makefoldid(rel, metadata[b'user'])
            for foldidx, prec in enumerate(predecessors, 1):
                sucs = rel[1]
                localmetadata = metadata.copy()
                if len(rel) > 2:
                    # per-relation metadata overrides the global metadata
                    localmetadata.update(rel[2])
                if foldid is not None:
                    # tag every marker of the fold with its id, 1-based index
                    # and size so the fold can be reassembled from markers
                    localmetadata[b'fold-id'] = foldid
                    localmetadata[b'fold-idx'] = b'%d' % foldidx
                    localmetadata[b'fold-size'] = b'%d' % foldsize

                if not prec.mutable():
                    raise error.Abort(
                        _(b"cannot obsolete public changeset: %s") % prec,
                        hint=b"see 'hg help phases' for details",
                    )
                nprec = prec.node()
                nsucs = tuple(s.node() for s in sucs)
                npare = None
                if not nsucs:
                    # a prune marker (no successors) records the parents of
                    # the pruned changeset
                    npare = tuple(p.node() for p in prec.parents())
                if nprec in nsucs:
                    raise error.Abort(
                        _(b"changeset %s cannot obsolete itself") % prec
                    )

                # Effect flag can be different by relation
                if saveeffectflag:
                    # The effect flag is saved in a versioned field name for
                    # future evolution
                    effectflag = obsutil.geteffectflag(prec, sucs)
                    localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag

                # Creating the marker causes the hidden cache to become
                # invalid, which causes recomputation when we ask for
                # prec.parents() above. Resulting in n^2 behavior. So let's
                # prepare all of the args first, then create the markers.
                markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(
                tr,
                nprec,
                nsucs,
                flag,
                parents=npare,
                date=date,
                metadata=localmetadata,
                ui=repo.ui,
            )
        repo.filteredrevcache.clear()
General Comments 0
You need to be logged in to leave comments. Login now