tracing: add a couple of trace points on obsolete and repoview...
Augie Fackler
r43534:4353942b default
@@ -1,1143 +1,1144 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. The marker format depends on the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import hashlib
74 74 import struct
75 75
76 76 from .i18n import _
77 77 from .pycompat import getattr
78 78 from . import (
79 79 encoding,
80 80 error,
81 81 node,
82 82 obsutil,
83 83 phases,
84 84 policy,
85 85 pycompat,
86 86 util,
87 87 )
88 88 from .utils import dateutil
89 89
90 90 parsers = policy.importmod(r'parsers')
91 91
92 92 _pack = struct.pack
93 93 _unpack = struct.unpack
94 94 _calcsize = struct.calcsize
95 95 propertycache = util.propertycache
96 96
97 97 # Options for obsolescence
98 98 createmarkersopt = b'createmarkers'
99 99 allowunstableopt = b'allowunstable'
100 100 exchangeopt = b'exchange'
101 101
102 102
103 103 def _getoptionvalue(repo, option):
104 104 """Returns True if the given repository has the given obsolete option
105 105 enabled.
106 106 """
107 107 configkey = b'evolution.%s' % option
108 108 newconfig = repo.ui.configbool(b'experimental', configkey)
109 109
110 110 # Return the value only if defined
111 111 if newconfig is not None:
112 112 return newconfig
113 113
114 114 # Fallback on generic option
115 115 try:
116 116 return repo.ui.configbool(b'experimental', b'evolution')
117 117 except (error.ConfigError, AttributeError):
118 118 # Fall back on old-fashioned config
119 119 # inconsistent config: experimental.evolution
120 120 result = set(repo.ui.configlist(b'experimental', b'evolution'))
121 121
122 122 if b'all' in result:
123 123 return True
124 124
125 125 # Temporary hack for next check
126 126 newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
127 127 if newconfig:
128 128 result.add(b'createmarkers')
129 129
130 130 return option in result
131 131
132 132
133 133 def getoptions(repo):
134 134 """Returns dicts showing state of obsolescence features."""
135 135
136 136 createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
137 137 unstablevalue = _getoptionvalue(repo, allowunstableopt)
138 138 exchangevalue = _getoptionvalue(repo, exchangeopt)
139 139
140 140 # createmarkers must be enabled if other options are enabled
141 141 if (unstablevalue or exchangevalue) and not createmarkersvalue:
142 142 raise error.Abort(
143 143 _(
144 144 b"'createmarkers' obsolete option must be enabled "
145 145 b"if other obsolete options are enabled"
146 146 )
147 147 )
148 148
149 149 return {
150 150 createmarkersopt: createmarkersvalue,
151 151 allowunstableopt: unstablevalue,
152 152 exchangeopt: exchangevalue,
153 153 }
154 154
155 155
156 156 def isenabled(repo, option):
157 157 """Returns True if the given repository has the given obsolete option
158 158 enabled.
159 159 """
160 160 return getoptions(repo)[option]
161 161
162 162
163 163 # Creating aliases for marker flags because evolve extension looks for
164 164 # bumpedfix in obsolete.py
165 165 bumpedfix = obsutil.bumpedfix
166 166 usingsha256 = obsutil.usingsha256
167 167
168 168 ## Parsing and writing of version "0"
169 169 #
170 170 # The header is followed by the markers. Each marker is made of:
171 171 #
172 172 # - 1 uint8 : number of new changesets "N", can be zero.
173 173 #
174 174 # - 1 uint32: metadata size "M" in bytes.
175 175 #
176 176 # - 1 byte: a bit field. It is reserved for flags used in common
177 177 # obsolete marker operations, to avoid repeated decoding of metadata
178 178 # entries.
179 179 #
180 180 # - 20 bytes: obsoleted changeset identifier.
181 181 #
182 182 # - N*20 bytes: new changesets identifiers.
183 183 #
184 184 # - M bytes: metadata as a sequence of nul-terminated strings. Each
185 185 # string contains a key and a value, separated by a colon ':', without
186 186 # additional encoding. Keys cannot contain '\0' or ':' and values
187 187 # cannot contain '\0'.
188 188 _fm0version = 0
189 189 _fm0fixed = b'>BIB20s'
190 190 _fm0node = b'20s'
191 191 _fm0fsize = _calcsize(_fm0fixed)
192 192 _fm0fnodesize = _calcsize(_fm0node)
193 193
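As a sanity check of the version-0 layout, here is a hedged, self-contained round-trip of one marker through the same '>BIB20s' format (1 + 4 + 1 + 20 = 26 fixed bytes); the node and metadata values are made up for illustration:

import struct

prec = b'\x11' * 20
succ = b'\x22' * 20
meta = b'user:alice\x00date:0 0'
raw = struct.pack('>BIB20s', 1, len(meta), 0, prec) + succ + meta

numsuc, mdsize, flags, pre = struct.unpack('>BIB20s', raw[:26])
sucs = struct.unpack('20s' * numsuc, raw[26:26 + 20 * numsuc])
assert (numsuc, mdsize, flags, pre, sucs) == (1, len(meta), 0, prec, (succ,))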
194 194
195 195 def _fm0readmarkers(data, off, stop):
196 196 # Loop on markers
197 197 while off < stop:
198 198 # read fixed part
199 199 cur = data[off : off + _fm0fsize]
200 200 off += _fm0fsize
201 201 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
202 202 # read replacement
203 203 sucs = ()
204 204 if numsuc:
205 205 s = _fm0fnodesize * numsuc
206 206 cur = data[off : off + s]
207 207 sucs = _unpack(_fm0node * numsuc, cur)
208 208 off += s
209 209 # read metadata
210 210 # (metadata will be decoded on demand)
211 211 metadata = data[off : off + mdsize]
212 212 if len(metadata) != mdsize:
213 213 raise error.Abort(
214 214 _(
215 215 b'parsing obsolete marker: metadata is too '
216 216 b'short, %d bytes expected, got %d'
217 217 )
218 218 % (mdsize, len(metadata))
219 219 )
220 220 off += mdsize
221 221 metadata = _fm0decodemeta(metadata)
222 222 try:
223 223 when, offset = metadata.pop(b'date', b'0 0').split(b' ')
224 224 date = float(when), int(offset)
225 225 except ValueError:
226 226 date = (0.0, 0)
227 227 parents = None
228 228 if b'p2' in metadata:
229 229 parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
230 230 elif b'p1' in metadata:
231 231 parents = (metadata.pop(b'p1', None),)
232 232 elif b'p0' in metadata:
233 233 parents = ()
234 234 if parents is not None:
235 235 try:
236 236 parents = tuple(node.bin(p) for p in parents)
237 237 # if parent content is not a nodeid, drop the data
238 238 for p in parents:
239 239 if len(p) != 20:
240 240 parents = None
241 241 break
242 242 except TypeError:
243 243 # if content cannot be translated to nodeid drop the data.
244 244 parents = None
245 245
246 246 metadata = tuple(sorted(pycompat.iteritems(metadata)))
247 247
248 248 yield (pre, sucs, flags, metadata, date, parents)
249 249
250 250
251 251 def _fm0encodeonemarker(marker):
252 252 pre, sucs, flags, metadata, date, parents = marker
253 253 if flags & usingsha256:
254 254 raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
255 255 metadata = dict(metadata)
256 256 time, tz = date
257 257 metadata[b'date'] = b'%r %i' % (time, tz)
258 258 if parents is not None:
259 259 if not parents:
260 260 # mark that we explicitly recorded no parents
261 261 metadata[b'p0'] = b''
262 262 for i, p in enumerate(parents, 1):
263 263 metadata[b'p%i' % i] = node.hex(p)
264 264 metadata = _fm0encodemeta(metadata)
265 265 numsuc = len(sucs)
266 266 format = _fm0fixed + (_fm0node * numsuc)
267 267 data = [numsuc, len(metadata), flags, pre]
268 268 data.extend(sucs)
269 269 return _pack(format, *data) + metadata
270 270
271 271
272 272 def _fm0encodemeta(meta):
273 273 """Return encoded metadata string to string mapping.
274 274
275 275 Assume no ':' in key and no '\0' in either key or value."""
276 276 for key, value in pycompat.iteritems(meta):
277 277 if b':' in key or b'\0' in key:
278 278 raise ValueError(b"':' and '\0' are forbidden in metadata key")
279 279 if b'\0' in value:
280 280 raise ValueError(b"'\\0' is forbidden in metadata value")
281 281 return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])
282 282
283 283
284 284 def _fm0decodemeta(data):
285 285 """Return string to string dictionary from encoded version."""
286 286 d = {}
287 287 for l in data.split(b'\0'):
288 288 if l:
289 289 key, value = l.split(b':', 1)
290 290 d[key] = value
291 291 return d
292 292
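_fm0encodemeta and _fm0decodemeta above are inverses of each other. A compressed sketch of the same nul-separated 'key:value' scheme, under the stated assumption that keys contain no ':' or '\0' and values no '\0':

def encodemeta(meta):
    return b'\x00'.join(b'%s:%s' % (k, meta[k]) for k in sorted(meta))

def decodemeta(data):
    return dict(item.split(b':', 1) for item in data.split(b'\x00') if item)

sample = {b'user': b'alice <alice@example.com>', b'operation': b'amend'}
assert decodemeta(encodemeta(sample)) == sample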
293 293
294 294 ## Parsing and writing of version "1"
295 295 #
296 296 # The header is followed by the markers. Each marker is made of:
297 297 #
298 298 # - uint32: total size of the marker (including this field)
299 299 #
300 300 # - float64: date in seconds since epoch
301 301 #
302 302 # - int16: timezone offset in minutes
303 303 #
304 304 # - uint16: a bit field. It is reserved for flags used in common
305 305 # obsolete marker operations, to avoid repeated decoding of metadata
306 306 # entries.
307 307 #
308 308 # - uint8: number of successors "N", can be zero.
309 309 #
310 310 # - uint8: number of parents "P", can be zero.
311 311 #
312 312 # 0: parents data stored but no parent,
313 313 # 1: one parent stored,
314 314 # 2: two parents stored,
315 315 # 3: no parent data stored
316 316 #
317 317 # - uint8: number of metadata entries M
318 318 #
319 319 # - 20 or 32 bytes: predecessor changeset identifier.
320 320 #
321 321 # - N*(20 or 32) bytes: successors changesets identifiers.
322 322 #
323 323 # - P*(20 or 32) bytes: parents of the predecessors changesets.
324 324 #
325 325 # - M*(uint8, uint8): size of all metadata entries (key and value)
326 326 #
327 327 # - remaining bytes: the metadata, each (key, value) pair after the other.
328 328 _fm1version = 1
329 329 _fm1fixed = b'>IdhHBBB20s'
330 330 _fm1nodesha1 = b'20s'
331 331 _fm1nodesha256 = b'32s'
332 332 _fm1nodesha1size = _calcsize(_fm1nodesha1)
333 333 _fm1nodesha256size = _calcsize(_fm1nodesha256)
334 334 _fm1fsize = _calcsize(_fm1fixed)
335 335 _fm1parentnone = 3
336 336 _fm1parentshift = 14
337 337 _fm1parentmask = _fm1parentnone << _fm1parentshift
338 338 _fm1metapair = b'BB'
339 339 _fm1metapairsize = _calcsize(_fm1metapair)
340 340
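Putting the version-1 layout together: the on-disk size of a sha1 marker is fully determined by the successor and parent counts plus the metadata. A sketch of that arithmetic (fm1sha1size is a hypothetical helper, not part of the module; the fixed '>IdhHBBB20s' part is 39 bytes and already contains the predecessor node):

import struct

def fm1sha1size(numsuc, numpar, metadata):
    if numpar == 3:  # 3 encodes "no parent data stored"
        numpar = 0
    fixed = struct.calcsize('>IdhHBBB20s')  # 39 bytes, includes prec
    nodes = 20 * (numsuc + numpar)          # successor and parent nodes
    sizes = 2 * len(metadata)               # one (uint8, uint8) pair per entry
    blob = sum(len(k) + len(v) for k, v in metadata)
    return fixed + nodes + sizes + blob

assert fm1sha1size(1, 0, [(b'user', b'alice')]) == 39 + 20 + 2 + 9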
341 341
342 342 def _fm1purereadmarkers(data, off, stop):
343 343 # make some global constants local for performance
344 344 noneflag = _fm1parentnone
345 345 sha2flag = usingsha256
346 346 sha1size = _fm1nodesha1size
347 347 sha2size = _fm1nodesha256size
348 348 sha1fmt = _fm1nodesha1
349 349 sha2fmt = _fm1nodesha256
350 350 metasize = _fm1metapairsize
351 351 metafmt = _fm1metapair
352 352 fsize = _fm1fsize
353 353 unpack = _unpack
354 354
355 355 # Loop on markers
356 356 ufixed = struct.Struct(_fm1fixed).unpack
357 357
358 358 while off < stop:
359 359 # read fixed part
360 360 o1 = off + fsize
361 361 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
362 362
363 363 if flags & sha2flag:
364 364 # FIXME: prec was read as a SHA1, needs to be amended
365 365
366 366 # read 0 or more successors
367 367 if numsuc == 1:
368 368 o2 = o1 + sha2size
369 369 sucs = (data[o1:o2],)
370 370 else:
371 371 o2 = o1 + sha2size * numsuc
372 372 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
373 373
374 374 # read parents
375 375 if numpar == noneflag:
376 376 o3 = o2
377 377 parents = None
378 378 elif numpar == 1:
379 379 o3 = o2 + sha2size
380 380 parents = (data[o2:o3],)
381 381 else:
382 382 o3 = o2 + sha2size * numpar
383 383 parents = unpack(sha2fmt * numpar, data[o2:o3])
384 384 else:
385 385 # read 0 or more successors
386 386 if numsuc == 1:
387 387 o2 = o1 + sha1size
388 388 sucs = (data[o1:o2],)
389 389 else:
390 390 o2 = o1 + sha1size * numsuc
391 391 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
392 392
393 393 # read parents
394 394 if numpar == noneflag:
395 395 o3 = o2
396 396 parents = None
397 397 elif numpar == 1:
398 398 o3 = o2 + sha1size
399 399 parents = (data[o2:o3],)
400 400 else:
401 401 o3 = o2 + sha1size * numpar
402 402 parents = unpack(sha1fmt * numpar, data[o2:o3])
403 403
404 404 # read metadata
405 405 off = o3 + metasize * nummeta
406 406 metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
407 407 metadata = []
408 408 for idx in pycompat.xrange(0, len(metapairsize), 2):
409 409 o1 = off + metapairsize[idx]
410 410 o2 = o1 + metapairsize[idx + 1]
411 411 metadata.append((data[off:o1], data[o1:o2]))
412 412 off = o2
413 413
414 414 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
415 415
416 416
417 417 def _fm1encodeonemarker(marker):
418 418 pre, sucs, flags, metadata, date, parents = marker
419 419 # determine node size
420 420 _fm1node = _fm1nodesha1
421 421 if flags & usingsha256:
422 422 _fm1node = _fm1nodesha256
423 423 numsuc = len(sucs)
424 424 numextranodes = numsuc
425 425 if parents is None:
426 426 numpar = _fm1parentnone
427 427 else:
428 428 numpar = len(parents)
429 429 numextranodes += numpar
430 430 formatnodes = _fm1node * numextranodes
431 431 formatmeta = _fm1metapair * len(metadata)
432 432 format = _fm1fixed + formatnodes + formatmeta
433 433 # tz is stored in minutes so we divide by 60
434 434 tz = date[1] // 60
435 435 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
436 436 data.extend(sucs)
437 437 if parents is not None:
438 438 data.extend(parents)
439 439 totalsize = _calcsize(format)
440 440 for key, value in metadata:
441 441 lk = len(key)
442 442 lv = len(value)
443 443 if lk > 255:
444 444 msg = (
445 445 b'obsstore metadata key cannot be longer than 255 bytes'
446 446 b' (key "%s" is %u bytes)'
447 447 ) % (key, lk)
448 448 raise error.ProgrammingError(msg)
449 449 if lv > 255:
450 450 msg = (
451 451 b'obsstore metadata value cannot be longer than 255 bytes'
452 452 b' (value "%s" for key "%s" is %u bytes)'
453 453 ) % (value, key, lv)
454 454 raise error.ProgrammingError(msg)
455 455 data.append(lk)
456 456 data.append(lv)
457 457 totalsize += lk + lv
458 458 data[0] = totalsize
459 459 data = [_pack(format, *data)]
460 460 for key, value in metadata:
461 461 data.append(key)
462 462 data.append(value)
463 463 return b''.join(data)
464 464
465 465
466 466 def _fm1readmarkers(data, off, stop):
467 467 native = getattr(parsers, 'fm1readmarkers', None)
468 468 if not native:
469 469 return _fm1purereadmarkers(data, off, stop)
470 470 return native(data, off, stop)
471 471
472 472
473 473 # mapping to read/write various marker formats
474 474 # <version> -> (decoder, encoder)
475 475 formats = {
476 476 _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
477 477 _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
478 478 }
479 479
480 480
481 481 def _readmarkerversion(data):
482 482 return _unpack(b'>B', data[0:1])[0]
483 483
484 484
485 485 @util.nogc
486 486 def _readmarkers(data, off=None, stop=None):
487 487 """Read and enumerate markers from raw data"""
488 488 diskversion = _readmarkerversion(data)
489 489 if not off:
490 490 off = 1 # skip 1 byte version number
491 491 if stop is None:
492 492 stop = len(data)
493 493 if diskversion not in formats:
494 494 msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
495 495 raise error.UnknownVersion(msg, version=diskversion)
496 496 return diskversion, formats[diskversion][0](data, off, stop)
497 497
498 498
499 499 def encodeheader(version=_fm0version):
500 500 return _pack(b'>B', version)
501 501
502 502
503 503 def encodemarkers(markers, addheader=False, version=_fm0version):
504 504 # Kept separate from flushmarkers(), it will be reused for
505 505 # markers exchange.
506 506 encodeone = formats[version][1]
507 507 if addheader:
508 508 yield encodeheader(version)
509 509 for marker in markers:
510 510 yield encodeone(marker)
511 511
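A round-trip sketch tying the helpers together: encode a single pruning marker (empty successor tuple) with the format-0 encoder, then parse it back through _readmarkers; the predecessor node is a placeholder value:

raw = b''.join(
    encodemarkers([(b'\x11' * 20, (), 0, (), (0.0, 0), None)], addheader=True)
)
version, markers = _readmarkers(raw)
assert version == _fm0version
assert [m[0] for m in markers] == [b'\x11' * 20]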
512 512
513 513 @util.nogc
514 514 def _addsuccessors(successors, markers):
515 515 for mark in markers:
516 516 successors.setdefault(mark[0], set()).add(mark)
517 517
518 518
519 519 @util.nogc
520 520 def _addpredecessors(predecessors, markers):
521 521 for mark in markers:
522 522 for suc in mark[1]:
523 523 predecessors.setdefault(suc, set()).add(mark)
524 524
525 525
526 526 @util.nogc
527 527 def _addchildren(children, markers):
528 528 for mark in markers:
529 529 parents = mark[5]
530 530 if parents is not None:
531 531 for p in parents:
532 532 children.setdefault(p, set()).add(mark)
533 533
534 534
535 535 def _checkinvalidmarkers(markers):
536 536 """search for marker with invalid data and raise error if needed
537 537
538 538 Exist as a separated function to allow the evolve extension for a more
539 539 subtle handling.
540 540 """
541 541 for mark in markers:
542 542 if node.nullid in mark[1]:
543 543 raise error.Abort(
544 544 _(
545 545 b'bad obsolescence marker detected: '
546 546 b'invalid successors nullid'
547 547 )
548 548 )
549 549
550 550
551 551 class obsstore(object):
552 552 """Store obsolete markers
553 553
554 554 Markers can be accessed with three mappings:
555 555 - predecessors[x] -> set(markers on predecessors edges of x)
556 556 - successors[x] -> set(markers on successors edges of x)
557 557 - children[x] -> set(markers on predecessors edges of children(x))
558 558 """
559 559
560 560 fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
561 561 # prec: nodeid, predecessor changeset
562 562 # succs: tuple of nodeid, successor changesets (0-N length)
563 563 # flag: integer, flag field carrying modifier for the markers (see doc)
564 564 # meta: binary blob in UTF-8, encoded metadata dictionary
565 565 # date: (float, int) tuple, date of marker creation
566 566 # parents: (tuple of nodeid) or None, parents of predecessors
567 567 # None is used when no data has been recorded
568 568
569 569 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
570 570 # caches for various obsolescence related cache
571 571 self.caches = {}
572 572 self.svfs = svfs
573 573 self._defaultformat = defaultformat
574 574 self._readonly = readonly
575 575
576 576 def __iter__(self):
577 577 return iter(self._all)
578 578
579 579 def __len__(self):
580 580 return len(self._all)
581 581
582 582 def __nonzero__(self):
583 583 if not self._cached(r'_all'):
584 584 try:
585 585 return self.svfs.stat(b'obsstore').st_size > 1
586 586 except OSError as inst:
587 587 if inst.errno != errno.ENOENT:
588 588 raise
589 589 # just build an empty _all list if no obsstore exists, which
590 590 # avoids further stat() syscalls
591 591 return bool(self._all)
592 592
593 593 __bool__ = __nonzero__
594 594
595 595 @property
596 596 def readonly(self):
597 597 """True if marker creation is disabled
598 598
599 599 Remove me in the future when obsolete markers are always on."""
600 600 return self._readonly
601 601
602 602 def create(
603 603 self,
604 604 transaction,
605 605 prec,
606 606 succs=(),
607 607 flag=0,
608 608 parents=None,
609 609 date=None,
610 610 metadata=None,
611 611 ui=None,
612 612 ):
613 613 """obsolete: add a new obsolete marker
614 614
615 615 * ensuring it is hashable
616 616 * check mandatory metadata
617 617 * encode metadata
618 618
619 619 If you are a human writing code that creates markers, you want to use
620 620 the `createmarkers` function in this module instead.
621 621
622 622 Return True if a new marker has been added, False if the marker
623 623 already existed (no-op).
624 624 """
625 625 if metadata is None:
626 626 metadata = {}
627 627 if date is None:
628 628 if b'date' in metadata:
629 629 # as a courtesy for out-of-tree extensions
630 630 date = dateutil.parsedate(metadata.pop(b'date'))
631 631 elif ui is not None:
632 632 date = ui.configdate(b'devel', b'default-date')
633 633 if date is None:
634 634 date = dateutil.makedate()
635 635 else:
636 636 date = dateutil.makedate()
637 637 if len(prec) != 20:
638 638 raise ValueError(prec)
639 639 for succ in succs:
640 640 if len(succ) != 20:
641 641 raise ValueError(succ)
642 642 if prec in succs:
643 643 raise ValueError(
644 644 r'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec))
645 645 )
646 646
647 647 metadata = tuple(sorted(pycompat.iteritems(metadata)))
648 648 for k, v in metadata:
649 649 try:
650 650 # might be better to reject non-ASCII keys
651 651 k.decode('utf-8')
652 652 v.decode('utf-8')
653 653 except UnicodeDecodeError:
654 654 raise error.ProgrammingError(
655 655 b'obsstore metadata must be valid UTF-8 sequence '
656 656 b'(key = %r, value = %r)'
657 657 % (pycompat.bytestr(k), pycompat.bytestr(v))
658 658 )
659 659
660 660 marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
661 661 return bool(self.add(transaction, [marker]))
662 662
663 663 def add(self, transaction, markers):
664 664 """Add new markers to the store
665 665
666 666 Takes care of filtering duplicates.
667 667 Returns the number of new markers."""
668 668 if self._readonly:
669 669 raise error.Abort(
670 670 _(b'creating obsolete markers is not enabled on this repo')
671 671 )
672 672 known = set()
673 673 getsuccessors = self.successors.get
674 674 new = []
675 675 for m in markers:
676 676 if m not in getsuccessors(m[0], ()) and m not in known:
677 677 known.add(m)
678 678 new.append(m)
679 679 if new:
680 680 f = self.svfs(b'obsstore', b'ab')
681 681 try:
682 682 offset = f.tell()
683 683 transaction.add(b'obsstore', offset)
684 684 # offset == 0: new file - add the version header
685 685 data = b''.join(encodemarkers(new, offset == 0, self._version))
686 686 f.write(data)
687 687 finally:
688 688 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
689 689 # call 'filecacheentry.refresh()' here
690 690 f.close()
691 691 addedmarkers = transaction.changes.get(b'obsmarkers')
692 692 if addedmarkers is not None:
693 693 addedmarkers.update(new)
694 694 self._addmarkers(new, data)
695 695 # new markers *may* have changed several sets. invalidate the cache.
696 696 self.caches.clear()
697 697 # records the number of new markers for the transaction hooks
698 698 previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
699 699 transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
700 700 return len(new)
701 701
702 702 def mergemarkers(self, transaction, data):
703 703 """merge a binary stream of markers inside the obsstore
704 704
705 705 Returns the number of new markers added."""
706 706 version, markers = _readmarkers(data)
707 707 return self.add(transaction, markers)
708 708
709 709 @propertycache
710 710 def _data(self):
711 711 return self.svfs.tryread(b'obsstore')
712 712
713 713 @propertycache
714 714 def _version(self):
715 715 if len(self._data) >= 1:
716 716 return _readmarkerversion(self._data)
717 717 else:
718 718 return self._defaultformat
719 719
720 720 @propertycache
721 721 def _all(self):
722 722 data = self._data
723 723 if not data:
724 724 return []
725 725 self._version, markers = _readmarkers(data)
726 726 markers = list(markers)
727 727 _checkinvalidmarkers(markers)
728 728 return markers
729 729
730 730 @propertycache
731 731 def successors(self):
732 732 successors = {}
733 733 _addsuccessors(successors, self._all)
734 734 return successors
735 735
736 736 @propertycache
737 737 def predecessors(self):
738 738 predecessors = {}
739 739 _addpredecessors(predecessors, self._all)
740 740 return predecessors
741 741
742 742 @propertycache
743 743 def children(self):
744 744 children = {}
745 745 _addchildren(children, self._all)
746 746 return children
747 747
748 748 def _cached(self, attr):
749 749 return attr in self.__dict__
750 750
751 751 def _addmarkers(self, markers, rawdata):
752 752 markers = list(markers) # to allow repeated iteration
753 753 self._data = self._data + rawdata
754 754 self._all.extend(markers)
755 755 if self._cached(r'successors'):
756 756 _addsuccessors(self.successors, markers)
757 757 if self._cached(r'predecessors'):
758 758 _addpredecessors(self.predecessors, markers)
759 759 if self._cached(r'children'):
760 760 _addchildren(self.children, markers)
761 761 _checkinvalidmarkers(markers)
762 762
763 763 def relevantmarkers(self, nodes):
764 764 """return a set of all obsolescence markers relevant to a set of nodes.
765 765
766 766 "relevant" to a set of nodes mean:
767 767
768 768 - marker that use this changeset as successor
769 769 - prune marker of direct children on this changeset
770 770 - recursive application of the two rules on predecessors of these
771 771 markers
772 772
773 773 It is a set so you cannot rely on order."""
774 774
775 775 pendingnodes = set(nodes)
776 776 seenmarkers = set()
777 777 seennodes = set(pendingnodes)
778 778 precursorsmarkers = self.predecessors
779 779 succsmarkers = self.successors
780 780 children = self.children
781 781 while pendingnodes:
782 782 direct = set()
783 783 for current in pendingnodes:
784 784 direct.update(precursorsmarkers.get(current, ()))
785 785 pruned = [m for m in children.get(current, ()) if not m[1]]
786 786 direct.update(pruned)
787 787 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
788 788 direct.update(pruned)
789 789 direct -= seenmarkers
790 790 pendingnodes = {m[0] for m in direct}
791 791 seenmarkers |= direct
792 792 pendingnodes -= seennodes
793 793 seennodes |= pendingnodes
794 794 return seenmarkers
795 795
796 796
797 797 def makestore(ui, repo):
798 798 """Create an obsstore instance from a repo."""
799 799 # read default format for new obsstore.
800 800 # developer config: format.obsstore-version
801 801 defaultformat = ui.configint(b'format', b'obsstore-version')
802 802 # rely on obsstore class default when possible.
803 803 kwargs = {}
804 804 if defaultformat is not None:
805 805 kwargs[r'defaultformat'] = defaultformat
806 806 readonly = not isenabled(repo, createmarkersopt)
807 807 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
808 808 if store and readonly:
809 809 ui.warn(
810 810 _(b'obsolete feature not enabled but %i markers found!\n')
811 811 % len(list(store))
812 812 )
813 813 return store
814 814
815 815
816 816 def commonversion(versions):
817 817 """Return the newest version listed in both versions and our local formats.
818 818
819 819 Returns None if no common version exists.
820 820 """
821 821 versions.sort(reverse=True)
822 822 # search for the highest version known on both sides
823 823 for v in versions:
824 824 if v in formats:
825 825 return v
826 826 return None
827 827
828 828
829 829 # arbitrarily picked to fit into the 8K limit of the HTTP server
830 830 # you have to take into account:
831 831 # - the version header
832 832 # - the base85 encoding
833 833 _maxpayload = 5300
834 834
835 835
836 836 def _pushkeyescape(markers):
837 837 """encode markers into a dict suitable for pushkey exchange
838 838
839 839 - binary data is base85 encoded
840 840 - split in chunks smaller than 5300 bytes"""
841 841 keys = {}
842 842 parts = []
843 843 currentlen = _maxpayload * 2 # ensure we create a new part
844 844 for marker in markers:
845 845 nextdata = _fm0encodeonemarker(marker)
846 846 if len(nextdata) + currentlen > _maxpayload:
847 847 currentpart = []
848 848 currentlen = 0
849 849 parts.append(currentpart)
850 850 currentpart.append(nextdata)
851 851 currentlen += len(nextdata)
852 852 for idx, part in enumerate(reversed(parts)):
853 853 data = b''.join([_pack(b'>B', _fm0version)] + part)
854 854 keys[b'dump%i' % idx] = util.b85encode(data)
855 855 return keys
856 856
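The splitting loop in _pushkeyescape is a greedy chunker: it opens a new part whenever appending the next encoded marker would push the current part past _maxpayload. The same pattern on plain byte strings, as a standalone sketch:

def chunk(blobs, maxpayload=5300):
    parts = []
    current, currentlen = None, maxpayload * 2  # force creating a first part
    for b in blobs:
        if len(b) + currentlen > maxpayload:
            current, currentlen = [], 0
            parts.append(current)
        current.append(b)
        currentlen += len(b)
    return [b''.join(p) for p in parts]

pieces = chunk([b'x' * 3000, b'y' * 3000, b'z' * 100])
assert [len(p) for p in pieces] == [3000, 3100]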
857 857
858 858 def listmarkers(repo):
859 859 """List markers over pushkey"""
860 860 if not repo.obsstore:
861 861 return {}
862 862 return _pushkeyescape(sorted(repo.obsstore))
863 863
864 864
865 865 def pushmarker(repo, key, old, new):
866 866 """Push markers over pushkey"""
867 867 if not key.startswith(b'dump'):
868 868 repo.ui.warn(_(b'unknown key: %r') % key)
869 869 return False
870 870 if old:
871 871 repo.ui.warn(_(b'unexpected old value for %r') % key)
872 872 return False
873 873 data = util.b85decode(new)
874 874 with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
875 875 repo.obsstore.mergemarkers(tr, data)
876 876 repo.invalidatevolatilesets()
877 877 return True
878 878
879 879
880 880 # mapping of 'set-name' -> <function to compute this set>
881 881 cachefuncs = {}
882 882
883 883
884 884 def cachefor(name):
885 885 """Decorator to register a function as computing the cache for a set"""
886 886
887 887 def decorator(func):
888 888 if name in cachefuncs:
889 889 msg = b"duplicated registration for volatileset '%s' (existing: %r)"
890 890 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
891 891 cachefuncs[name] = func
892 892 return func
893 893
894 894 return decorator
895 895
896 896
897 897 def getrevs(repo, name):
898 898 """Return the set of revision that belong to the <name> set
899 899
900 900 Such access may compute the set and cache it for future use"""
901 901 repo = repo.unfiltered()
902 if not repo.obsstore:
903 return frozenset()
904 if name not in repo.obsstore.caches:
905 repo.obsstore.caches[name] = cachefuncs[name](repo)
906 return repo.obsstore.caches[name]
902 with util.timedcm('getrevs %s', name):
903 if not repo.obsstore:
904 return frozenset()
905 if name not in repo.obsstore.caches:
906 repo.obsstore.caches[name] = cachefuncs[name](repo)
907 return repo.obsstore.caches[name]
907 908
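util.timedcm, used in the new trace point above, is Mercurial's timing context manager. The sketch below is a minimal stand-in for the idea, not hg's actual implementation (which reports through its tracing machinery rather than printing):

import contextlib
import time

@contextlib.contextmanager
def timedcm(whencefmt, *whenceargs):
    # time the enclosed block and report where the time was spent
    start = time.time()
    try:
        yield
    finally:
        elapsed = time.time() - start
        print('%s took %.6fs' % (whencefmt % whenceargs, elapsed))

with timedcm('getrevs %s', 'obsolete'):
    pass  # the guarded work would run here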
908 909
909 910 # To keep things simple we need to invalidate the obsolescence cache when:
910 911 #
911 912 # - a new changeset is added,
912 913 # - the public phase is changed,
913 914 # - obsolescence markers are added,
914 915 # - strip is used on a repo
915 916 def clearobscaches(repo):
916 917 """Remove all obsolescence related cache from a repo
917 918
918 919 This remove all cache in obsstore is the obsstore already exist on the
919 920 repo.
920 921
921 922 (We could be smarter here given the exact event that trigger the cache
922 923 clearing)"""
923 924 # only clear cache is there is obsstore data in this repo
924 925 if b'obsstore' in repo._filecache:
925 926 repo.obsstore.caches.clear()
926 927
927 928
928 929 def _mutablerevs(repo):
929 930 """the set of mutable revision in the repository"""
930 931 return repo._phasecache.getrevset(repo, phases.mutablephases)
931 932
932 933
933 934 @cachefor(b'obsolete')
934 935 def _computeobsoleteset(repo):
935 936 """the set of obsolete revisions"""
936 937 getnode = repo.changelog.node
937 938 notpublic = _mutablerevs(repo)
938 939 isobs = repo.obsstore.successors.__contains__
939 940 obs = set(r for r in notpublic if isobs(getnode(r)))
940 941 return obs
941 942
942 943
943 944 @cachefor(b'orphan')
944 945 def _computeorphanset(repo):
945 946 """the set of non obsolete revisions with obsolete parents"""
946 947 pfunc = repo.changelog.parentrevs
947 948 mutable = _mutablerevs(repo)
948 949 obsolete = getrevs(repo, b'obsolete')
949 950 others = mutable - obsolete
950 951 unstable = set()
951 952 for r in sorted(others):
952 953 # A rev is unstable if one of its parents is obsolete or unstable
953 954 # this works since we traverse following growing rev order
954 955 for p in pfunc(r):
955 956 if p in obsolete or p in unstable:
956 957 unstable.add(r)
957 958 break
958 959 return unstable
959 960
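The growing-rev-order trick above lets a single pass suffice: by the time a revision is examined, all of its parents have already been classified. A toy run with a three-revision linear history where rev 0 is obsolete:

parents = {0: (), 1: (0,), 2: (1,)}
obsolete = {0}
unstable = set()
for r in sorted(parents):
    if r in obsolete:
        continue
    if any(p in obsolete or p in unstable for p in parents[r]):
        unstable.add(r)
assert unstable == {1, 2}  # instability propagated down from rev 0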
960 961
961 962 @cachefor(b'suspended')
962 963 def _computesuspendedset(repo):
963 964 """the set of obsolete parents with non obsolete descendants"""
964 965 suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
965 966 return set(r for r in getrevs(repo, b'obsolete') if r in suspended)
966 967
967 968
968 969 @cachefor(b'extinct')
969 970 def _computeextinctset(repo):
970 971 """the set of obsolete parents without non obsolete descendants"""
971 972 return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')
972 973
973 974
974 975 @cachefor(b'phasedivergent')
975 976 def _computephasedivergentset(repo):
976 977 """the set of revs trying to obsolete public revisions"""
977 978 bumped = set()
978 979 # util function (avoid attribute lookup in the loop)
979 980 phase = repo._phasecache.phase # would be faster to grab the full list
980 981 public = phases.public
981 982 cl = repo.changelog
982 983 torev = cl.nodemap.get
983 984 tonode = cl.node
984 985 obsstore = repo.obsstore
985 986 for rev in repo.revs(b'(not public()) and (not obsolete())'):
986 987 # We only evaluate mutable, non-obsolete revisions
987 988 node = tonode(rev)
988 989 # (future) A cache of predecessors may be worth it if split is very common
989 990 for pnode in obsutil.allpredecessors(
990 991 obsstore, [node], ignoreflags=bumpedfix
991 992 ):
992 993 prev = torev(pnode) # unfiltered! but so is phasecache
993 994 if (prev is not None) and (phase(repo, prev) <= public):
994 995 # we have a public predecessor
995 996 bumped.add(rev)
996 997 break # Next draft!
997 998 return bumped
998 999
999 1000
1000 1001 @cachefor(b'contentdivergent')
1001 1002 def _computecontentdivergentset(repo):
1002 1003 """the set of rev that compete to be the final successors of some revision.
1003 1004 """
1004 1005 divergent = set()
1005 1006 obsstore = repo.obsstore
1006 1007 newermap = {}
1007 1008 tonode = repo.changelog.node
1008 1009 for rev in repo.revs(b'(not public()) - obsolete()'):
1009 1010 node = tonode(rev)
1010 1011 mark = obsstore.predecessors.get(node, ())
1011 1012 toprocess = set(mark)
1012 1013 seen = set()
1013 1014 while toprocess:
1014 1015 prec = toprocess.pop()[0]
1015 1016 if prec in seen:
1016 1017 continue # emergency cycle hanging prevention
1017 1018 seen.add(prec)
1018 1019 if prec not in newermap:
1019 1020 obsutil.successorssets(repo, prec, cache=newermap)
1020 1021 newer = [n for n in newermap[prec] if n]
1021 1022 if len(newer) > 1:
1022 1023 divergent.add(rev)
1023 1024 break
1024 1025 toprocess.update(obsstore.predecessors.get(prec, ()))
1025 1026 return divergent
1026 1027
1027 1028
1028 1029 def makefoldid(relation, user):
1029 1030
1030 1031 folddigest = hashlib.sha1(user)
1031 1032 for p in relation[0] + relation[1]:
1032 1033 folddigest.update(b'%d' % p.rev())
1033 1034 folddigest.update(p.node())
1034 1035 # Since fold only has to compete against fold for the same successors, it
1035 1036 # seems fine to use a small ID. Smaller IDs save space.
1036 1037 return node.hex(folddigest.digest())[:8]
1037 1038
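makefoldid only has to be unique among folds competing for the same successors, hence the short 8-character digest. A standalone sketch of the same scheme over plain (rev, node) pairs instead of changectx objects (foldid is a hypothetical helper):

import hashlib

def foldid(user, prednodes):
    # digest the user plus every (rev, node) involved in the fold
    d = hashlib.sha1(user)
    for rev, nodeid in prednodes:
        d.update(b'%d' % rev)
        d.update(nodeid)
    return d.hexdigest()[:8]

assert len(foldid(b'alice', [(1, b'\x11' * 20), (2, b'\x22' * 20)])) == 8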
1038 1039
1039 1040 def createmarkers(
1040 1041 repo, relations, flag=0, date=None, metadata=None, operation=None
1041 1042 ):
1042 1043 """Add obsolete markers between changesets in a repo
1043 1044
1044 1045 <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
1045 1046 tuples. `old` and `new` are changectxs. metadata is an optional dictionary
1046 1047 containing metadata for this marker only. It is merged with the global
1047 1048 metadata specified through the `metadata` argument of this function.
1048 1049 Any string values in metadata must be UTF-8 bytes.
1049 1050
1050 1051 Trying to obsolete a public changeset will raise an exception.
1051 1052
1052 1053 Current user and date are used except if specified otherwise in the
1053 1054 metadata attribute.
1054 1055
1055 1056 This function operates within a transaction of its own, but does
1056 1057 not take any lock on the repo.
1057 1058 """
1058 1059 # prepare metadata
1059 1060 if metadata is None:
1060 1061 metadata = {}
1061 1062 if b'user' not in metadata:
1062 1063 luser = (
1063 1064 repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
1064 1065 )
1065 1066 metadata[b'user'] = encoding.fromlocal(luser)
1066 1067
1067 1068 # Operation metadata handling
1068 1069 useoperation = repo.ui.configbool(
1069 1070 b'experimental', b'evolution.track-operation'
1070 1071 )
1071 1072 if useoperation and operation:
1072 1073 metadata[b'operation'] = operation
1073 1074
1074 1075 # Effect flag metadata handling
1075 1076 saveeffectflag = repo.ui.configbool(
1076 1077 b'experimental', b'evolution.effect-flags'
1077 1078 )
1078 1079
1079 1080 with repo.transaction(b'add-obsolescence-marker') as tr:
1080 1081 markerargs = []
1081 1082 for rel in relations:
1082 1083 predecessors = rel[0]
1083 1084 if not isinstance(predecessors, tuple):
1084 1085 # preserve compat with old API until all callers are migrated
1085 1086 predecessors = (predecessors,)
1086 1087 if len(predecessors) > 1 and len(rel[1]) != 1:
1087 1088 msg = b'Fold markers can only have 1 successor, not %d'
1088 1089 raise error.ProgrammingError(msg % len(rel[1]))
1089 1090 foldid = None
1090 1091 foldsize = len(predecessors)
1091 1092 if 1 < foldsize:
1092 1093 foldid = makefoldid(rel, metadata[b'user'])
1093 1094 for foldidx, prec in enumerate(predecessors, 1):
1094 1095 sucs = rel[1]
1095 1096 localmetadata = metadata.copy()
1096 1097 if len(rel) > 2:
1097 1098 localmetadata.update(rel[2])
1098 1099 if foldid is not None:
1099 1100 localmetadata[b'fold-id'] = foldid
1100 1101 localmetadata[b'fold-idx'] = b'%d' % foldidx
1101 1102 localmetadata[b'fold-size'] = b'%d' % foldsize
1102 1103
1103 1104 if not prec.mutable():
1104 1105 raise error.Abort(
1105 1106 _(b"cannot obsolete public changeset: %s") % prec,
1106 1107 hint=b"see 'hg help phases' for details",
1107 1108 )
1108 1109 nprec = prec.node()
1109 1110 nsucs = tuple(s.node() for s in sucs)
1110 1111 npare = None
1111 1112 if not nsucs:
1112 1113 npare = tuple(p.node() for p in prec.parents())
1113 1114 if nprec in nsucs:
1114 1115 raise error.Abort(
1115 1116 _(b"changeset %s cannot obsolete itself") % prec
1116 1117 )
1117 1118
1118 1119 # Effect flag can be different by relation
1119 1120 if saveeffectflag:
1120 1121 # The effect flag is saved in a versioned field name for
1121 1122 # future evolution
1122 1123 effectflag = obsutil.geteffectflag(prec, sucs)
1123 1124 localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag
1124 1125
1125 1126 # Creating the marker causes the hidden cache to become
1126 1127 # invalid, which causes recomputation when we ask for
1127 1128 # prec.parents() above. Resulting in n^2 behavior. So let's
1128 1129 # prepare all of the args first, then create the markers.
1129 1130 markerargs.append((nprec, nsucs, npare, localmetadata))
1130 1131
1131 1132 for args in markerargs:
1132 1133 nprec, nsucs, npare, localmetadata = args
1133 1134 repo.obsstore.create(
1134 1135 tr,
1135 1136 nprec,
1136 1137 nsucs,
1137 1138 flag,
1138 1139 parents=npare,
1139 1140 date=date,
1140 1141 metadata=localmetadata,
1141 1142 ui=repo.ui,
1142 1143 )
1143 1144 repo.filteredrevcache.clear()
@@ -1,337 +1,337 b''
1 1 # repoview.py - Filtered view of a localrepo object
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import copy
12 12 import weakref
13 13
14 14 from .node import nullrev
15 15 from .pycompat import (
16 16 delattr,
17 17 getattr,
18 18 setattr,
19 19 )
20 20 from . import (
21 21 obsolete,
22 22 phases,
23 23 pycompat,
24 24 tags as tagsmod,
25 25 util,
26 26 )
27 27 from .utils import repoviewutil
28 28
29 29
30 30 def hideablerevs(repo):
31 31 """Revision candidates to be hidden
32 32
33 33 This is a standalone function to allow extensions to wrap it.
34 34
35 35 Because we use the set of immutable changesets as a fallback subset in
36 36 branchmap (see mercurial.utils.repoviewutil.subsettable), you cannot set
37 37 "public" changesets as "hideable". Doing so would break multiple code
38 38 assertions and lead to crashes."""
39 39 obsoletes = obsolete.getrevs(repo, b'obsolete')
40 40 internals = repo._phasecache.getrevset(repo, phases.localhiddenphases)
41 41 internals = frozenset(internals)
42 42 return obsoletes | internals
43 43
44 44
45 45 def pinnedrevs(repo):
46 46 """revisions blocking hidden changesets from being filtered
47 47 """
48 48
49 49 cl = repo.changelog
50 50 pinned = set()
51 51 pinned.update([par.rev() for par in repo[None].parents()])
52 52 pinned.update([cl.rev(bm) for bm in repo._bookmarks.values()])
53 53
54 54 tags = {}
55 55 tagsmod.readlocaltags(repo.ui, repo, tags, {})
56 56 if tags:
57 57 rev, nodemap = cl.rev, cl.nodemap
58 58 pinned.update(rev(t[0]) for t in tags.values() if t[0] in nodemap)
59 59 return pinned
60 60
61 61
62 62 def _revealancestors(pfunc, hidden, revs):
63 63 """reveals contiguous chains of hidden ancestors of 'revs' by removing them
64 64 from 'hidden'
65 65
66 66 - pfunc(r): a function returning the parents of 'r',
67 67 - hidden: the (preliminary) hidden revisions, to be updated
68 68 - revs: iterable of revnum,
69 69
70 70 (Ancestors are revealed exclusively, i.e. the elements in 'revs' are
71 71 *not* revealed)
72 72 """
73 73 stack = list(revs)
74 74 while stack:
75 75 for p in pfunc(stack.pop()):
76 76 if p != nullrev and p in hidden:
77 77 hidden.remove(p)
78 78 stack.append(p)
79 79
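A toy run of the reveal logic above: with a linear history 1 <- 2 <- 3, hidden = {1, 2} and revs = {3}, both hidden ancestors are removed while 3 itself (an element of revs) is never touched:

nullrev = -1
parents = {1: (0,), 2: (1,), 3: (2,)}
hidden = {1, 2}
stack = [3]
while stack:
    for p in parents[stack.pop()]:
        if p != nullrev and p in hidden:
            hidden.remove(p)
            stack.append(p)
assert hidden == set()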
80 80
81 81 def computehidden(repo, visibilityexceptions=None):
82 82 """compute the set of hidden revision to filter
83 83
84 84 During most operation hidden should be filtered."""
85 85 assert not repo.changelog.filteredrevs
86 86
87 87 hidden = hideablerevs(repo)
88 88 if hidden:
89 89 hidden = set(hidden - pinnedrevs(repo))
90 90 if visibilityexceptions:
91 91 hidden -= visibilityexceptions
92 92 pfunc = repo.changelog.parentrevs
93 93 mutable = repo._phasecache.getrevset(repo, phases.mutablephases)
94 94
95 95 visible = mutable - hidden
96 96 _revealancestors(pfunc, hidden, visible)
97 97 return frozenset(hidden)
98 98
99 99
100 100 def computesecret(repo, visibilityexceptions=None):
101 101 """compute the set of revision that can never be exposed through hgweb
102 102
103 103 Changeset in the secret phase (or above) should stay unaccessible."""
104 104 assert not repo.changelog.filteredrevs
105 105 secrets = repo._phasecache.getrevset(repo, phases.remotehiddenphases)
106 106 return frozenset(secrets)
107 107
108 108
109 109 def computeunserved(repo, visibilityexceptions=None):
110 110 """compute the set of revision that should be filtered when used a server
111 111
112 112 Secret and hidden changeset should not pretend to be here."""
113 113 assert not repo.changelog.filteredrevs
114 114 # fast path in simple case to avoid impact of non optimised code
115 115 hiddens = filterrevs(repo, b'visible')
116 116 secrets = filterrevs(repo, b'served.hidden')
117 117 if secrets:
118 118 return frozenset(hiddens | secrets)
119 119 else:
120 120 return hiddens
121 121
122 122
123 123 def computemutable(repo, visibilityexceptions=None):
124 124 assert not repo.changelog.filteredrevs
125 125 # fast check to avoid revset call on huge repo
126 126 if any(repo._phasecache.phaseroots[1:]):
127 127 getphase = repo._phasecache.phase
128 128 maymutable = filterrevs(repo, b'base')
129 129 return frozenset(r for r in maymutable if getphase(repo, r))
130 130 return frozenset()
131 131
132 132
133 133 def computeimpactable(repo, visibilityexceptions=None):
134 134 """Everything impactable by mutable revision
135 135
136 136 The immutable filter still has some chance to get invalidated. This will
137 137 happen when:
138 138
139 139 - you garbage collect hidden changesets,
140 140 - the public phase is moved backward,
141 141 - something is changed in the filtering (this could be fixed)
142 142
143 143 This filters out any mutable changeset and any public changeset that may be
144 144 impacted by something happening to a mutable revision.
145 145
146 146 This is achieved by filtering out everything with a revision number equal
147 147 to or higher than that of the first mutable changeset."""
148 148 assert not repo.changelog.filteredrevs
149 149 cl = repo.changelog
150 150 firstmutable = len(cl)
151 151 for roots in repo._phasecache.phaseroots[1:]:
152 152 if roots:
153 153 firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
154 154 # protect from nullrev root
155 155 firstmutable = max(0, firstmutable)
156 156 return frozenset(pycompat.xrange(firstmutable, len(cl)))
157 157
158 158
159 159 # function to compute filtered set
160 160 #
161 161 # When adding a new filter you MUST update the table at:
162 162 # mercurial.utils.repoviewutil.subsettable
163 163 # Otherwise your filter will have to recompute all its branches cache
164 164 # from scratch (very slow).
165 165 filtertable = {
166 166 b'visible': computehidden,
167 167 b'visible-hidden': computehidden,
168 168 b'served.hidden': computesecret,
169 169 b'served': computeunserved,
170 170 b'immutable': computemutable,
171 171 b'base': computeimpactable,
172 172 }
173 173
174 174 _basefiltername = list(filtertable)
175 175
176 176
177 177 def extrafilter(ui):
178 178 """initialize extra filter and return its id
179 179
180 180 If extra filtering is configured, we make sure the associated filtered views
181 181 are declared and return the associated id.
182 182 """
183 183 frevs = ui.config(b'experimental', b'extra-filter-revs')
184 184 if frevs is None:
185 185 return None
186 186
187 187 fid = pycompat.sysbytes(util.DIGESTS[b'sha1'](frevs).hexdigest())[:12]
188 188
189 189 combine = lambda fname: fname + b'%' + fid
190 190
191 191 subsettable = repoviewutil.subsettable
192 192
193 193 if combine(b'base') not in filtertable:
194 194 for name in _basefiltername:
195 195
196 196 def extrafilteredrevs(repo, *args, **kwargs):
197 197 baserevs = filtertable[name](repo, *args, **kwargs)
198 198 extrarevs = frozenset(repo.revs(frevs))
199 199 return baserevs | extrarevs
200 200
201 201 filtertable[combine(name)] = extrafilteredrevs
202 202 if name in subsettable:
203 203 subsettable[combine(name)] = combine(subsettable[name])
204 204 return fid
205 205
206 206
207 207 def filterrevs(repo, filtername, visibilityexceptions=None):
208 208 """returns set of filtered revision for this filter name
209 209
210 210 visibilityexceptions is a set of revs which must are exceptions for
211 211 hidden-state and must be visible. They are dynamic and hence we should not
212 212 cache it's result"""
213 213 if filtername not in repo.filteredrevcache:
214 214 func = filtertable[filtername]
215 215 if visibilityexceptions:
216 216 return func(repo.unfiltered(), visibilityexceptions)
217 217 repo.filteredrevcache[filtername] = func(repo.unfiltered())
218 218 return repo.filteredrevcache[filtername]
219 219
220 220
221 221 class repoview(object):
222 222 """Provide a read/write view of a repo through a filtered changelog
223 223
224 224 This object is used to access a filtered version of a repository without
225 225 altering the original repository object itself. We cannot alter the
226 226 original object for two main reasons:
227 227 - It prevents the use of a repo with multiple filters at the same time. In
228 228 particular when multiple threads are involved.
229 229 - It makes scope of the filtering harder to control.
230 230
231 231 This object behaves very much like the original repository. All attribute
232 232 operations are done on the original repository:
233 233 - An access to `repoview.someattr` actually returns `repo.someattr`,
234 234 - A write to `repoview.someattr` actually sets value of `repo.someattr`,
235 235 - A deletion of `repoview.someattr` actually drops `someattr`
236 236 from `repo.__dict__`.
237 237
238 238 The only exception is the `changelog` property. It is overridden to return
239 239 a (surface) copy of `repo.changelog` with some revisions filtered. The
240 240 `filtername` attribute of the view controls the revisions that need to be
241 241 filtered. (the fact the changelog is copied is an implementation detail).
242 242
243 243 Unlike attributes, this object intercepts all method calls. This means that
244 244 all methods are run on the `repoview` object with the filtered `changelog`
245 245 property. For this purpose the simple `repoview` class must be mixed with
246 246 the actual class of the repository. This ensures that the resulting
247 247 `repoview` object has the very same methods as the repo object. This
248 248 leads to the property below.
249 249
250 250 repoview.method() --> repo.__class__.method(repoview)
251 251
252 252 The inheritance has to be done dynamically because `repo` can be of any
253 253 subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
254 254 """
255 255
256 256 def __init__(self, repo, filtername, visibilityexceptions=None):
257 257 object.__setattr__(self, r'_unfilteredrepo', repo)
258 258 object.__setattr__(self, r'filtername', filtername)
259 259 object.__setattr__(self, r'_clcachekey', None)
260 260 object.__setattr__(self, r'_clcache', None)
261 261 # revs which are exceptions and must not be hidden
262 262 object.__setattr__(self, r'_visibilityexceptions', visibilityexceptions)
263 263
264 264 # not a propertycache on purpose we shall implement a proper cache later
265 265 @property
266 266 def changelog(self):
267 267 """return a filtered version of the changeset
268 268
269 269 this changelog must not be used for writing"""
270 270 # some cache may be implemented later
271 271 unfi = self._unfilteredrepo
272 272 unfichangelog = unfi.changelog
273 273 # bypass call to changelog.method
274 274 unfiindex = unfichangelog.index
275 275 unfilen = len(unfiindex)
276 276 unfinode = unfiindex[unfilen - 1][7]
277
278 revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
277 with util.timedcm('repo filter for %s', self.filtername):
278 revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
279 279 cl = self._clcache
280 280 newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed)
281 281 # if cl.index is not unfiindex, unfi.changelog would be
282 282 # recreated, and our clcache refers to garbage object
283 283 if cl is not None and (
284 284 cl.index is not unfiindex or newkey != self._clcachekey
285 285 ):
286 286 cl = None
287 287 # could have been made None by the previous if
288 288 if cl is None:
289 289 cl = copy.copy(unfichangelog)
290 290 cl.filteredrevs = revs
291 291 object.__setattr__(self, r'_clcache', cl)
292 292 object.__setattr__(self, r'_clcachekey', newkey)
293 293 return cl
294 294
295 295 def unfiltered(self):
296 296 """Return an unfiltered version of a repo"""
297 297 return self._unfilteredrepo
298 298
299 299 def filtered(self, name, visibilityexceptions=None):
300 300 """Return a filtered version of a repository"""
301 301 if name == self.filtername and not visibilityexceptions:
302 302 return self
303 303 return self.unfiltered().filtered(name, visibilityexceptions)
304 304
305 305 def __repr__(self):
306 306 return r'<%s:%s %r>' % (
307 307 self.__class__.__name__,
308 308 pycompat.sysstr(self.filtername),
309 309 self.unfiltered(),
310 310 )
311 311
312 312 # every attribute access is forwarded to the proxied repo
313 313 def __getattr__(self, attr):
314 314 return getattr(self._unfilteredrepo, attr)
315 315
316 316 def __setattr__(self, attr, value):
317 317 return setattr(self._unfilteredrepo, attr, value)
318 318
319 319 def __delattr__(self, attr):
320 320 return delattr(self._unfilteredrepo, attr)
321 321
322 322
323 323 # Python <3.4 easily leaks types via __mro__. See
324 324 # https://bugs.python.org/issue17950. We cache dynamically created types
325 325 # so they won't be leaked on every invocation of repo.filtered().
326 326 _filteredrepotypes = weakref.WeakKeyDictionary()
327 327
328 328
329 329 def newtype(base):
330 330 """Create a new type with the repoview mixin and the given base class"""
331 331 if base not in _filteredrepotypes:
332 332
333 333 class filteredrepo(repoview, base):
334 334 pass
335 335
336 336 _filteredrepotypes[base] = filteredrepo
337 337 return _filteredrepotypes[base]
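Usage sketch for newtype, with a hypothetical stand-in for a concrete repo class: the returned type inherits from both repoview and the given base, so method lookups hit the repoview overrides (notably the filtered changelog property) first, and repeated calls with the same base reuse the cached type:

class fakerepo(object):  # hypothetical stand-in for a localrepository class
    pass

proxycls = newtype(fakerepo)
assert issubclass(proxycls, repoview) and issubclass(proxycls, fakerepo)
assert newtype(fakerepo) is proxycls  # cached in _filteredrepotypes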