revlog: add a `_get_decompressor` method...
Author: marmoute
Changeset: r48028:eac3591a (branch: default)
@@ -1,2696 +1,2697
1 1 # configitems.py - centralized declaration of configuration options
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
19 19 def loadconfigtable(ui, extname, configtable):
20 20 """update config item known to the ui with the extension ones"""
21 21 for section, items in sorted(configtable.items()):
22 22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 23 knownkeys = set(knownitems)
24 24 newkeys = set(items)
25 25 for key in sorted(knownkeys & newkeys):
26 26 msg = b"extension '%s' overwrite config item '%s.%s'"
27 27 msg %= (extname, section, key)
28 28 ui.develwarn(msg, config=b'warn-config')
29 29
30 30 knownitems.update(items)
31 31
32 32
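For context, this is the registration flow that feeds the function above: an extension declares its own configtable, fills it through registrar.configitem, and the extension loader then hands the table to loadconfigtable. A minimal sketch (the 'myext' name and its option are hypothetical):

    from mercurial import registrar

    configtable = {}
    configitem = registrar.configitem(configtable)
    configitem(b'myext', b'enabled', default=False)  # hypothetical option

    # At load time the extension machinery effectively runs:
    #   loadconfigtable(ui, b'myext', configtable)
    # after which ui._knownconfig knows about 'myext.enabled'.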
33 33 class configitem(object):
34 34 """represent a known config item
35 35
36 36 :section: the official config section in which to find this item,
37 37 :name: the official name within the section,
38 38 :default: default value for this item,
39 39 :alias: optional list of tuples as alternatives,
40 40 :generic: this is a generic definition; the name is matched as a regular expression.
41 41 """
42 42
43 43 def __init__(
44 44 self,
45 45 section,
46 46 name,
47 47 default=None,
48 48 alias=(),
49 49 generic=False,
50 50 priority=0,
51 51 experimental=False,
52 52 ):
53 53 self.section = section
54 54 self.name = name
55 55 self.default = default
56 56 self.alias = list(alias)
57 57 self.generic = generic
58 58 self.priority = priority
59 59 self.experimental = experimental
60 60 self._re = None
61 61 if generic:
62 62 self._re = re.compile(self.name)
63 63
64 64
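When generic=True, the name is treated as a regular expression and compiled once in __init__. A small sketch using one of the patterns registered later in this file:

    item = configitem(
        b'merge-tools',
        br'.*\.args$',
        default=b'$local $base $other',
        generic=True,
    )
    item._re.match(b'kdiff3.args')    # matches
    item._re.match(b'kdiff3.binary')  # None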
65 65 class itemregister(dict):
66 66 """A specialized dictionary that can handle wild-card selection"""
67 67
68 68 def __init__(self):
69 69 super(itemregister, self).__init__()
70 70 self._generics = set()
71 71
72 72 def update(self, other):
73 73 super(itemregister, self).update(other)
74 74 self._generics.update(other._generics)
75 75
76 76 def __setitem__(self, key, item):
77 77 super(itemregister, self).__setitem__(key, item)
78 78 if item.generic:
79 79 self._generics.add(item)
80 80
81 81 def get(self, key):
82 82 baseitem = super(itemregister, self).get(key)
83 83 if baseitem is not None and not baseitem.generic:
84 84 return baseitem
85 85
86 86 # search for a matching generic item
87 87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 88 for item in generics:
89 89 # we use 'match' instead of 'search' to make the matching simpler
90 90 # for people unfamiliar with regular expressions. Having the match
91 91 # rooted to the start of the string will produce less surprising
92 92 # results for users writing simple regexes for sub-attributes.
93 93 #
94 94 # For example, using "color\..*" with match produces an unsurprising
95 95 # result, while using search could suddenly match apparently
96 96 # unrelated configuration that happens to contain "color."
97 97 # anywhere. This is a tradeoff where we favor requiring ".*" in
98 98 # some patterns to avoid the need to prefix most patterns with "^".
99 99 # The "^" seems more error prone.
100 100 if item._re.match(key):
101 101 return item
102 102
103 103 return None
104 104
105 105
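Exact keys win; otherwise the generic patterns are tried in (priority, name) order, so a lower priority value is consulted first. That is why the specific merge-tools patterns registered below carry priority=-1: they must beat the catch-all '.*'. A sketch of that interaction:

    reg = itemregister()
    catchall = configitem(b'merge-tools', b'.*', default=None, generic=True)
    args = configitem(
        b'merge-tools',
        br'.*\.args$',
        default=b'$local $base $other',
        generic=True,
        priority=-1,
    )
    reg[catchall.name] = catchall
    reg[args.name] = args

    reg.get(b'kdiff3.args')  # -> args item (priority -1 sorts before 0)
    reg.get(b'kdiff3')       # -> catch-all item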
106 106 coreitems = {}
107 107
108 108
109 109 def _register(configtable, *args, **kwargs):
110 110 item = configitem(*args, **kwargs)
111 111 section = configtable.setdefault(item.section, itemregister())
112 112 if item.name in section:
113 113 msg = b"duplicated config item registration for '%s.%s'"
114 114 raise error.ProgrammingError(msg % (item.section, item.name))
115 115 section[item.name] = item
116 116
117 117
118 118 # special value for case where the default is derived from other values
119 119 dynamicdefault = object()
120 120
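Consumers detect the sentinel by identity, never by value. An illustrative helper (not part of this module):

    def resolve_default(item, compute_dynamic):
        # dynamicdefault is a bare object(), so only an `is` check is meaningful.
        if item.default is dynamicdefault:
            return compute_dynamic()
        return item.default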
121 121 # Registering actual config items
122 122
123 123
124 124 def getitemregister(configtable):
125 125 f = functools.partial(_register, configtable)
126 126 # export pseudo enum as configitem.*
127 127 f.dynamicdefault = dynamicdefault
128 128 return f
129 129
130 130
131 131 coreconfigitem = getitemregister(coreitems)
132 132
133 133
134 134 def _registerdiffopts(section, configprefix=b''):
135 135 coreconfigitem(
136 136 section,
137 137 configprefix + b'nodates',
138 138 default=False,
139 139 )
140 140 coreconfigitem(
141 141 section,
142 142 configprefix + b'showfunc',
143 143 default=False,
144 144 )
145 145 coreconfigitem(
146 146 section,
147 147 configprefix + b'unified',
148 148 default=None,
149 149 )
150 150 coreconfigitem(
151 151 section,
152 152 configprefix + b'git',
153 153 default=False,
154 154 )
155 155 coreconfigitem(
156 156 section,
157 157 configprefix + b'ignorews',
158 158 default=False,
159 159 )
160 160 coreconfigitem(
161 161 section,
162 162 configprefix + b'ignorewsamount',
163 163 default=False,
164 164 )
165 165 coreconfigitem(
166 166 section,
167 167 configprefix + b'ignoreblanklines',
168 168 default=False,
169 169 )
170 170 coreconfigitem(
171 171 section,
172 172 configprefix + b'ignorewseol',
173 173 default=False,
174 174 )
175 175 coreconfigitem(
176 176 section,
177 177 configprefix + b'nobinary',
178 178 default=False,
179 179 )
180 180 coreconfigitem(
181 181 section,
182 182 configprefix + b'noprefix',
183 183 default=False,
184 184 )
185 185 coreconfigitem(
186 186 section,
187 187 configprefix + b'word-diff',
188 188 default=False,
189 189 )
190 190
191 191
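Each call to the helper above registers the whole family of diff knobs under one prefix. For instance, once _registerdiffopts(section=b'annotate') below has run, the core register resolves keys such as:

    coreitems[b'annotate'].get(b'nodates').default    # False
    coreitems[b'annotate'].get(b'showfunc').default   # False
    coreitems[b'annotate'].get(b'word-diff').default  # False

and the b'commands' calls that pass configprefix yield names like 'commands.commit.interactive.git'.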
192 192 coreconfigitem(
193 193 b'alias',
194 194 b'.*',
195 195 default=dynamicdefault,
196 196 generic=True,
197 197 )
198 198 coreconfigitem(
199 199 b'auth',
200 200 b'cookiefile',
201 201 default=None,
202 202 )
203 203 _registerdiffopts(section=b'annotate')
204 204 # bookmarks.pushing: internal hack for discovery
205 205 coreconfigitem(
206 206 b'bookmarks',
207 207 b'pushing',
208 208 default=list,
209 209 )
210 210 # bundle.mainreporoot: internal hack for bundlerepo
211 211 coreconfigitem(
212 212 b'bundle',
213 213 b'mainreporoot',
214 214 default=b'',
215 215 )
216 216 coreconfigitem(
217 217 b'censor',
218 218 b'policy',
219 219 default=b'abort',
220 220 experimental=True,
221 221 )
222 222 coreconfigitem(
223 223 b'chgserver',
224 224 b'idletimeout',
225 225 default=3600,
226 226 )
227 227 coreconfigitem(
228 228 b'chgserver',
229 229 b'skiphash',
230 230 default=False,
231 231 )
232 232 coreconfigitem(
233 233 b'cmdserver',
234 234 b'log',
235 235 default=None,
236 236 )
237 237 coreconfigitem(
238 238 b'cmdserver',
239 239 b'max-log-files',
240 240 default=7,
241 241 )
242 242 coreconfigitem(
243 243 b'cmdserver',
244 244 b'max-log-size',
245 245 default=b'1 MB',
246 246 )
247 247 coreconfigitem(
248 248 b'cmdserver',
249 249 b'max-repo-cache',
250 250 default=0,
251 251 experimental=True,
252 252 )
253 253 coreconfigitem(
254 254 b'cmdserver',
255 255 b'message-encodings',
256 256 default=list,
257 257 )
258 258 coreconfigitem(
259 259 b'cmdserver',
260 260 b'track-log',
261 261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
262 262 )
263 263 coreconfigitem(
264 264 b'cmdserver',
265 265 b'shutdown-on-interrupt',
266 266 default=True,
267 267 )
268 268 coreconfigitem(
269 269 b'color',
270 270 b'.*',
271 271 default=None,
272 272 generic=True,
273 273 )
274 274 coreconfigitem(
275 275 b'color',
276 276 b'mode',
277 277 default=b'auto',
278 278 )
279 279 coreconfigitem(
280 280 b'color',
281 281 b'pagermode',
282 282 default=dynamicdefault,
283 283 )
284 284 coreconfigitem(
285 285 b'command-templates',
286 286 b'graphnode',
287 287 default=None,
288 288 alias=[(b'ui', b'graphnodetemplate')],
289 289 )
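The alias list keeps retired spellings working: a lookup of the canonical name falls back to values stored under the old one. A hedged sketch, where ui stands for a mercurial.ui.ui instance:

    # A value set under the legacy name...
    #   [ui]
    #   graphnodetemplate = {node|short}
    # ...is still returned when reading the canonical item:
    ui.config(b'command-templates', b'graphnode')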
290 290 coreconfigitem(
291 291 b'command-templates',
292 292 b'log',
293 293 default=None,
294 294 alias=[(b'ui', b'logtemplate')],
295 295 )
296 296 coreconfigitem(
297 297 b'command-templates',
298 298 b'mergemarker',
299 299 default=(
300 300 b'{node|short} '
301 301 b'{ifeq(tags, "tip", "", '
302 302 b'ifeq(tags, "", "", "{tags} "))}'
303 303 b'{if(bookmarks, "{bookmarks} ")}'
304 304 b'{ifeq(branch, "default", "", "{branch} ")}'
305 305 b'- {author|user}: {desc|firstline}'
306 306 ),
307 307 alias=[(b'ui', b'mergemarkertemplate')],
308 308 )
309 309 coreconfigitem(
310 310 b'command-templates',
311 311 b'pre-merge-tool-output',
312 312 default=None,
313 313 alias=[(b'ui', b'pre-merge-tool-output-template')],
314 314 )
315 315 coreconfigitem(
316 316 b'command-templates',
317 317 b'oneline-summary',
318 318 default=None,
319 319 )
320 320 coreconfigitem(
321 321 b'command-templates',
322 322 b'oneline-summary.*',
323 323 default=dynamicdefault,
324 324 generic=True,
325 325 )
326 326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
327 327 coreconfigitem(
328 328 b'commands',
329 329 b'commit.post-status',
330 330 default=False,
331 331 )
332 332 coreconfigitem(
333 333 b'commands',
334 334 b'grep.all-files',
335 335 default=False,
336 336 experimental=True,
337 337 )
338 338 coreconfigitem(
339 339 b'commands',
340 340 b'merge.require-rev',
341 341 default=False,
342 342 )
343 343 coreconfigitem(
344 344 b'commands',
345 345 b'push.require-revs',
346 346 default=False,
347 347 )
348 348 coreconfigitem(
349 349 b'commands',
350 350 b'resolve.confirm',
351 351 default=False,
352 352 )
353 353 coreconfigitem(
354 354 b'commands',
355 355 b'resolve.explicit-re-merge',
356 356 default=False,
357 357 )
358 358 coreconfigitem(
359 359 b'commands',
360 360 b'resolve.mark-check',
361 361 default=b'none',
362 362 )
363 363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
364 364 coreconfigitem(
365 365 b'commands',
366 366 b'show.aliasprefix',
367 367 default=list,
368 368 )
369 369 coreconfigitem(
370 370 b'commands',
371 371 b'status.relative',
372 372 default=False,
373 373 )
374 374 coreconfigitem(
375 375 b'commands',
376 376 b'status.skipstates',
377 377 default=[],
378 378 experimental=True,
379 379 )
380 380 coreconfigitem(
381 381 b'commands',
382 382 b'status.terse',
383 383 default=b'',
384 384 )
385 385 coreconfigitem(
386 386 b'commands',
387 387 b'status.verbose',
388 388 default=False,
389 389 )
390 390 coreconfigitem(
391 391 b'commands',
392 392 b'update.check',
393 393 default=None,
394 394 )
395 395 coreconfigitem(
396 396 b'commands',
397 397 b'update.requiredest',
398 398 default=False,
399 399 )
400 400 coreconfigitem(
401 401 b'committemplate',
402 402 b'.*',
403 403 default=None,
404 404 generic=True,
405 405 )
406 406 coreconfigitem(
407 407 b'convert',
408 408 b'bzr.saverev',
409 409 default=True,
410 410 )
411 411 coreconfigitem(
412 412 b'convert',
413 413 b'cvsps.cache',
414 414 default=True,
415 415 )
416 416 coreconfigitem(
417 417 b'convert',
418 418 b'cvsps.fuzz',
419 419 default=60,
420 420 )
421 421 coreconfigitem(
422 422 b'convert',
423 423 b'cvsps.logencoding',
424 424 default=None,
425 425 )
426 426 coreconfigitem(
427 427 b'convert',
428 428 b'cvsps.mergefrom',
429 429 default=None,
430 430 )
431 431 coreconfigitem(
432 432 b'convert',
433 433 b'cvsps.mergeto',
434 434 default=None,
435 435 )
436 436 coreconfigitem(
437 437 b'convert',
438 438 b'git.committeractions',
439 439 default=lambda: [b'messagedifferent'],
440 440 )
441 441 coreconfigitem(
442 442 b'convert',
443 443 b'git.extrakeys',
444 444 default=list,
445 445 )
446 446 coreconfigitem(
447 447 b'convert',
448 448 b'git.findcopiesharder',
449 449 default=False,
450 450 )
451 451 coreconfigitem(
452 452 b'convert',
453 453 b'git.remoteprefix',
454 454 default=b'remote',
455 455 )
456 456 coreconfigitem(
457 457 b'convert',
458 458 b'git.renamelimit',
459 459 default=400,
460 460 )
461 461 coreconfigitem(
462 462 b'convert',
463 463 b'git.saverev',
464 464 default=True,
465 465 )
466 466 coreconfigitem(
467 467 b'convert',
468 468 b'git.similarity',
469 469 default=50,
470 470 )
471 471 coreconfigitem(
472 472 b'convert',
473 473 b'git.skipsubmodules',
474 474 default=False,
475 475 )
476 476 coreconfigitem(
477 477 b'convert',
478 478 b'hg.clonebranches',
479 479 default=False,
480 480 )
481 481 coreconfigitem(
482 482 b'convert',
483 483 b'hg.ignoreerrors',
484 484 default=False,
485 485 )
486 486 coreconfigitem(
487 487 b'convert',
488 488 b'hg.preserve-hash',
489 489 default=False,
490 490 )
491 491 coreconfigitem(
492 492 b'convert',
493 493 b'hg.revs',
494 494 default=None,
495 495 )
496 496 coreconfigitem(
497 497 b'convert',
498 498 b'hg.saverev',
499 499 default=False,
500 500 )
501 501 coreconfigitem(
502 502 b'convert',
503 503 b'hg.sourcename',
504 504 default=None,
505 505 )
506 506 coreconfigitem(
507 507 b'convert',
508 508 b'hg.startrev',
509 509 default=None,
510 510 )
511 511 coreconfigitem(
512 512 b'convert',
513 513 b'hg.tagsbranch',
514 514 default=b'default',
515 515 )
516 516 coreconfigitem(
517 517 b'convert',
518 518 b'hg.usebranchnames',
519 519 default=True,
520 520 )
521 521 coreconfigitem(
522 522 b'convert',
523 523 b'ignoreancestorcheck',
524 524 default=False,
525 525 experimental=True,
526 526 )
527 527 coreconfigitem(
528 528 b'convert',
529 529 b'localtimezone',
530 530 default=False,
531 531 )
532 532 coreconfigitem(
533 533 b'convert',
534 534 b'p4.encoding',
535 535 default=dynamicdefault,
536 536 )
537 537 coreconfigitem(
538 538 b'convert',
539 539 b'p4.startrev',
540 540 default=0,
541 541 )
542 542 coreconfigitem(
543 543 b'convert',
544 544 b'skiptags',
545 545 default=False,
546 546 )
547 547 coreconfigitem(
548 548 b'convert',
549 549 b'svn.debugsvnlog',
550 550 default=True,
551 551 )
552 552 coreconfigitem(
553 553 b'convert',
554 554 b'svn.trunk',
555 555 default=None,
556 556 )
557 557 coreconfigitem(
558 558 b'convert',
559 559 b'svn.tags',
560 560 default=None,
561 561 )
562 562 coreconfigitem(
563 563 b'convert',
564 564 b'svn.branches',
565 565 default=None,
566 566 )
567 567 coreconfigitem(
568 568 b'convert',
569 569 b'svn.startrev',
570 570 default=0,
571 571 )
572 572 coreconfigitem(
573 573 b'convert',
574 574 b'svn.dangerous-set-commit-dates',
575 575 default=False,
576 576 )
577 577 coreconfigitem(
578 578 b'debug',
579 579 b'dirstate.delaywrite',
580 580 default=0,
581 581 )
582 582 coreconfigitem(
583 583 b'debug',
584 584 b'revlog.verifyposition.changelog',
585 585 default=b'',
586 586 )
587 587 coreconfigitem(
588 588 b'defaults',
589 589 b'.*',
590 590 default=None,
591 591 generic=True,
592 592 )
593 593 coreconfigitem(
594 594 b'devel',
595 595 b'all-warnings',
596 596 default=False,
597 597 )
598 598 coreconfigitem(
599 599 b'devel',
600 600 b'bundle2.debug',
601 601 default=False,
602 602 )
603 603 coreconfigitem(
604 604 b'devel',
605 605 b'bundle.delta',
606 606 default=b'',
607 607 )
608 608 coreconfigitem(
609 609 b'devel',
610 610 b'cache-vfs',
611 611 default=None,
612 612 )
613 613 coreconfigitem(
614 614 b'devel',
615 615 b'check-locks',
616 616 default=False,
617 617 )
618 618 coreconfigitem(
619 619 b'devel',
620 620 b'check-relroot',
621 621 default=False,
622 622 )
623 623 # Track copy information for all files, not just "added" ones (very slow)
624 624 coreconfigitem(
625 625 b'devel',
626 626 b'copy-tracing.trace-all-files',
627 627 default=False,
628 628 )
629 629 coreconfigitem(
630 630 b'devel',
631 631 b'default-date',
632 632 default=None,
633 633 )
634 634 coreconfigitem(
635 635 b'devel',
636 636 b'deprec-warn',
637 637 default=False,
638 638 )
639 639 coreconfigitem(
640 640 b'devel',
641 641 b'disableloaddefaultcerts',
642 642 default=False,
643 643 )
644 644 coreconfigitem(
645 645 b'devel',
646 646 b'warn-empty-changegroup',
647 647 default=False,
648 648 )
649 649 coreconfigitem(
650 650 b'devel',
651 651 b'legacy.exchange',
652 652 default=list,
653 653 )
654 654 # When True, revlogs use a special reference version of the nodemap that is
655 655 # not performant but is "known" to behave properly.
656 656 coreconfigitem(
657 657 b'devel',
658 658 b'persistent-nodemap',
659 659 default=False,
660 660 )
661 661 coreconfigitem(
662 662 b'devel',
663 663 b'servercafile',
664 664 default=b'',
665 665 )
666 666 coreconfigitem(
667 667 b'devel',
668 668 b'serverexactprotocol',
669 669 default=b'',
670 670 )
671 671 coreconfigitem(
672 672 b'devel',
673 673 b'serverrequirecert',
674 674 default=False,
675 675 )
676 676 coreconfigitem(
677 677 b'devel',
678 678 b'strip-obsmarkers',
679 679 default=True,
680 680 )
681 681 coreconfigitem(
682 682 b'devel',
683 683 b'warn-config',
684 684 default=None,
685 685 )
686 686 coreconfigitem(
687 687 b'devel',
688 688 b'warn-config-default',
689 689 default=None,
690 690 )
691 691 coreconfigitem(
692 692 b'devel',
693 693 b'user.obsmarker',
694 694 default=None,
695 695 )
696 696 coreconfigitem(
697 697 b'devel',
698 698 b'warn-config-unknown',
699 699 default=None,
700 700 )
701 701 coreconfigitem(
702 702 b'devel',
703 703 b'debug.copies',
704 704 default=False,
705 705 )
706 706 coreconfigitem(
707 707 b'devel',
708 708 b'copy-tracing.multi-thread',
709 709 default=True,
710 710 )
711 711 coreconfigitem(
712 712 b'devel',
713 713 b'debug.extensions',
714 714 default=False,
715 715 )
716 716 coreconfigitem(
717 717 b'devel',
718 718 b'debug.repo-filters',
719 719 default=False,
720 720 )
721 721 coreconfigitem(
722 722 b'devel',
723 723 b'debug.peer-request',
724 724 default=False,
725 725 )
726 726 # If discovery.exchange-heads is False, the discovery will not start with
727 727 # remote head fetching and local head querying.
728 728 coreconfigitem(
729 729 b'devel',
730 730 b'discovery.exchange-heads',
731 731 default=True,
732 732 )
733 733 # If discovery.grow-sample is False, the sample size used in set discovery will
734 734 # not be increased through the process
735 735 coreconfigitem(
736 736 b'devel',
737 737 b'discovery.grow-sample',
738 738 default=True,
739 739 )
740 740 # When discovery.grow-sample.dynamic is True (the default), the sample size is
741 741 # adapted to the shape of the undecided set (it is set to the max of:
742 742 # <target-size>, len(roots(undecided)), len(heads(undecided))).
743 743 coreconfigitem(
744 744 b'devel',
745 745 b'discovery.grow-sample.dynamic',
746 746 default=True,
747 747 )
748 748 # discovery.grow-sample.rate controls the rate at which the sample grows
749 749 coreconfigitem(
750 750 b'devel',
751 751 b'discovery.grow-sample.rate',
752 752 default=1.05,
753 753 )
754 754 # If discovery.randomize is False, random sampling during discovery is
755 755 # deterministic. It is meant for integration tests.
756 756 coreconfigitem(
757 757 b'devel',
758 758 b'discovery.randomize',
759 759 default=True,
760 760 )
761 761 # Control the initial size of the discovery sample
762 762 coreconfigitem(
763 763 b'devel',
764 764 b'discovery.sample-size',
765 765 default=200,
766 766 )
767 767 # Control the sample size used for the first round of discovery
768 768 coreconfigitem(
769 769 b'devel',
770 770 b'discovery.sample-size.initial',
771 771 default=100,
772 772 )
773 773 _registerdiffopts(section=b'diff')
774 774 coreconfigitem(
775 775 b'diff',
776 776 b'merge',
777 777 default=False,
778 778 experimental=True,
779 779 )
780 780 coreconfigitem(
781 781 b'email',
782 782 b'bcc',
783 783 default=None,
784 784 )
785 785 coreconfigitem(
786 786 b'email',
787 787 b'cc',
788 788 default=None,
789 789 )
790 790 coreconfigitem(
791 791 b'email',
792 792 b'charsets',
793 793 default=list,
794 794 )
795 795 coreconfigitem(
796 796 b'email',
797 797 b'from',
798 798 default=None,
799 799 )
800 800 coreconfigitem(
801 801 b'email',
802 802 b'method',
803 803 default=b'smtp',
804 804 )
805 805 coreconfigitem(
806 806 b'email',
807 807 b'reply-to',
808 808 default=None,
809 809 )
810 810 coreconfigitem(
811 811 b'email',
812 812 b'to',
813 813 default=None,
814 814 )
815 815 coreconfigitem(
816 816 b'experimental',
817 817 b'archivemetatemplate',
818 818 default=dynamicdefault,
819 819 )
820 820 coreconfigitem(
821 821 b'experimental',
822 822 b'auto-publish',
823 823 default=b'publish',
824 824 )
825 825 coreconfigitem(
826 826 b'experimental',
827 827 b'bundle-phases',
828 828 default=False,
829 829 )
830 830 coreconfigitem(
831 831 b'experimental',
832 832 b'bundle2-advertise',
833 833 default=True,
834 834 )
835 835 coreconfigitem(
836 836 b'experimental',
837 837 b'bundle2-output-capture',
838 838 default=False,
839 839 )
840 840 coreconfigitem(
841 841 b'experimental',
842 842 b'bundle2.pushback',
843 843 default=False,
844 844 )
845 845 coreconfigitem(
846 846 b'experimental',
847 847 b'bundle2lazylocking',
848 848 default=False,
849 849 )
850 850 coreconfigitem(
851 851 b'experimental',
852 852 b'bundlecomplevel',
853 853 default=None,
854 854 )
855 855 coreconfigitem(
856 856 b'experimental',
857 857 b'bundlecomplevel.bzip2',
858 858 default=None,
859 859 )
860 860 coreconfigitem(
861 861 b'experimental',
862 862 b'bundlecomplevel.gzip',
863 863 default=None,
864 864 )
865 865 coreconfigitem(
866 866 b'experimental',
867 867 b'bundlecomplevel.none',
868 868 default=None,
869 869 )
870 870 coreconfigitem(
871 871 b'experimental',
872 872 b'bundlecomplevel.zstd',
873 873 default=None,
874 874 )
875 875 coreconfigitem(
876 876 b'experimental',
877 877 b'bundlecompthreads',
878 878 default=None,
879 879 )
880 880 coreconfigitem(
881 881 b'experimental',
882 882 b'bundlecompthreads.bzip2',
883 883 default=None,
884 884 )
885 885 coreconfigitem(
886 886 b'experimental',
887 887 b'bundlecompthreads.gzip',
888 888 default=None,
889 889 )
890 890 coreconfigitem(
891 891 b'experimental',
892 892 b'bundlecompthreads.none',
893 893 default=None,
894 894 )
895 895 coreconfigitem(
896 896 b'experimental',
897 897 b'bundlecompthreads.zstd',
898 898 default=None,
899 899 )
900 900 coreconfigitem(
901 901 b'experimental',
902 902 b'changegroup3',
903 903 default=False,
904 904 )
905 905 coreconfigitem(
906 906 b'experimental',
907 907 b'changegroup4',
908 908 default=False,
909 909 )
910 910 coreconfigitem(
911 911 b'experimental',
912 912 b'cleanup-as-archived',
913 913 default=False,
914 914 )
915 915 coreconfigitem(
916 916 b'experimental',
917 917 b'clientcompressionengines',
918 918 default=list,
919 919 )
920 920 coreconfigitem(
921 921 b'experimental',
922 922 b'copytrace',
923 923 default=b'on',
924 924 )
925 925 coreconfigitem(
926 926 b'experimental',
927 927 b'copytrace.movecandidateslimit',
928 928 default=100,
929 929 )
930 930 coreconfigitem(
931 931 b'experimental',
932 932 b'copytrace.sourcecommitlimit',
933 933 default=100,
934 934 )
935 935 coreconfigitem(
936 936 b'experimental',
937 937 b'copies.read-from',
938 938 default=b"filelog-only",
939 939 )
940 940 coreconfigitem(
941 941 b'experimental',
942 942 b'copies.write-to',
943 943 default=b'filelog-only',
944 944 )
945 945 coreconfigitem(
946 946 b'experimental',
947 947 b'crecordtest',
948 948 default=None,
949 949 )
950 950 coreconfigitem(
951 951 b'experimental',
952 952 b'directaccess',
953 953 default=False,
954 954 )
955 955 coreconfigitem(
956 956 b'experimental',
957 957 b'directaccess.revnums',
958 958 default=False,
959 959 )
960 960 coreconfigitem(
961 961 b'experimental',
962 962 b'dirstate-tree.in-memory',
963 963 default=False,
964 964 )
965 965 coreconfigitem(
966 966 b'experimental',
967 967 b'editortmpinhg',
968 968 default=False,
969 969 )
970 970 coreconfigitem(
971 971 b'experimental',
972 972 b'evolution',
973 973 default=list,
974 974 )
975 975 coreconfigitem(
976 976 b'experimental',
977 977 b'evolution.allowdivergence',
978 978 default=False,
979 979 alias=[(b'experimental', b'allowdivergence')],
980 980 )
981 981 coreconfigitem(
982 982 b'experimental',
983 983 b'evolution.allowunstable',
984 984 default=None,
985 985 )
986 986 coreconfigitem(
987 987 b'experimental',
988 988 b'evolution.createmarkers',
989 989 default=None,
990 990 )
991 991 coreconfigitem(
992 992 b'experimental',
993 993 b'evolution.effect-flags',
994 994 default=True,
995 995 alias=[(b'experimental', b'effect-flags')],
996 996 )
997 997 coreconfigitem(
998 998 b'experimental',
999 999 b'evolution.exchange',
1000 1000 default=None,
1001 1001 )
1002 1002 coreconfigitem(
1003 1003 b'experimental',
1004 1004 b'evolution.bundle-obsmarker',
1005 1005 default=False,
1006 1006 )
1007 1007 coreconfigitem(
1008 1008 b'experimental',
1009 1009 b'evolution.bundle-obsmarker:mandatory',
1010 1010 default=True,
1011 1011 )
1012 1012 coreconfigitem(
1013 1013 b'experimental',
1014 1014 b'log.topo',
1015 1015 default=False,
1016 1016 )
1017 1017 coreconfigitem(
1018 1018 b'experimental',
1019 1019 b'evolution.report-instabilities',
1020 1020 default=True,
1021 1021 )
1022 1022 coreconfigitem(
1023 1023 b'experimental',
1024 1024 b'evolution.track-operation',
1025 1025 default=True,
1026 1026 )
1027 1027 # repo-level config to exclude a revset from visibility
1028 1028 #
1029 1029 # The target use case is to use `share` to expose different subset of the same
1030 1030 # repository, especially server side. See also `server.view`.
1031 1031 coreconfigitem(
1032 1032 b'experimental',
1033 1033 b'extra-filter-revs',
1034 1034 default=None,
1035 1035 )
1036 1036 coreconfigitem(
1037 1037 b'experimental',
1038 1038 b'maxdeltachainspan',
1039 1039 default=-1,
1040 1040 )
1041 1041 # tracks files which were undeleted (merge might delete them but we explicitly
1042 1042 # kept/undeleted them) and creates new filenodes for them
1043 1043 coreconfigitem(
1044 1044 b'experimental',
1045 1045 b'merge-track-salvaged',
1046 1046 default=False,
1047 1047 )
1048 1048 coreconfigitem(
1049 1049 b'experimental',
1050 1050 b'mergetempdirprefix',
1051 1051 default=None,
1052 1052 )
1053 1053 coreconfigitem(
1054 1054 b'experimental',
1055 1055 b'mmapindexthreshold',
1056 1056 default=None,
1057 1057 )
1058 1058 coreconfigitem(
1059 1059 b'experimental',
1060 1060 b'narrow',
1061 1061 default=False,
1062 1062 )
1063 1063 coreconfigitem(
1064 1064 b'experimental',
1065 1065 b'nonnormalparanoidcheck',
1066 1066 default=False,
1067 1067 )
1068 1068 coreconfigitem(
1069 1069 b'experimental',
1070 1070 b'exportableenviron',
1071 1071 default=list,
1072 1072 )
1073 1073 coreconfigitem(
1074 1074 b'experimental',
1075 1075 b'extendedheader.index',
1076 1076 default=None,
1077 1077 )
1078 1078 coreconfigitem(
1079 1079 b'experimental',
1080 1080 b'extendedheader.similarity',
1081 1081 default=False,
1082 1082 )
1083 1083 coreconfigitem(
1084 1084 b'experimental',
1085 1085 b'graphshorten',
1086 1086 default=False,
1087 1087 )
1088 1088 coreconfigitem(
1089 1089 b'experimental',
1090 1090 b'graphstyle.parent',
1091 1091 default=dynamicdefault,
1092 1092 )
1093 1093 coreconfigitem(
1094 1094 b'experimental',
1095 1095 b'graphstyle.missing',
1096 1096 default=dynamicdefault,
1097 1097 )
1098 1098 coreconfigitem(
1099 1099 b'experimental',
1100 1100 b'graphstyle.grandparent',
1101 1101 default=dynamicdefault,
1102 1102 )
1103 1103 coreconfigitem(
1104 1104 b'experimental',
1105 1105 b'hook-track-tags',
1106 1106 default=False,
1107 1107 )
1108 1108 coreconfigitem(
1109 1109 b'experimental',
1110 1110 b'httppeer.advertise-v2',
1111 1111 default=False,
1112 1112 )
1113 1113 coreconfigitem(
1114 1114 b'experimental',
1115 1115 b'httppeer.v2-encoder-order',
1116 1116 default=None,
1117 1117 )
1118 1118 coreconfigitem(
1119 1119 b'experimental',
1120 1120 b'httppostargs',
1121 1121 default=False,
1122 1122 )
1123 1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1124 1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1125 1125
1126 1126 coreconfigitem(
1127 1127 b'experimental',
1128 1128 b'obsmarkers-exchange-debug',
1129 1129 default=False,
1130 1130 )
1131 1131 coreconfigitem(
1132 1132 b'experimental',
1133 1133 b'remotenames',
1134 1134 default=False,
1135 1135 )
1136 1136 coreconfigitem(
1137 1137 b'experimental',
1138 1138 b'removeemptydirs',
1139 1139 default=True,
1140 1140 )
1141 1141 coreconfigitem(
1142 1142 b'experimental',
1143 1143 b'revert.interactive.select-to-keep',
1144 1144 default=False,
1145 1145 )
1146 1146 coreconfigitem(
1147 1147 b'experimental',
1148 1148 b'revisions.prefixhexnode',
1149 1149 default=False,
1150 1150 )
1151 1151 # "out of experimental" todo list.
1152 1152 #
1153 1153 # * include management of a persistent nodemap in the main docket
1154 1154 # * enforce a "no-truncate" policy for mmap safety
1155 1155 # - for censoring operation
1156 1156 # - for stripping operation
1157 1157 # - for rollback operation
1158 1158 # * proper streaming (race free) of the docket file
1159 1159 # * track garbage data to eventually allow rewriting -existing- sidedata.
1160 1160 # * Exchange-wise, we will also need to do something more efficient than
1161 1161 # keeping references to the affected revlogs, especially memory-wise when
1162 1162 # rewriting sidedata.
1163 1163 # * sidedata compression
1164 1164 # * introduce a proper solution to reduce the number of filelog related files.
1165 1165 # * Improvements to consider
1166 # - track compression mode in the index entries instead of the chunks
1166 # - avoid compression header in chunk using the default compression?
1167 # - forbid "inline" compression mode entirely?
1167 1168 # - split the data offset and flag field (the 2 bytes saved are mostly trouble)
1168 1169 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1169 1170 # - keep track of chain base or size (probably not that useful anymore)
1170 1171 # - store data and sidedata in different files
1171 1172 coreconfigitem(
1172 1173 b'experimental',
1173 1174 b'revlogv2',
1174 1175 default=None,
1175 1176 )
1176 1177 coreconfigitem(
1177 1178 b'experimental',
1178 1179 b'revisions.disambiguatewithin',
1179 1180 default=None,
1180 1181 )
1181 1182 coreconfigitem(
1182 1183 b'experimental',
1183 1184 b'rust.index',
1184 1185 default=False,
1185 1186 )
1186 1187 coreconfigitem(
1187 1188 b'experimental',
1188 1189 b'server.filesdata.recommended-batch-size',
1189 1190 default=50000,
1190 1191 )
1191 1192 coreconfigitem(
1192 1193 b'experimental',
1193 1194 b'server.manifestdata.recommended-batch-size',
1194 1195 default=100000,
1195 1196 )
1196 1197 coreconfigitem(
1197 1198 b'experimental',
1198 1199 b'server.stream-narrow-clones',
1199 1200 default=False,
1200 1201 )
1201 1202 coreconfigitem(
1202 1203 b'experimental',
1203 1204 b'single-head-per-branch',
1204 1205 default=False,
1205 1206 )
1206 1207 coreconfigitem(
1207 1208 b'experimental',
1208 1209 b'single-head-per-branch:account-closed-heads',
1209 1210 default=False,
1210 1211 )
1211 1212 coreconfigitem(
1212 1213 b'experimental',
1213 1214 b'single-head-per-branch:public-changes-only',
1214 1215 default=False,
1215 1216 )
1216 1217 coreconfigitem(
1217 1218 b'experimental',
1218 1219 b'sshserver.support-v2',
1219 1220 default=False,
1220 1221 )
1221 1222 coreconfigitem(
1222 1223 b'experimental',
1223 1224 b'sparse-read',
1224 1225 default=False,
1225 1226 )
1226 1227 coreconfigitem(
1227 1228 b'experimental',
1228 1229 b'sparse-read.density-threshold',
1229 1230 default=0.50,
1230 1231 )
1231 1232 coreconfigitem(
1232 1233 b'experimental',
1233 1234 b'sparse-read.min-gap-size',
1234 1235 default=b'65K',
1235 1236 )
1236 1237 coreconfigitem(
1237 1238 b'experimental',
1238 1239 b'treemanifest',
1239 1240 default=False,
1240 1241 )
1241 1242 coreconfigitem(
1242 1243 b'experimental',
1243 1244 b'update.atomic-file',
1244 1245 default=False,
1245 1246 )
1246 1247 coreconfigitem(
1247 1248 b'experimental',
1248 1249 b'sshpeer.advertise-v2',
1249 1250 default=False,
1250 1251 )
1251 1252 coreconfigitem(
1252 1253 b'experimental',
1253 1254 b'web.apiserver',
1254 1255 default=False,
1255 1256 )
1256 1257 coreconfigitem(
1257 1258 b'experimental',
1258 1259 b'web.api.http-v2',
1259 1260 default=False,
1260 1261 )
1261 1262 coreconfigitem(
1262 1263 b'experimental',
1263 1264 b'web.api.debugreflect',
1264 1265 default=False,
1265 1266 )
1266 1267 coreconfigitem(
1267 1268 b'experimental',
1268 1269 b'worker.wdir-get-thread-safe',
1269 1270 default=False,
1270 1271 )
1271 1272 coreconfigitem(
1272 1273 b'experimental',
1273 1274 b'worker.repository-upgrade',
1274 1275 default=False,
1275 1276 )
1276 1277 coreconfigitem(
1277 1278 b'experimental',
1278 1279 b'xdiff',
1279 1280 default=False,
1280 1281 )
1281 1282 coreconfigitem(
1282 1283 b'extensions',
1283 1284 b'.*',
1284 1285 default=None,
1285 1286 generic=True,
1286 1287 )
1287 1288 coreconfigitem(
1288 1289 b'extdata',
1289 1290 b'.*',
1290 1291 default=None,
1291 1292 generic=True,
1292 1293 )
1293 1294 coreconfigitem(
1294 1295 b'format',
1295 1296 b'bookmarks-in-store',
1296 1297 default=False,
1297 1298 )
1298 1299 coreconfigitem(
1299 1300 b'format',
1300 1301 b'chunkcachesize',
1301 1302 default=None,
1302 1303 experimental=True,
1303 1304 )
1304 1305 coreconfigitem(
1305 1306 b'format',
1306 1307 b'dotencode',
1307 1308 default=True,
1308 1309 )
1309 1310 coreconfigitem(
1310 1311 b'format',
1311 1312 b'generaldelta',
1312 1313 default=False,
1313 1314 experimental=True,
1314 1315 )
1315 1316 coreconfigitem(
1316 1317 b'format',
1317 1318 b'manifestcachesize',
1318 1319 default=None,
1319 1320 experimental=True,
1320 1321 )
1321 1322 coreconfigitem(
1322 1323 b'format',
1323 1324 b'maxchainlen',
1324 1325 default=dynamicdefault,
1325 1326 experimental=True,
1326 1327 )
1327 1328 coreconfigitem(
1328 1329 b'format',
1329 1330 b'obsstore-version',
1330 1331 default=None,
1331 1332 )
1332 1333 coreconfigitem(
1333 1334 b'format',
1334 1335 b'sparse-revlog',
1335 1336 default=True,
1336 1337 )
1337 1338 coreconfigitem(
1338 1339 b'format',
1339 1340 b'revlog-compression',
1340 1341 default=lambda: [b'zstd', b'zlib'],
1341 1342 alias=[(b'experimental', b'format.compression')],
1342 1343 )
1343 1344 coreconfigitem(
1344 1345 b'format',
1345 1346 b'usefncache',
1346 1347 default=True,
1347 1348 )
1348 1349 coreconfigitem(
1349 1350 b'format',
1350 1351 b'usegeneraldelta',
1351 1352 default=True,
1352 1353 )
1353 1354 coreconfigitem(
1354 1355 b'format',
1355 1356 b'usestore',
1356 1357 default=True,
1357 1358 )
1358 1359
1359 1360
1360 1361 def _persistent_nodemap_default():
1361 1362 """compute `use-persistent-nodemap` default value
1362 1363
1363 1364 The feature is disabled unless a fast implementation is available.
1364 1365 """
1365 1366 from . import policy
1366 1367
1367 1368 return policy.importrust('revlog') is not None
1368 1369
1369 1370
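Defaults may thus be callables that are evaluated lazily at lookup time; the bare `list` used as a default throughout this file relies on the same convention to produce a fresh mutable value. An illustrative resolver, not the actual ui code:

    def resolve(item):
        # _persistent_nodemap_default and `list` are both invoked here.
        if callable(item.default):
            return item.default()
        return item.default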
1370 1371 coreconfigitem(
1371 1372 b'format',
1372 1373 b'use-persistent-nodemap',
1373 1374 default=_persistent_nodemap_default,
1374 1375 )
1375 1376 coreconfigitem(
1376 1377 b'format',
1377 1378 b'exp-use-copies-side-data-changeset',
1378 1379 default=False,
1379 1380 experimental=True,
1380 1381 )
1381 1382 coreconfigitem(
1382 1383 b'format',
1383 1384 b'use-share-safe',
1384 1385 default=False,
1385 1386 )
1386 1387 coreconfigitem(
1387 1388 b'format',
1388 1389 b'internal-phase',
1389 1390 default=False,
1390 1391 experimental=True,
1391 1392 )
1392 1393 coreconfigitem(
1393 1394 b'fsmonitor',
1394 1395 b'warn_when_unused',
1395 1396 default=True,
1396 1397 )
1397 1398 coreconfigitem(
1398 1399 b'fsmonitor',
1399 1400 b'warn_update_file_count',
1400 1401 default=50000,
1401 1402 )
1402 1403 coreconfigitem(
1403 1404 b'fsmonitor',
1404 1405 b'warn_update_file_count_rust',
1405 1406 default=400000,
1406 1407 )
1407 1408 coreconfigitem(
1408 1409 b'help',
1409 1410 br'hidden-command\..*',
1410 1411 default=False,
1411 1412 generic=True,
1412 1413 )
1413 1414 coreconfigitem(
1414 1415 b'help',
1415 1416 br'hidden-topic\..*',
1416 1417 default=False,
1417 1418 generic=True,
1418 1419 )
1419 1420 coreconfigitem(
1420 1421 b'hooks',
1421 1422 b'[^:]*',
1422 1423 default=dynamicdefault,
1423 1424 generic=True,
1424 1425 )
1425 1426 coreconfigitem(
1426 1427 b'hooks',
1427 1428 b'.*:run-with-plain',
1428 1429 default=True,
1429 1430 generic=True,
1430 1431 )
1431 1432 coreconfigitem(
1432 1433 b'hgweb-paths',
1433 1434 b'.*',
1434 1435 default=list,
1435 1436 generic=True,
1436 1437 )
1437 1438 coreconfigitem(
1438 1439 b'hostfingerprints',
1439 1440 b'.*',
1440 1441 default=list,
1441 1442 generic=True,
1442 1443 )
1443 1444 coreconfigitem(
1444 1445 b'hostsecurity',
1445 1446 b'ciphers',
1446 1447 default=None,
1447 1448 )
1448 1449 coreconfigitem(
1449 1450 b'hostsecurity',
1450 1451 b'minimumprotocol',
1451 1452 default=dynamicdefault,
1452 1453 )
1453 1454 coreconfigitem(
1454 1455 b'hostsecurity',
1455 1456 b'.*:minimumprotocol$',
1456 1457 default=dynamicdefault,
1457 1458 generic=True,
1458 1459 )
1459 1460 coreconfigitem(
1460 1461 b'hostsecurity',
1461 1462 b'.*:ciphers$',
1462 1463 default=dynamicdefault,
1463 1464 generic=True,
1464 1465 )
1465 1466 coreconfigitem(
1466 1467 b'hostsecurity',
1467 1468 b'.*:fingerprints$',
1468 1469 default=list,
1469 1470 generic=True,
1470 1471 )
1471 1472 coreconfigitem(
1472 1473 b'hostsecurity',
1473 1474 b'.*:verifycertsfile$',
1474 1475 default=None,
1475 1476 generic=True,
1476 1477 )
1477 1478
1478 1479 coreconfigitem(
1479 1480 b'http_proxy',
1480 1481 b'always',
1481 1482 default=False,
1482 1483 )
1483 1484 coreconfigitem(
1484 1485 b'http_proxy',
1485 1486 b'host',
1486 1487 default=None,
1487 1488 )
1488 1489 coreconfigitem(
1489 1490 b'http_proxy',
1490 1491 b'no',
1491 1492 default=list,
1492 1493 )
1493 1494 coreconfigitem(
1494 1495 b'http_proxy',
1495 1496 b'passwd',
1496 1497 default=None,
1497 1498 )
1498 1499 coreconfigitem(
1499 1500 b'http_proxy',
1500 1501 b'user',
1501 1502 default=None,
1502 1503 )
1503 1504
1504 1505 coreconfigitem(
1505 1506 b'http',
1506 1507 b'timeout',
1507 1508 default=None,
1508 1509 )
1509 1510
1510 1511 coreconfigitem(
1511 1512 b'logtoprocess',
1512 1513 b'commandexception',
1513 1514 default=None,
1514 1515 )
1515 1516 coreconfigitem(
1516 1517 b'logtoprocess',
1517 1518 b'commandfinish',
1518 1519 default=None,
1519 1520 )
1520 1521 coreconfigitem(
1521 1522 b'logtoprocess',
1522 1523 b'command',
1523 1524 default=None,
1524 1525 )
1525 1526 coreconfigitem(
1526 1527 b'logtoprocess',
1527 1528 b'develwarn',
1528 1529 default=None,
1529 1530 )
1530 1531 coreconfigitem(
1531 1532 b'logtoprocess',
1532 1533 b'uiblocked',
1533 1534 default=None,
1534 1535 )
1535 1536 coreconfigitem(
1536 1537 b'merge',
1537 1538 b'checkunknown',
1538 1539 default=b'abort',
1539 1540 )
1540 1541 coreconfigitem(
1541 1542 b'merge',
1542 1543 b'checkignored',
1543 1544 default=b'abort',
1544 1545 )
1545 1546 coreconfigitem(
1546 1547 b'experimental',
1547 1548 b'merge.checkpathconflicts',
1548 1549 default=False,
1549 1550 )
1550 1551 coreconfigitem(
1551 1552 b'merge',
1552 1553 b'followcopies',
1553 1554 default=True,
1554 1555 )
1555 1556 coreconfigitem(
1556 1557 b'merge',
1557 1558 b'on-failure',
1558 1559 default=b'continue',
1559 1560 )
1560 1561 coreconfigitem(
1561 1562 b'merge',
1562 1563 b'preferancestor',
1563 1564 default=lambda: [b'*'],
1564 1565 experimental=True,
1565 1566 )
1566 1567 coreconfigitem(
1567 1568 b'merge',
1568 1569 b'strict-capability-check',
1569 1570 default=False,
1570 1571 )
1571 1572 coreconfigitem(
1572 1573 b'merge-tools',
1573 1574 b'.*',
1574 1575 default=None,
1575 1576 generic=True,
1576 1577 )
1577 1578 coreconfigitem(
1578 1579 b'merge-tools',
1579 1580 br'.*\.args$',
1580 1581 default=b"$local $base $other",
1581 1582 generic=True,
1582 1583 priority=-1,
1583 1584 )
1584 1585 coreconfigitem(
1585 1586 b'merge-tools',
1586 1587 br'.*\.binary$',
1587 1588 default=False,
1588 1589 generic=True,
1589 1590 priority=-1,
1590 1591 )
1591 1592 coreconfigitem(
1592 1593 b'merge-tools',
1593 1594 br'.*\.check$',
1594 1595 default=list,
1595 1596 generic=True,
1596 1597 priority=-1,
1597 1598 )
1598 1599 coreconfigitem(
1599 1600 b'merge-tools',
1600 1601 br'.*\.checkchanged$',
1601 1602 default=False,
1602 1603 generic=True,
1603 1604 priority=-1,
1604 1605 )
1605 1606 coreconfigitem(
1606 1607 b'merge-tools',
1607 1608 br'.*\.executable$',
1608 1609 default=dynamicdefault,
1609 1610 generic=True,
1610 1611 priority=-1,
1611 1612 )
1612 1613 coreconfigitem(
1613 1614 b'merge-tools',
1614 1615 br'.*\.fixeol$',
1615 1616 default=False,
1616 1617 generic=True,
1617 1618 priority=-1,
1618 1619 )
1619 1620 coreconfigitem(
1620 1621 b'merge-tools',
1621 1622 br'.*\.gui$',
1622 1623 default=False,
1623 1624 generic=True,
1624 1625 priority=-1,
1625 1626 )
1626 1627 coreconfigitem(
1627 1628 b'merge-tools',
1628 1629 br'.*\.mergemarkers$',
1629 1630 default=b'basic',
1630 1631 generic=True,
1631 1632 priority=-1,
1632 1633 )
1633 1634 coreconfigitem(
1634 1635 b'merge-tools',
1635 1636 br'.*\.mergemarkertemplate$',
1636 1637 default=dynamicdefault, # take from command-templates.mergemarker
1637 1638 generic=True,
1638 1639 priority=-1,
1639 1640 )
1640 1641 coreconfigitem(
1641 1642 b'merge-tools',
1642 1643 br'.*\.priority$',
1643 1644 default=0,
1644 1645 generic=True,
1645 1646 priority=-1,
1646 1647 )
1647 1648 coreconfigitem(
1648 1649 b'merge-tools',
1649 1650 br'.*\.premerge$',
1650 1651 default=dynamicdefault,
1651 1652 generic=True,
1652 1653 priority=-1,
1653 1654 )
1654 1655 coreconfigitem(
1655 1656 b'merge-tools',
1656 1657 br'.*\.symlink$',
1657 1658 default=False,
1658 1659 generic=True,
1659 1660 priority=-1,
1660 1661 )
1661 1662 coreconfigitem(
1662 1663 b'pager',
1663 1664 b'attend-.*',
1664 1665 default=dynamicdefault,
1665 1666 generic=True,
1666 1667 )
1667 1668 coreconfigitem(
1668 1669 b'pager',
1669 1670 b'ignore',
1670 1671 default=list,
1671 1672 )
1672 1673 coreconfigitem(
1673 1674 b'pager',
1674 1675 b'pager',
1675 1676 default=dynamicdefault,
1676 1677 )
1677 1678 coreconfigitem(
1678 1679 b'patch',
1679 1680 b'eol',
1680 1681 default=b'strict',
1681 1682 )
1682 1683 coreconfigitem(
1683 1684 b'patch',
1684 1685 b'fuzz',
1685 1686 default=2,
1686 1687 )
1687 1688 coreconfigitem(
1688 1689 b'paths',
1689 1690 b'default',
1690 1691 default=None,
1691 1692 )
1692 1693 coreconfigitem(
1693 1694 b'paths',
1694 1695 b'default-push',
1695 1696 default=None,
1696 1697 )
1697 1698 coreconfigitem(
1698 1699 b'paths',
1699 1700 b'.*',
1700 1701 default=None,
1701 1702 generic=True,
1702 1703 )
1703 1704 coreconfigitem(
1704 1705 b'phases',
1705 1706 b'checksubrepos',
1706 1707 default=b'follow',
1707 1708 )
1708 1709 coreconfigitem(
1709 1710 b'phases',
1710 1711 b'new-commit',
1711 1712 default=b'draft',
1712 1713 )
1713 1714 coreconfigitem(
1714 1715 b'phases',
1715 1716 b'publish',
1716 1717 default=True,
1717 1718 )
1718 1719 coreconfigitem(
1719 1720 b'profiling',
1720 1721 b'enabled',
1721 1722 default=False,
1722 1723 )
1723 1724 coreconfigitem(
1724 1725 b'profiling',
1725 1726 b'format',
1726 1727 default=b'text',
1727 1728 )
1728 1729 coreconfigitem(
1729 1730 b'profiling',
1730 1731 b'freq',
1731 1732 default=1000,
1732 1733 )
1733 1734 coreconfigitem(
1734 1735 b'profiling',
1735 1736 b'limit',
1736 1737 default=30,
1737 1738 )
1738 1739 coreconfigitem(
1739 1740 b'profiling',
1740 1741 b'nested',
1741 1742 default=0,
1742 1743 )
1743 1744 coreconfigitem(
1744 1745 b'profiling',
1745 1746 b'output',
1746 1747 default=None,
1747 1748 )
1748 1749 coreconfigitem(
1749 1750 b'profiling',
1750 1751 b'showmax',
1751 1752 default=0.999,
1752 1753 )
1753 1754 coreconfigitem(
1754 1755 b'profiling',
1755 1756 b'showmin',
1756 1757 default=dynamicdefault,
1757 1758 )
1758 1759 coreconfigitem(
1759 1760 b'profiling',
1760 1761 b'showtime',
1761 1762 default=True,
1762 1763 )
1763 1764 coreconfigitem(
1764 1765 b'profiling',
1765 1766 b'sort',
1766 1767 default=b'inlinetime',
1767 1768 )
1768 1769 coreconfigitem(
1769 1770 b'profiling',
1770 1771 b'statformat',
1771 1772 default=b'hotpath',
1772 1773 )
1773 1774 coreconfigitem(
1774 1775 b'profiling',
1775 1776 b'time-track',
1776 1777 default=dynamicdefault,
1777 1778 )
1778 1779 coreconfigitem(
1779 1780 b'profiling',
1780 1781 b'type',
1781 1782 default=b'stat',
1782 1783 )
1783 1784 coreconfigitem(
1784 1785 b'progress',
1785 1786 b'assume-tty',
1786 1787 default=False,
1787 1788 )
1788 1789 coreconfigitem(
1789 1790 b'progress',
1790 1791 b'changedelay',
1791 1792 default=1,
1792 1793 )
1793 1794 coreconfigitem(
1794 1795 b'progress',
1795 1796 b'clear-complete',
1796 1797 default=True,
1797 1798 )
1798 1799 coreconfigitem(
1799 1800 b'progress',
1800 1801 b'debug',
1801 1802 default=False,
1802 1803 )
1803 1804 coreconfigitem(
1804 1805 b'progress',
1805 1806 b'delay',
1806 1807 default=3,
1807 1808 )
1808 1809 coreconfigitem(
1809 1810 b'progress',
1810 1811 b'disable',
1811 1812 default=False,
1812 1813 )
1813 1814 coreconfigitem(
1814 1815 b'progress',
1815 1816 b'estimateinterval',
1816 1817 default=60.0,
1817 1818 )
1818 1819 coreconfigitem(
1819 1820 b'progress',
1820 1821 b'format',
1821 1822 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1822 1823 )
1823 1824 coreconfigitem(
1824 1825 b'progress',
1825 1826 b'refresh',
1826 1827 default=0.1,
1827 1828 )
1828 1829 coreconfigitem(
1829 1830 b'progress',
1830 1831 b'width',
1831 1832 default=dynamicdefault,
1832 1833 )
1833 1834 coreconfigitem(
1834 1835 b'pull',
1835 1836 b'confirm',
1836 1837 default=False,
1837 1838 )
1838 1839 coreconfigitem(
1839 1840 b'push',
1840 1841 b'pushvars.server',
1841 1842 default=False,
1842 1843 )
1843 1844 coreconfigitem(
1844 1845 b'rewrite',
1845 1846 b'backup-bundle',
1846 1847 default=True,
1847 1848 alias=[(b'ui', b'history-editing-backup')],
1848 1849 )
1849 1850 coreconfigitem(
1850 1851 b'rewrite',
1851 1852 b'update-timestamp',
1852 1853 default=False,
1853 1854 )
1854 1855 coreconfigitem(
1855 1856 b'rewrite',
1856 1857 b'empty-successor',
1857 1858 default=b'skip',
1858 1859 experimental=True,
1859 1860 )
1860 1861 coreconfigitem(
1861 1862 b'storage',
1862 1863 b'new-repo-backend',
1863 1864 default=b'revlogv1',
1864 1865 experimental=True,
1865 1866 )
1866 1867 coreconfigitem(
1867 1868 b'storage',
1868 1869 b'revlog.optimize-delta-parent-choice',
1869 1870 default=True,
1870 1871 alias=[(b'format', b'aggressivemergedeltas')],
1871 1872 )
1872 1873 # experimental as long as rust is experimental (or a C version is implemented)
1873 1874 coreconfigitem(
1874 1875 b'storage',
1875 1876 b'revlog.persistent-nodemap.mmap',
1876 1877 default=True,
1877 1878 )
1878 1879 # experimental as long as format.use-persistent-nodemap is.
1879 1880 coreconfigitem(
1880 1881 b'storage',
1881 1882 b'revlog.persistent-nodemap.slow-path',
1882 1883 default=b"abort",
1883 1884 )
1884 1885
1885 1886 coreconfigitem(
1886 1887 b'storage',
1887 1888 b'revlog.reuse-external-delta',
1888 1889 default=True,
1889 1890 )
1890 1891 coreconfigitem(
1891 1892 b'storage',
1892 1893 b'revlog.reuse-external-delta-parent',
1893 1894 default=None,
1894 1895 )
1895 1896 coreconfigitem(
1896 1897 b'storage',
1897 1898 b'revlog.zlib.level',
1898 1899 default=None,
1899 1900 )
1900 1901 coreconfigitem(
1901 1902 b'storage',
1902 1903 b'revlog.zstd.level',
1903 1904 default=None,
1904 1905 )
1905 1906 coreconfigitem(
1906 1907 b'server',
1907 1908 b'bookmarks-pushkey-compat',
1908 1909 default=True,
1909 1910 )
1910 1911 coreconfigitem(
1911 1912 b'server',
1912 1913 b'bundle1',
1913 1914 default=True,
1914 1915 )
1915 1916 coreconfigitem(
1916 1917 b'server',
1917 1918 b'bundle1gd',
1918 1919 default=None,
1919 1920 )
1920 1921 coreconfigitem(
1921 1922 b'server',
1922 1923 b'bundle1.pull',
1923 1924 default=None,
1924 1925 )
1925 1926 coreconfigitem(
1926 1927 b'server',
1927 1928 b'bundle1gd.pull',
1928 1929 default=None,
1929 1930 )
1930 1931 coreconfigitem(
1931 1932 b'server',
1932 1933 b'bundle1.push',
1933 1934 default=None,
1934 1935 )
1935 1936 coreconfigitem(
1936 1937 b'server',
1937 1938 b'bundle1gd.push',
1938 1939 default=None,
1939 1940 )
1940 1941 coreconfigitem(
1941 1942 b'server',
1942 1943 b'bundle2.stream',
1943 1944 default=True,
1944 1945 alias=[(b'experimental', b'bundle2.stream')],
1945 1946 )
1946 1947 coreconfigitem(
1947 1948 b'server',
1948 1949 b'compressionengines',
1949 1950 default=list,
1950 1951 )
1951 1952 coreconfigitem(
1952 1953 b'server',
1953 1954 b'concurrent-push-mode',
1954 1955 default=b'check-related',
1955 1956 )
1956 1957 coreconfigitem(
1957 1958 b'server',
1958 1959 b'disablefullbundle',
1959 1960 default=False,
1960 1961 )
1961 1962 coreconfigitem(
1962 1963 b'server',
1963 1964 b'maxhttpheaderlen',
1964 1965 default=1024,
1965 1966 )
1966 1967 coreconfigitem(
1967 1968 b'server',
1968 1969 b'pullbundle',
1969 1970 default=False,
1970 1971 )
1971 1972 coreconfigitem(
1972 1973 b'server',
1973 1974 b'preferuncompressed',
1974 1975 default=False,
1975 1976 )
1976 1977 coreconfigitem(
1977 1978 b'server',
1978 1979 b'streamunbundle',
1979 1980 default=False,
1980 1981 )
1981 1982 coreconfigitem(
1982 1983 b'server',
1983 1984 b'uncompressed',
1984 1985 default=True,
1985 1986 )
1986 1987 coreconfigitem(
1987 1988 b'server',
1988 1989 b'uncompressedallowsecret',
1989 1990 default=False,
1990 1991 )
1991 1992 coreconfigitem(
1992 1993 b'server',
1993 1994 b'view',
1994 1995 default=b'served',
1995 1996 )
1996 1997 coreconfigitem(
1997 1998 b'server',
1998 1999 b'validate',
1999 2000 default=False,
2000 2001 )
2001 2002 coreconfigitem(
2002 2003 b'server',
2003 2004 b'zliblevel',
2004 2005 default=-1,
2005 2006 )
2006 2007 coreconfigitem(
2007 2008 b'server',
2008 2009 b'zstdlevel',
2009 2010 default=3,
2010 2011 )
2011 2012 coreconfigitem(
2012 2013 b'share',
2013 2014 b'pool',
2014 2015 default=None,
2015 2016 )
2016 2017 coreconfigitem(
2017 2018 b'share',
2018 2019 b'poolnaming',
2019 2020 default=b'identity',
2020 2021 )
2021 2022 coreconfigitem(
2022 2023 b'share',
2023 2024 b'safe-mismatch.source-not-safe',
2024 2025 default=b'abort',
2025 2026 )
2026 2027 coreconfigitem(
2027 2028 b'share',
2028 2029 b'safe-mismatch.source-safe',
2029 2030 default=b'abort',
2030 2031 )
2031 2032 coreconfigitem(
2032 2033 b'share',
2033 2034 b'safe-mismatch.source-not-safe.warn',
2034 2035 default=True,
2035 2036 )
2036 2037 coreconfigitem(
2037 2038 b'share',
2038 2039 b'safe-mismatch.source-safe.warn',
2039 2040 default=True,
2040 2041 )
2041 2042 coreconfigitem(
2042 2043 b'shelve',
2043 2044 b'maxbackups',
2044 2045 default=10,
2045 2046 )
2046 2047 coreconfigitem(
2047 2048 b'smtp',
2048 2049 b'host',
2049 2050 default=None,
2050 2051 )
2051 2052 coreconfigitem(
2052 2053 b'smtp',
2053 2054 b'local_hostname',
2054 2055 default=None,
2055 2056 )
2056 2057 coreconfigitem(
2057 2058 b'smtp',
2058 2059 b'password',
2059 2060 default=None,
2060 2061 )
2061 2062 coreconfigitem(
2062 2063 b'smtp',
2063 2064 b'port',
2064 2065 default=dynamicdefault,
2065 2066 )
2066 2067 coreconfigitem(
2067 2068 b'smtp',
2068 2069 b'tls',
2069 2070 default=b'none',
2070 2071 )
2071 2072 coreconfigitem(
2072 2073 b'smtp',
2073 2074 b'username',
2074 2075 default=None,
2075 2076 )
2076 2077 coreconfigitem(
2077 2078 b'sparse',
2078 2079 b'missingwarning',
2079 2080 default=True,
2080 2081 experimental=True,
2081 2082 )
2082 2083 coreconfigitem(
2083 2084 b'subrepos',
2084 2085 b'allowed',
2085 2086 default=dynamicdefault, # to make backporting simpler
2086 2087 )
2087 2088 coreconfigitem(
2088 2089 b'subrepos',
2089 2090 b'hg:allowed',
2090 2091 default=dynamicdefault,
2091 2092 )
2092 2093 coreconfigitem(
2093 2094 b'subrepos',
2094 2095 b'git:allowed',
2095 2096 default=dynamicdefault,
2096 2097 )
2097 2098 coreconfigitem(
2098 2099 b'subrepos',
2099 2100 b'svn:allowed',
2100 2101 default=dynamicdefault,
2101 2102 )
2102 2103 coreconfigitem(
2103 2104 b'templates',
2104 2105 b'.*',
2105 2106 default=None,
2106 2107 generic=True,
2107 2108 )
2108 2109 coreconfigitem(
2109 2110 b'templateconfig',
2110 2111 b'.*',
2111 2112 default=dynamicdefault,
2112 2113 generic=True,
2113 2114 )
2114 2115 coreconfigitem(
2115 2116 b'trusted',
2116 2117 b'groups',
2117 2118 default=list,
2118 2119 )
2119 2120 coreconfigitem(
2120 2121 b'trusted',
2121 2122 b'users',
2122 2123 default=list,
2123 2124 )
2124 2125 coreconfigitem(
2125 2126 b'ui',
2126 2127 b'_usedassubrepo',
2127 2128 default=False,
2128 2129 )
2129 2130 coreconfigitem(
2130 2131 b'ui',
2131 2132 b'allowemptycommit',
2132 2133 default=False,
2133 2134 )
2134 2135 coreconfigitem(
2135 2136 b'ui',
2136 2137 b'archivemeta',
2137 2138 default=True,
2138 2139 )
2139 2140 coreconfigitem(
2140 2141 b'ui',
2141 2142 b'askusername',
2142 2143 default=False,
2143 2144 )
2144 2145 coreconfigitem(
2145 2146 b'ui',
2146 2147 b'available-memory',
2147 2148 default=None,
2148 2149 )
2149 2150
2150 2151 coreconfigitem(
2151 2152 b'ui',
2152 2153 b'clonebundlefallback',
2153 2154 default=False,
2154 2155 )
2155 2156 coreconfigitem(
2156 2157 b'ui',
2157 2158 b'clonebundleprefers',
2158 2159 default=list,
2159 2160 )
2160 2161 coreconfigitem(
2161 2162 b'ui',
2162 2163 b'clonebundles',
2163 2164 default=True,
2164 2165 )
2165 2166 coreconfigitem(
2166 2167 b'ui',
2167 2168 b'color',
2168 2169 default=b'auto',
2169 2170 )
2170 2171 coreconfigitem(
2171 2172 b'ui',
2172 2173 b'commitsubrepos',
2173 2174 default=False,
2174 2175 )
2175 2176 coreconfigitem(
2176 2177 b'ui',
2177 2178 b'debug',
2178 2179 default=False,
2179 2180 )
2180 2181 coreconfigitem(
2181 2182 b'ui',
2182 2183 b'debugger',
2183 2184 default=None,
2184 2185 )
2185 2186 coreconfigitem(
2186 2187 b'ui',
2187 2188 b'editor',
2188 2189 default=dynamicdefault,
2189 2190 )
2190 2191 coreconfigitem(
2191 2192 b'ui',
2192 2193 b'detailed-exit-code',
2193 2194 default=False,
2194 2195 experimental=True,
2195 2196 )
2196 2197 coreconfigitem(
2197 2198 b'ui',
2198 2199 b'fallbackencoding',
2199 2200 default=None,
2200 2201 )
2201 2202 coreconfigitem(
2202 2203 b'ui',
2203 2204 b'forcecwd',
2204 2205 default=None,
2205 2206 )
2206 2207 coreconfigitem(
2207 2208 b'ui',
2208 2209 b'forcemerge',
2209 2210 default=None,
2210 2211 )
2211 2212 coreconfigitem(
2212 2213 b'ui',
2213 2214 b'formatdebug',
2214 2215 default=False,
2215 2216 )
2216 2217 coreconfigitem(
2217 2218 b'ui',
2218 2219 b'formatjson',
2219 2220 default=False,
2220 2221 )
2221 2222 coreconfigitem(
2222 2223 b'ui',
2223 2224 b'formatted',
2224 2225 default=None,
2225 2226 )
2226 2227 coreconfigitem(
2227 2228 b'ui',
2228 2229 b'interactive',
2229 2230 default=None,
2230 2231 )
2231 2232 coreconfigitem(
2232 2233 b'ui',
2233 2234 b'interface',
2234 2235 default=None,
2235 2236 )
2236 2237 coreconfigitem(
2237 2238 b'ui',
2238 2239 b'interface.chunkselector',
2239 2240 default=None,
2240 2241 )
2241 2242 coreconfigitem(
2242 2243 b'ui',
2243 2244 b'large-file-limit',
2244 2245 default=10000000,
2245 2246 )
2246 2247 coreconfigitem(
2247 2248 b'ui',
2248 2249 b'logblockedtimes',
2249 2250 default=False,
2250 2251 )
2251 2252 coreconfigitem(
2252 2253 b'ui',
2253 2254 b'merge',
2254 2255 default=None,
2255 2256 )
2256 2257 coreconfigitem(
2257 2258 b'ui',
2258 2259 b'mergemarkers',
2259 2260 default=b'basic',
2260 2261 )
2261 2262 coreconfigitem(
2262 2263 b'ui',
2263 2264 b'message-output',
2264 2265 default=b'stdio',
2265 2266 )
2266 2267 coreconfigitem(
2267 2268 b'ui',
2268 2269 b'nontty',
2269 2270 default=False,
2270 2271 )
2271 2272 coreconfigitem(
2272 2273 b'ui',
2273 2274 b'origbackuppath',
2274 2275 default=None,
2275 2276 )
2276 2277 coreconfigitem(
2277 2278 b'ui',
2278 2279 b'paginate',
2279 2280 default=True,
2280 2281 )
2281 2282 coreconfigitem(
2282 2283 b'ui',
2283 2284 b'patch',
2284 2285 default=None,
2285 2286 )
2286 2287 coreconfigitem(
2287 2288 b'ui',
2288 2289 b'portablefilenames',
2289 2290 default=b'warn',
2290 2291 )
2291 2292 coreconfigitem(
2292 2293 b'ui',
2293 2294 b'promptecho',
2294 2295 default=False,
2295 2296 )
2296 2297 coreconfigitem(
2297 2298 b'ui',
2298 2299 b'quiet',
2299 2300 default=False,
2300 2301 )
2301 2302 coreconfigitem(
2302 2303 b'ui',
2303 2304 b'quietbookmarkmove',
2304 2305 default=False,
2305 2306 )
2306 2307 coreconfigitem(
2307 2308 b'ui',
2308 2309 b'relative-paths',
2309 2310 default=b'legacy',
2310 2311 )
2311 2312 coreconfigitem(
2312 2313 b'ui',
2313 2314 b'remotecmd',
2314 2315 default=b'hg',
2315 2316 )
2316 2317 coreconfigitem(
2317 2318 b'ui',
2318 2319 b'report_untrusted',
2319 2320 default=True,
2320 2321 )
2321 2322 coreconfigitem(
2322 2323 b'ui',
2323 2324 b'rollback',
2324 2325 default=True,
2325 2326 )
2326 2327 coreconfigitem(
2327 2328 b'ui',
2328 2329 b'signal-safe-lock',
2329 2330 default=True,
2330 2331 )
2331 2332 coreconfigitem(
2332 2333 b'ui',
2333 2334 b'slash',
2334 2335 default=False,
2335 2336 )
2336 2337 coreconfigitem(
2337 2338 b'ui',
2338 2339 b'ssh',
2339 2340 default=b'ssh',
2340 2341 )
2341 2342 coreconfigitem(
2342 2343 b'ui',
2343 2344 b'ssherrorhint',
2344 2345 default=None,
2345 2346 )
2346 2347 coreconfigitem(
2347 2348 b'ui',
2348 2349 b'statuscopies',
2349 2350 default=False,
2350 2351 )
2351 2352 coreconfigitem(
2352 2353 b'ui',
2353 2354 b'strict',
2354 2355 default=False,
2355 2356 )
2356 2357 coreconfigitem(
2357 2358 b'ui',
2358 2359 b'style',
2359 2360 default=b'',
2360 2361 )
2361 2362 coreconfigitem(
2362 2363 b'ui',
2363 2364 b'supportcontact',
2364 2365 default=None,
2365 2366 )
2366 2367 coreconfigitem(
2367 2368 b'ui',
2368 2369 b'textwidth',
2369 2370 default=78,
2370 2371 )
2371 2372 coreconfigitem(
2372 2373 b'ui',
2373 2374 b'timeout',
2374 2375 default=b'600',
2375 2376 )
2376 2377 coreconfigitem(
2377 2378 b'ui',
2378 2379 b'timeout.warn',
2379 2380 default=0,
2380 2381 )
2381 2382 coreconfigitem(
2382 2383 b'ui',
2383 2384 b'timestamp-output',
2384 2385 default=False,
2385 2386 )
2386 2387 coreconfigitem(
2387 2388 b'ui',
2388 2389 b'traceback',
2389 2390 default=False,
2390 2391 )
2391 2392 coreconfigitem(
2392 2393 b'ui',
2393 2394 b'tweakdefaults',
2394 2395 default=False,
2395 2396 )
2396 2397 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2397 2398 coreconfigitem(
2398 2399 b'ui',
2399 2400 b'verbose',
2400 2401 default=False,
2401 2402 )
2402 2403 coreconfigitem(
2403 2404 b'verify',
2404 2405 b'skipflags',
2405 2406 default=None,
2406 2407 )
2407 2408 coreconfigitem(
2408 2409 b'web',
2409 2410 b'allowbz2',
2410 2411 default=False,
2411 2412 )
2412 2413 coreconfigitem(
2413 2414 b'web',
2414 2415 b'allowgz',
2415 2416 default=False,
2416 2417 )
2417 2418 coreconfigitem(
2418 2419 b'web',
2419 2420 b'allow-pull',
2420 2421 alias=[(b'web', b'allowpull')],
2421 2422 default=True,
2422 2423 )
2423 2424 coreconfigitem(
2424 2425 b'web',
2425 2426 b'allow-push',
2426 2427 alias=[(b'web', b'allow_push')],
2427 2428 default=list,
2428 2429 )
2429 2430 coreconfigitem(
2430 2431 b'web',
2431 2432 b'allowzip',
2432 2433 default=False,
2433 2434 )
2434 2435 coreconfigitem(
2435 2436 b'web',
2436 2437 b'archivesubrepos',
2437 2438 default=False,
2438 2439 )
2439 2440 coreconfigitem(
2440 2441 b'web',
2441 2442 b'cache',
2442 2443 default=True,
2443 2444 )
2444 2445 coreconfigitem(
2445 2446 b'web',
2446 2447 b'comparisoncontext',
2447 2448 default=5,
2448 2449 )
2449 2450 coreconfigitem(
2450 2451 b'web',
2451 2452 b'contact',
2452 2453 default=None,
2453 2454 )
2454 2455 coreconfigitem(
2455 2456 b'web',
2456 2457 b'deny_push',
2457 2458 default=list,
2458 2459 )
2459 2460 coreconfigitem(
2460 2461 b'web',
2461 2462 b'guessmime',
2462 2463 default=False,
2463 2464 )
2464 2465 coreconfigitem(
2465 2466 b'web',
2466 2467 b'hidden',
2467 2468 default=False,
2468 2469 )
2469 2470 coreconfigitem(
2470 2471 b'web',
2471 2472 b'labels',
2472 2473 default=list,
2473 2474 )
2474 2475 coreconfigitem(
2475 2476 b'web',
2476 2477 b'logoimg',
2477 2478 default=b'hglogo.png',
2478 2479 )
2479 2480 coreconfigitem(
2480 2481 b'web',
2481 2482 b'logourl',
2482 2483 default=b'https://mercurial-scm.org/',
2483 2484 )
2484 2485 coreconfigitem(
2485 2486 b'web',
2486 2487 b'accesslog',
2487 2488 default=b'-',
2488 2489 )
2489 2490 coreconfigitem(
2490 2491 b'web',
2491 2492 b'address',
2492 2493 default=b'',
2493 2494 )
2494 2495 coreconfigitem(
2495 2496 b'web',
2496 2497 b'allow-archive',
2497 2498 alias=[(b'web', b'allow_archive')],
2498 2499 default=list,
2499 2500 )
2500 2501 coreconfigitem(
2501 2502 b'web',
2502 2503 b'allow_read',
2503 2504 default=list,
2504 2505 )
2505 2506 coreconfigitem(
2506 2507 b'web',
2507 2508 b'baseurl',
2508 2509 default=None,
2509 2510 )
2510 2511 coreconfigitem(
2511 2512 b'web',
2512 2513 b'cacerts',
2513 2514 default=None,
2514 2515 )
2515 2516 coreconfigitem(
2516 2517 b'web',
2517 2518 b'certificate',
2518 2519 default=None,
2519 2520 )
2520 2521 coreconfigitem(
2521 2522 b'web',
2522 2523 b'collapse',
2523 2524 default=False,
2524 2525 )
2525 2526 coreconfigitem(
2526 2527 b'web',
2527 2528 b'csp',
2528 2529 default=None,
2529 2530 )
2530 2531 coreconfigitem(
2531 2532 b'web',
2532 2533 b'deny_read',
2533 2534 default=list,
2534 2535 )
2535 2536 coreconfigitem(
2536 2537 b'web',
2537 2538 b'descend',
2538 2539 default=True,
2539 2540 )
2540 2541 coreconfigitem(
2541 2542 b'web',
2542 2543 b'description',
2543 2544 default=b"",
2544 2545 )
2545 2546 coreconfigitem(
2546 2547 b'web',
2547 2548 b'encoding',
2548 2549 default=lambda: encoding.encoding,
2549 2550 )
2550 2551 coreconfigitem(
2551 2552 b'web',
2552 2553 b'errorlog',
2553 2554 default=b'-',
2554 2555 )
2555 2556 coreconfigitem(
2556 2557 b'web',
2557 2558 b'ipv6',
2558 2559 default=False,
2559 2560 )
2560 2561 coreconfigitem(
2561 2562 b'web',
2562 2563 b'maxchanges',
2563 2564 default=10,
2564 2565 )
2565 2566 coreconfigitem(
2566 2567 b'web',
2567 2568 b'maxfiles',
2568 2569 default=10,
2569 2570 )
2570 2571 coreconfigitem(
2571 2572 b'web',
2572 2573 b'maxshortchanges',
2573 2574 default=60,
2574 2575 )
2575 2576 coreconfigitem(
2576 2577 b'web',
2577 2578 b'motd',
2578 2579 default=b'',
2579 2580 )
2580 2581 coreconfigitem(
2581 2582 b'web',
2582 2583 b'name',
2583 2584 default=dynamicdefault,
2584 2585 )
2585 2586 coreconfigitem(
2586 2587 b'web',
2587 2588 b'port',
2588 2589 default=8000,
2589 2590 )
2590 2591 coreconfigitem(
2591 2592 b'web',
2592 2593 b'prefix',
2593 2594 default=b'',
2594 2595 )
2595 2596 coreconfigitem(
2596 2597 b'web',
2597 2598 b'push_ssl',
2598 2599 default=True,
2599 2600 )
2600 2601 coreconfigitem(
2601 2602 b'web',
2602 2603 b'refreshinterval',
2603 2604 default=20,
2604 2605 )
2605 2606 coreconfigitem(
2606 2607 b'web',
2607 2608 b'server-header',
2608 2609 default=None,
2609 2610 )
2610 2611 coreconfigitem(
2611 2612 b'web',
2612 2613 b'static',
2613 2614 default=None,
2614 2615 )
2615 2616 coreconfigitem(
2616 2617 b'web',
2617 2618 b'staticurl',
2618 2619 default=None,
2619 2620 )
2620 2621 coreconfigitem(
2621 2622 b'web',
2622 2623 b'stripes',
2623 2624 default=1,
2624 2625 )
2625 2626 coreconfigitem(
2626 2627 b'web',
2627 2628 b'style',
2628 2629 default=b'paper',
2629 2630 )
2630 2631 coreconfigitem(
2631 2632 b'web',
2632 2633 b'templates',
2633 2634 default=None,
2634 2635 )
2635 2636 coreconfigitem(
2636 2637 b'web',
2637 2638 b'view',
2638 2639 default=b'served',
2639 2640 experimental=True,
2640 2641 )
2641 2642 coreconfigitem(
2642 2643 b'worker',
2643 2644 b'backgroundclose',
2644 2645 default=dynamicdefault,
2645 2646 )
2646 2647 # Windows defaults to a limit of 512 open files. A buffer of 128
2647 2648 # should give us enough headway.
2648 2649 coreconfigitem(
2649 2650 b'worker',
2650 2651 b'backgroundclosemaxqueue',
2651 2652 default=384,
2652 2653 )
2653 2654 coreconfigitem(
2654 2655 b'worker',
2655 2656 b'backgroundcloseminfilecount',
2656 2657 default=2048,
2657 2658 )
2658 2659 coreconfigitem(
2659 2660 b'worker',
2660 2661 b'backgroundclosethreadcount',
2661 2662 default=4,
2662 2663 )
2663 2664 coreconfigitem(
2664 2665 b'worker',
2665 2666 b'enabled',
2666 2667 default=True,
2667 2668 )
2668 2669 coreconfigitem(
2669 2670 b'worker',
2670 2671 b'numcpus',
2671 2672 default=None,
2672 2673 )
2673 2674
2674 2675 # Rebase-related configuration moved to core because other extensions are
2675 2676 # doing strange things. For example, shelve imports the extension to reuse
2676 2677 # some bits without formally loading it.
2677 2678 coreconfigitem(
2678 2679 b'commands',
2679 2680 b'rebase.requiredest',
2680 2681 default=False,
2681 2682 )
2682 2683 coreconfigitem(
2683 2684 b'experimental',
2684 2685 b'rebaseskipobsolete',
2685 2686 default=True,
2686 2687 )
2687 2688 coreconfigitem(
2688 2689 b'rebase',
2689 2690 b'singletransaction',
2690 2691 default=False,
2691 2692 )
2692 2693 coreconfigitem(
2693 2694 b'rebase',
2694 2695 b'experimental.inmemory',
2695 2696 default=False,
2696 2697 )
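
# Editorial aside, not part of the changeset: several declarations above pass
# the callable `list` as the default (e.g. web.allow-push) instead of a shared
# `[]` instance; readers are expected to call a callable default, so each
# lookup gets a fresh object. A minimal sketch of that convention:

def resolve_default(default):
    """Return a usable default value, building mutable ones on demand."""
    return default() if callable(default) else default

assert resolve_default(list) == []
assert resolve_default(list) is not resolve_default(list)
assert resolve_default(True) is True
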
@@ -1,3360 +1,3364
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import binascii
17 17 import collections
18 18 import contextlib
19 19 import errno
20 20 import io
21 21 import os
22 22 import struct
23 23 import zlib
24 24
25 25 # import stuff from node for others to import from revlog
26 26 from .node import (
27 27 bin,
28 28 hex,
29 29 nullrev,
30 30 sha1nodeconstants,
31 31 short,
32 32 wdirrev,
33 33 )
34 34 from .i18n import _
35 35 from .pycompat import getattr
36 36 from .revlogutils.constants import (
37 37 ALL_KINDS,
38 38 COMP_MODE_INLINE,
39 39 COMP_MODE_PLAIN,
40 40 FEATURES_BY_VERSION,
41 41 FLAG_GENERALDELTA,
42 42 FLAG_INLINE_DATA,
43 43 INDEX_HEADER,
44 44 REVLOGV0,
45 45 REVLOGV1,
46 46 REVLOGV1_FLAGS,
47 47 REVLOGV2,
48 48 REVLOGV2_FLAGS,
49 49 REVLOG_DEFAULT_FLAGS,
50 50 REVLOG_DEFAULT_FORMAT,
51 51 REVLOG_DEFAULT_VERSION,
52 52 SUPPORTED_FLAGS,
53 53 )
54 54 from .revlogutils.flagutil import (
55 55 REVIDX_DEFAULT_FLAGS,
56 56 REVIDX_ELLIPSIS,
57 57 REVIDX_EXTSTORED,
58 58 REVIDX_FLAGS_ORDER,
59 59 REVIDX_HASCOPIESINFO,
60 60 REVIDX_ISCENSORED,
61 61 REVIDX_RAWTEXT_CHANGING_FLAGS,
62 62 )
63 63 from .thirdparty import attr
64 64 from . import (
65 65 ancestor,
66 66 dagop,
67 67 error,
68 68 mdiff,
69 69 policy,
70 70 pycompat,
71 71 templatefilters,
72 72 util,
73 73 )
74 74 from .interfaces import (
75 75 repository,
76 76 util as interfaceutil,
77 77 )
78 78 from .revlogutils import (
79 79 deltas as deltautil,
80 80 docket as docketutil,
81 81 flagutil,
82 82 nodemap as nodemaputil,
83 83 revlogv0,
84 84 sidedata as sidedatautil,
85 85 )
86 86 from .utils import (
87 87 storageutil,
88 88 stringutil,
89 89 )
90 90
91 91 # blanked usage of all the names to prevent pyflakes constraints
92 92 # We need these names available in the module for extensions.
93 93
94 94 REVLOGV0
95 95 REVLOGV1
96 96 REVLOGV2
97 97 FLAG_INLINE_DATA
98 98 FLAG_GENERALDELTA
99 99 REVLOG_DEFAULT_FLAGS
100 100 REVLOG_DEFAULT_FORMAT
101 101 REVLOG_DEFAULT_VERSION
102 102 REVLOGV1_FLAGS
103 103 REVLOGV2_FLAGS
104 104 REVIDX_ISCENSORED
105 105 REVIDX_ELLIPSIS
106 106 REVIDX_HASCOPIESINFO
107 107 REVIDX_EXTSTORED
108 108 REVIDX_DEFAULT_FLAGS
109 109 REVIDX_FLAGS_ORDER
110 110 REVIDX_RAWTEXT_CHANGING_FLAGS
111 111
112 112 parsers = policy.importmod('parsers')
113 113 rustancestor = policy.importrust('ancestor')
114 114 rustdagop = policy.importrust('dagop')
115 115 rustrevlog = policy.importrust('revlog')
116 116
117 117 # Aliased for performance.
118 118 _zlibdecompress = zlib.decompress
119 119
120 120 # max size of revlog with inline data
121 121 _maxinline = 131072
122 122 _chunksize = 1048576
123 123
124 124 # Flag processors for REVIDX_ELLIPSIS.
125 125 def ellipsisreadprocessor(rl, text):
126 126 return text, False
127 127
128 128
129 129 def ellipsiswriteprocessor(rl, text):
130 130 return text, False
131 131
132 132
133 133 def ellipsisrawprocessor(rl, text):
134 134 return False
135 135
136 136
137 137 ellipsisprocessor = (
138 138 ellipsisreadprocessor,
139 139 ellipsiswriteprocessor,
140 140 ellipsisrawprocessor,
141 141 )
142 142
143 143
144 144 def offset_type(offset, type):
145 145 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
146 146 raise ValueError(b'unknown revlog index flags')
147 147 return int(int(offset) << 16 | type)
148 148
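# Editorial aside, not part of the diff: offset_type() above packs the data
# offset and the 16-bit flag field into a single integer. A standalone sketch
# of the round trip (pack_offset_flags/unpack_offset_flags are made-up names):

def pack_offset_flags(offset, flags):
    if flags & ~0xFFFF:
        raise ValueError('flags do not fit in 16 bits')
    return (offset << 16) | flags  # low 16 bits: flags, the rest: byte offset

def unpack_offset_flags(field):
    return field >> 16, field & 0xFFFF

assert pack_offset_flags(4096, 0x2) == (4096 << 16) | 0x2
assert unpack_offset_flags(pack_offset_flags(4096, 0x2)) == (4096, 0x2)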
149 149
150 150 def _verify_revision(rl, skipflags, state, node):
151 151 """Verify the integrity of the given revlog ``node`` while providing a hook
152 152 point for extensions to influence the operation."""
153 153 if skipflags:
154 154 state[b'skipread'].add(node)
155 155 else:
156 156 # Side-effect: read content and verify hash.
157 157 rl.revision(node)
158 158
159 159
160 160 # True if a fast implementation for persistent-nodemap is available
161 161 #
162 162 # We also consider we have a "fast" implementation in "pure" python because
163 163 # people using pure don't really have performance considerations (and a
164 164 # wheelbarrow of other slowness sources)
165 165 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
166 166 parsers, 'BaseIndexObject'
167 167 )
168 168
169 169
170 170 @attr.s(slots=True, frozen=True)
171 171 class _revisioninfo(object):
172 172 """Information about a revision that allows building its fulltext
173 173 node: expected hash of the revision
174 174 p1, p2: parent revs of the revision
175 175 btext: built text cache consisting of a one-element list
176 176 cachedelta: (baserev, uncompressed_delta) or None
177 177 flags: flags associated to the revision storage
178 178
179 179 One of btext[0] or cachedelta must be set.
180 180 """
181 181
182 182 node = attr.ib()
183 183 p1 = attr.ib()
184 184 p2 = attr.ib()
185 185 btext = attr.ib()
186 186 textlen = attr.ib()
187 187 cachedelta = attr.ib()
188 188 flags = attr.ib()
189 189
190 190
191 191 @interfaceutil.implementer(repository.irevisiondelta)
192 192 @attr.s(slots=True)
193 193 class revlogrevisiondelta(object):
194 194 node = attr.ib()
195 195 p1node = attr.ib()
196 196 p2node = attr.ib()
197 197 basenode = attr.ib()
198 198 flags = attr.ib()
199 199 baserevisionsize = attr.ib()
200 200 revision = attr.ib()
201 201 delta = attr.ib()
202 202 sidedata = attr.ib()
203 203 protocol_flags = attr.ib()
204 204 linknode = attr.ib(default=None)
205 205
206 206
207 207 @interfaceutil.implementer(repository.iverifyproblem)
208 208 @attr.s(frozen=True)
209 209 class revlogproblem(object):
210 210 warning = attr.ib(default=None)
211 211 error = attr.ib(default=None)
212 212 node = attr.ib(default=None)
213 213
214 214
215 215 def parse_index_v1(data, inline):
216 216 # call the C implementation to parse the index data
217 217 index, cache = parsers.parse_index2(data, inline)
218 218 return index, cache
219 219
220 220
221 221 def parse_index_v2(data, inline):
222 222 # call the C implementation to parse the index data
223 223 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
224 224 return index, cache
225 225
226 226
227 227 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
228 228
229 229 def parse_index_v1_nodemap(data, inline):
230 230 index, cache = parsers.parse_index_devel_nodemap(data, inline)
231 231 return index, cache
232 232
233 233
234 234 else:
235 235 parse_index_v1_nodemap = None
236 236
237 237
238 238 def parse_index_v1_mixed(data, inline):
239 239 index, cache = parse_index_v1(data, inline)
240 240 return rustrevlog.MixedIndex(index), cache
241 241
242 242
243 243 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
244 244 # signed integer)
245 245 _maxentrysize = 0x7FFFFFFF
246 246
247 247
248 248 class revlog(object):
249 249 """
250 250 the underlying revision storage object
251 251
252 252 A revlog consists of two parts, an index and the revision data.
253 253
254 254 The index is a file with a fixed record size containing
255 255 information on each revision, including its nodeid (hash), the
256 256 nodeids of its parents, the position and offset of its data within
257 257 the data file, and the revision it's based on. Finally, each entry
258 258 contains a linkrev entry that can serve as a pointer to external
259 259 data.
260 260
261 261 The revision data itself is a linear collection of data chunks.
262 262 Each chunk represents a revision and is usually represented as a
263 263 delta against the previous chunk. To bound lookup time, runs of
264 264 deltas are limited to about 2 times the length of the original
265 265 version data. This makes retrieval of a version proportional to
266 266 its size, or O(1) relative to the number of revisions.
267 267
268 268 Both pieces of the revlog are written to in an append-only
269 269 fashion, which means we never need to rewrite a file to insert or
270 270 remove data, and can use some simple techniques to avoid the need
271 271 for locking while reading.
272 272
273 273 If checkambig, indexfile is opened with checkambig=True at
274 274 writing, to avoid file stat ambiguity.
275 275
276 276 If mmaplargeindex is True, and an mmapindexthreshold is set, the
277 277 index will be mmapped rather than read if it is larger than the
278 278 configured threshold.
279 279
280 280 If censorable is True, the revlog can have censored revisions.
281 281
282 282 If `upperboundcomp` is not None, this is the expected maximal gain from
283 283 compression for the data content.
284 284
285 285 `concurrencychecker` is an optional function that receives 3 arguments: a
286 286 file handle, a filename, and an expected position. It should check whether
287 287 the current position in the file handle is valid, and log/warn/fail (by
288 288 raising).
289 289
290 290
291 291 Internal details
292 292 ----------------
293 293
294 294 A large part of the revlog logic deals with revisions' "index entries", tuple
295 295 objects that contain the same "items" whatever the revlog version.
296 296 Different versions will have different ways of storing these items (sometimes
297 297 not having them at all), but the tuple will always be the same. New fields
298 298 are usually added at the end to avoid breaking existing code that relies
299 299 on the existing order. The fields are defined as follows:
300 300
301 301 [0] offset:
302 302 The byte index of the start of revision data chunk.
303 303 That value is shifted up by 16 bits. use "offset = field >> 16" to
304 304 retrieve it.
305 305
306 306 flags:
307 307 A flag field that carries special information or changes the behavior
308 308 of the revision. (see `REVIDX_*` constants for details)
309 309 The flag field only occupies the first 16 bits of this field,
310 310 use "flags = field & 0xFFFF" to retrieve the value.
311 311
312 312 [1] compressed length:
313 313 The size, in bytes, of the chunk on disk
314 314
315 315 [2] uncompressed length:
316 316 The size, in bytes, of the full revision once reconstructed.
317 317
318 318 [3] base rev:
319 319 Either the base of the revision delta chain (without general
320 320 delta), or the base of the delta (stored in the data chunk)
321 321 with general delta.
322 322
323 323 [4] link rev:
324 324 Changelog revision number of the changeset introducing this
325 325 revision.
326 326
327 327 [5] parent 1 rev:
328 328 Revision number of the first parent
329 329
330 330 [6] parent 2 rev:
331 331 Revision number of the second parent
332 332
333 333 [7] node id:
334 334 The node id of the current revision
335 335
336 336 [8] sidedata offset:
337 337 The byte index of the start of the revision's side-data chunk.
338 338
339 339 [9] sidedata chunk length:
340 340 The size, in bytes, of the revision's side-data chunk.
341 341
342 342 [10] data compression mode:
343 343 two bits that detail the way the data chunk is compressed on disk.
344 344 (see "COMP_MODE_*" constants for details). For revlog version 0 and
345 345 1 this will always be COMP_MODE_INLINE.
346 346
347 347 """
348 348
349 349 _flagserrorclass = error.RevlogError
350 350
351 351 def __init__(
352 352 self,
353 353 opener,
354 354 target,
355 355 radix,
356 356 postfix=None, # only exist for `tmpcensored` now
357 357 checkambig=False,
358 358 mmaplargeindex=False,
359 359 censorable=False,
360 360 upperboundcomp=None,
361 361 persistentnodemap=False,
362 362 concurrencychecker=None,
363 363 trypending=False,
364 364 ):
365 365 """
366 366 create a revlog object
367 367
368 368 opener is a function that abstracts the file opening operation
369 369 and can be used to implement COW semantics or the like.
370 370
371 371 `target`: a (KIND, ID) tuple that identifies the content stored in
372 372 this revlog. It helps the rest of the code to understand what the revlog
373 373 is about without having to resort to heuristics and index filename
374 374 analysis. Note that this must reliably be set by normal code, but
375 375 that test, debug, or performance measurement code might not set this to
376 376 an accurate value.
377 377 """
378 378 self.upperboundcomp = upperboundcomp
379 379
380 380 self.radix = radix
381 381
382 382 self._docket_file = None
383 383 self._indexfile = None
384 384 self._datafile = None
385 385 self._nodemap_file = None
386 386 self.postfix = postfix
387 387 self._trypending = trypending
388 388 self.opener = opener
389 389 if persistentnodemap:
390 390 self._nodemap_file = nodemaputil.get_nodemap_file(self)
391 391
392 392 assert target[0] in ALL_KINDS
393 393 assert len(target) == 2
394 394 self.target = target
395 395 # When True, indexfile is opened with checkambig=True at writing, to
396 396 # avoid file stat ambiguity.
397 397 self._checkambig = checkambig
398 398 self._mmaplargeindex = mmaplargeindex
399 399 self._censorable = censorable
400 400 # 3-tuple of (node, rev, text) for a raw revision.
401 401 self._revisioncache = None
402 402 # Maps rev to chain base rev.
403 403 self._chainbasecache = util.lrucachedict(100)
404 404 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
405 405 self._chunkcache = (0, b'')
406 406 # How much data to read and cache into the raw revlog data cache.
407 407 self._chunkcachesize = 65536
408 408 self._maxchainlen = None
409 409 self._deltabothparents = True
410 410 self.index = None
411 411 self._docket = None
412 412 self._nodemap_docket = None
413 413 # Mapping of partial identifiers to full nodes.
414 414 self._pcache = {}
415 415 # Compression engine in use, possibly overridden from config in _init_opts.
416 416 self._compengine = b'zlib'
417 417 self._compengineopts = {}
418 418 self._maxdeltachainspan = -1
419 419 self._withsparseread = False
420 420 self._sparserevlog = False
421 421 self.hassidedata = False
422 422 self._srdensitythreshold = 0.50
423 423 self._srmingapsize = 262144
424 424
425 425 # Make copy of flag processors so each revlog instance can support
426 426 # custom flags.
427 427 self._flagprocessors = dict(flagutil.flagprocessors)
428 428
429 429 # 2-tuple of file handles being used for active writing.
430 430 self._writinghandles = None
431 431 # prevent nesting of addgroup
432 432 self._adding_group = None
433 433
434 434 self._loadindex()
435 435
436 436 self._concurrencychecker = concurrencychecker
437 437
438 438 def _init_opts(self):
439 439 """process options (from above/config) to setup associated default revlog mode
440 440
441 441 These values might be affected when actually reading on-disk information.
442 442
443 443 The relevant values are returned for use in _loadindex().
444 444
445 445 * newversionflags:
446 446 version header to use if we need to create a new revlog
447 447
448 448 * mmapindexthreshold:
449 449 minimal index size at which to start using mmap
450 450
451 451 * force_nodemap:
452 452 force the usage of a "development" version of the nodemap code
453 453 """
454 454 mmapindexthreshold = None
455 455 opts = self.opener.options
456 456
457 457 if b'revlogv2' in opts:
458 458 new_header = REVLOGV2 | FLAG_INLINE_DATA
459 459 elif b'revlogv1' in opts:
460 460 new_header = REVLOGV1 | FLAG_INLINE_DATA
461 461 if b'generaldelta' in opts:
462 462 new_header |= FLAG_GENERALDELTA
463 463 elif b'revlogv0' in self.opener.options:
464 464 new_header = REVLOGV0
465 465 else:
466 466 new_header = REVLOG_DEFAULT_VERSION
467 467
468 468 if b'chunkcachesize' in opts:
469 469 self._chunkcachesize = opts[b'chunkcachesize']
470 470 if b'maxchainlen' in opts:
471 471 self._maxchainlen = opts[b'maxchainlen']
472 472 if b'deltabothparents' in opts:
473 473 self._deltabothparents = opts[b'deltabothparents']
474 474 self._lazydelta = bool(opts.get(b'lazydelta', True))
475 475 self._lazydeltabase = False
476 476 if self._lazydelta:
477 477 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
478 478 if b'compengine' in opts:
479 479 self._compengine = opts[b'compengine']
480 480 if b'zlib.level' in opts:
481 481 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
482 482 if b'zstd.level' in opts:
483 483 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
484 484 if b'maxdeltachainspan' in opts:
485 485 self._maxdeltachainspan = opts[b'maxdeltachainspan']
486 486 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
487 487 mmapindexthreshold = opts[b'mmapindexthreshold']
488 488 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
489 489 withsparseread = bool(opts.get(b'with-sparse-read', False))
490 490 # sparse-revlog forces sparse-read
491 491 self._withsparseread = self._sparserevlog or withsparseread
492 492 if b'sparse-read-density-threshold' in opts:
493 493 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
494 494 if b'sparse-read-min-gap-size' in opts:
495 495 self._srmingapsize = opts[b'sparse-read-min-gap-size']
496 496 if opts.get(b'enableellipsis'):
497 497 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
498 498
499 499 # revlog v0 doesn't have flag processors
500 500 for flag, processor in pycompat.iteritems(
501 501 opts.get(b'flagprocessors', {})
502 502 ):
503 503 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
504 504
505 505 if self._chunkcachesize <= 0:
506 506 raise error.RevlogError(
507 507 _(b'revlog chunk cache size %r is not greater than 0')
508 508 % self._chunkcachesize
509 509 )
510 510 elif self._chunkcachesize & (self._chunkcachesize - 1):
511 511 raise error.RevlogError(
512 512 _(b'revlog chunk cache size %r is not a power of 2')
513 513 % self._chunkcachesize
514 514 )
515 515 force_nodemap = opts.get(b'devel-force-nodemap', False)
516 516 return new_header, mmapindexthreshold, force_nodemap
517 517
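# Editorial aside, not part of the diff: the chunk cache validation above uses
# the classic "n & (n - 1)" trick; clearing the lowest set bit yields zero
# exactly when n is a power of two. A quick standalone check:

def is_power_of_two(n):
    return n > 0 and n & (n - 1) == 0

assert is_power_of_two(65536)        # the default _chunkcachesize
assert not is_power_of_two(65537)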
518 518 def _get_data(self, filepath, mmap_threshold, size=None):
519 519 """return a file content with or without mmap
520 520
521 521 If the file is missing return the empty string"""
522 522 try:
523 523 with self.opener(filepath) as fp:
524 524 if mmap_threshold is not None:
525 525 file_size = self.opener.fstat(fp).st_size
526 526 if file_size >= mmap_threshold:
527 527 if size is not None:
528 528 # avoid potential mmap crash
529 529 size = min(file_size, size)
530 530 # TODO: should .close() to release resources without
531 531 # relying on Python GC
532 532 if size is None:
533 533 return util.buffer(util.mmapread(fp))
534 534 else:
535 535 return util.buffer(util.mmapread(fp, size))
536 536 if size is None:
537 537 return fp.read()
538 538 else:
539 539 return fp.read(size)
540 540 except IOError as inst:
541 541 if inst.errno != errno.ENOENT:
542 542 raise
543 543 return b''
544 544
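# Editorial aside, not part of the diff: a simplified sketch of the read
# strategy in _get_data() above, using the stdlib directly instead of
# Mercurial's opener/util helpers (names and error handling are assumptions).

import mmap
import os

def read_possibly_mmapped(path, mmap_threshold=None):
    try:
        with open(path, 'rb') as fp:
            if (
                mmap_threshold is not None
                and os.fstat(fp.fileno()).st_size >= mmap_threshold
            ):
                # map the whole file; the mapping owns its own handle, so it
                # stays valid after the file object is closed
                return mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
            return fp.read()
    except FileNotFoundError:
        return b''  # a missing file reads as empty, like _get_data()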
545 545 def _loadindex(self):
546 546
547 547 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
548 548
549 549 if self.postfix is not None:
550 550 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
551 551 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
552 552 entry_point = b'%s.i.a' % self.radix
553 553 else:
554 554 entry_point = b'%s.i' % self.radix
555 555
556 556 entry_data = b''
557 557 self._initempty = True
558 558 entry_data = self._get_data(entry_point, mmapindexthreshold)
559 559 if len(entry_data) > 0:
560 560 header = INDEX_HEADER.unpack(entry_data[:4])[0]
561 561 self._initempty = False
562 562 else:
563 563 header = new_header
564 564
565 565 self._format_flags = header & ~0xFFFF
566 566 self._format_version = header & 0xFFFF
567 567
568 568 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
569 569 if supported_flags is None:
570 570 msg = _(b'unknown version (%d) in revlog %s')
571 571 msg %= (self._format_version, self.display_id)
572 572 raise error.RevlogError(msg)
573 573 elif self._format_flags & ~supported_flags:
574 574 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
575 575 display_flag = self._format_flags >> 16
576 576 msg %= (display_flag, self._format_version, self.display_id)
577 577 raise error.RevlogError(msg)
578 578
579 579 features = FEATURES_BY_VERSION[self._format_version]
580 580 self._inline = features[b'inline'](self._format_flags)
581 581 self._generaldelta = features[b'generaldelta'](self._format_flags)
582 582 self.hassidedata = features[b'sidedata']
583 583
584 584 if not features[b'docket']:
585 585 self._indexfile = entry_point
586 586 index_data = entry_data
587 587 else:
588 588 self._docket_file = entry_point
589 589 if self._initempty:
590 590 self._docket = docketutil.default_docket(self, header)
591 591 else:
592 592 self._docket = docketutil.parse_docket(
593 593 self, entry_data, use_pending=self._trypending
594 594 )
595 595 self._indexfile = self._docket.index_filepath()
596 596 index_data = b''
597 597 index_size = self._docket.index_end
598 598 if index_size > 0:
599 599 index_data = self._get_data(
600 600 self._indexfile, mmapindexthreshold, size=index_size
601 601 )
602 602 if len(index_data) < index_size:
603 603 msg = _(b'too few index data for %s: got %d, expected %d')
604 604 msg %= (self.display_id, len(index_data), index_size)
605 605 raise error.RevlogError(msg)
606 606
607 607 self._inline = False
608 608 # generaldelta implied by version 2 revlogs.
609 609 self._generaldelta = True
610 610 # the logic for persistent nodemap will be dealt with within the
611 611 # main docket, so disable it for now.
612 612 self._nodemap_file = None
613 613
614 614 if self.postfix is None:
615 615 self._datafile = b'%s.d' % self.radix
616 616 else:
617 617 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
618 618
619 619 self.nodeconstants = sha1nodeconstants
620 620 self.nullid = self.nodeconstants.nullid
621 621
622 622 # sparse-revlog can't be on without general-delta (issue6056)
623 623 if not self._generaldelta:
624 624 self._sparserevlog = False
625 625
626 626 self._storedeltachains = True
627 627
628 628 devel_nodemap = (
629 629 self._nodemap_file
630 630 and force_nodemap
631 631 and parse_index_v1_nodemap is not None
632 632 )
633 633
634 634 use_rust_index = False
635 635 if rustrevlog is not None:
636 636 if self._nodemap_file is not None:
637 637 use_rust_index = True
638 638 else:
639 639 use_rust_index = self.opener.options.get(b'rust.index')
640 640
641 641 self._parse_index = parse_index_v1
642 642 if self._format_version == REVLOGV0:
643 643 self._parse_index = revlogv0.parse_index_v0
644 644 elif self._format_version == REVLOGV2:
645 645 self._parse_index = parse_index_v2
646 646 elif devel_nodemap:
647 647 self._parse_index = parse_index_v1_nodemap
648 648 elif use_rust_index:
649 649 self._parse_index = parse_index_v1_mixed
650 650 try:
651 651 d = self._parse_index(index_data, self._inline)
652 652 index, _chunkcache = d
653 653 use_nodemap = (
654 654 not self._inline
655 655 and self._nodemap_file is not None
656 656 and util.safehasattr(index, 'update_nodemap_data')
657 657 )
658 658 if use_nodemap:
659 659 nodemap_data = nodemaputil.persisted_data(self)
660 660 if nodemap_data is not None:
661 661 docket = nodemap_data[0]
662 662 if (
663 663 len(d[0]) > docket.tip_rev
664 664 and d[0][docket.tip_rev][7] == docket.tip_node
665 665 ):
666 666 # no changelog tampering
667 667 self._nodemap_docket = docket
668 668 index.update_nodemap_data(*nodemap_data)
669 669 except (ValueError, IndexError):
670 670 raise error.RevlogError(
671 671 _(b"index %s is corrupted") % self.display_id
672 672 )
673 673 self.index, self._chunkcache = d
674 674 if not self._chunkcache:
675 675 self._chunkclear()
676 676 # revnum -> (chain-length, sum-delta-length)
677 677 self._chaininfocache = util.lrucachedict(500)
678 678 # revlog header -> revlog compressor
679 679 self._decompressors = {}
680 680
681 681 @util.propertycache
682 682 def revlog_kind(self):
683 683 return self.target[0]
684 684
685 685 @util.propertycache
686 686 def display_id(self):
687 687 """The public facing "ID" of the revlog that we use in message"""
688 688 # Maybe we should build a user facing representation of
689 689 # revlog.target instead of using `self.radix`
690 690 return self.radix
691 691
692 def _get_decompressor(self, t):
693 try:
694 compressor = self._decompressors[t]
695 except KeyError:
696 try:
697 engine = util.compengines.forrevlogheader(t)
698 compressor = engine.revlogcompressor(self._compengineopts)
699 self._decompressors[t] = compressor
700 except KeyError:
701 raise error.RevlogError(
702 _(b'unknown compression type %s') % binascii.hexlify(t)
703 )
704 return compressor
705
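# Editorial aside on the new method above, not part of the commit:
# _get_decompressor() memoizes one decompressor per compression header,
# building it lazily from the engine registry on first use. The same shape as
# a self-contained toy; ENGINES stands in for
# util.compengines.forrevlogheader(), and the header bytes mirror revlog
# chunk markers ('x' opens a zlib stream, 'u' marks stored-plain data).

import zlib

ENGINES = {
    b'x': zlib.decompress,        # zlib streams begin with 0x78 ('x')
    b'u': lambda data: data[1:],  # strip the literal 'u' marker
}
_cache = {}

def get_decompressor(header):
    try:
        return _cache[header]
    except KeyError:
        try:
            _cache[header] = ENGINES[header]
        except KeyError:
            raise ValueError('unknown compression type %r' % header)
        return _cache[header]

chunk = zlib.compress(b'some revision data')
assert get_decompressor(chunk[0:1])(chunk) == b'some revision data'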
692 706 @util.propertycache
693 707 def _compressor(self):
694 708 engine = util.compengines[self._compengine]
695 709 return engine.revlogcompressor(self._compengineopts)
696 710
697 711 def _indexfp(self):
698 712 """file object for the revlog's index file"""
699 713 return self.opener(self._indexfile, mode=b"r")
700 714
701 715 def __index_write_fp(self):
702 716 # You should not use this directly; use `_writing` instead
703 717 try:
704 718 f = self.opener(
705 719 self._indexfile, mode=b"r+", checkambig=self._checkambig
706 720 )
707 721 if self._docket is None:
708 722 f.seek(0, os.SEEK_END)
709 723 else:
710 724 f.seek(self._docket.index_end, os.SEEK_SET)
711 725 return f
712 726 except IOError as inst:
713 727 if inst.errno != errno.ENOENT:
714 728 raise
715 729 return self.opener(
716 730 self._indexfile, mode=b"w+", checkambig=self._checkambig
717 731 )
718 732
719 733 def __index_new_fp(self):
720 734 # You should not use this unless you are upgrading from inline revlog
721 735 return self.opener(
722 736 self._indexfile,
723 737 mode=b"w",
724 738 checkambig=self._checkambig,
725 739 atomictemp=True,
726 740 )
727 741
728 742 def _datafp(self, mode=b'r'):
729 743 """file object for the revlog's data file"""
730 744 return self.opener(self._datafile, mode=mode)
731 745
732 746 @contextlib.contextmanager
733 747 def _datareadfp(self, existingfp=None):
734 748 """file object suitable to read data"""
735 749 # Use explicit file handle, if given.
736 750 if existingfp is not None:
737 751 yield existingfp
738 752
739 753 # Use a file handle being actively used for writes, if available.
740 754 # There is some danger to doing this because reads will seek the
741 755 # file. However, _writeentry() performs a SEEK_END before all writes,
742 756 # so we should be safe.
743 757 elif self._writinghandles:
744 758 if self._inline:
745 759 yield self._writinghandles[0]
746 760 else:
747 761 yield self._writinghandles[1]
748 762
749 763 # Otherwise open a new file handle.
750 764 else:
751 765 if self._inline:
752 766 func = self._indexfp
753 767 else:
754 768 func = self._datafp
755 769 with func() as fp:
756 770 yield fp
757 771
758 772 def tiprev(self):
759 773 return len(self.index) - 1
760 774
761 775 def tip(self):
762 776 return self.node(self.tiprev())
763 777
764 778 def __contains__(self, rev):
765 779 return 0 <= rev < len(self)
766 780
767 781 def __len__(self):
768 782 return len(self.index)
769 783
770 784 def __iter__(self):
771 785 return iter(pycompat.xrange(len(self)))
772 786
773 787 def revs(self, start=0, stop=None):
774 788 """iterate over all rev in this revlog (from start to stop)"""
775 789 return storageutil.iterrevs(len(self), start=start, stop=stop)
776 790
777 791 @property
778 792 def nodemap(self):
779 793 msg = (
780 794 b"revlog.nodemap is deprecated, "
781 795 b"use revlog.index.[has_node|rev|get_rev]"
782 796 )
783 797 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
784 798 return self.index.nodemap
785 799
786 800 @property
787 801 def _nodecache(self):
788 802 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
789 803 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
790 804 return self.index.nodemap
791 805
792 806 def hasnode(self, node):
793 807 try:
794 808 self.rev(node)
795 809 return True
796 810 except KeyError:
797 811 return False
798 812
799 813 def candelta(self, baserev, rev):
800 814 """whether two revisions (baserev, rev) can be delta-ed or not"""
801 815 # Disable delta if either rev requires a content-changing flag
802 816 # processor (ex. LFS). This is because such flag processor can alter
803 817 # the rawtext content that the delta will be based on, and two clients
804 818 # could have the same revlog node with different flags (i.e. different
805 819 # rawtext contents) and the delta could be incompatible.
806 820 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
807 821 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
808 822 ):
809 823 return False
810 824 return True
811 825
812 826 def update_caches(self, transaction):
813 827 if self._nodemap_file is not None:
814 828 if transaction is None:
815 829 nodemaputil.update_persistent_nodemap(self)
816 830 else:
817 831 nodemaputil.setup_persistent_nodemap(transaction, self)
818 832
819 833 def clearcaches(self):
820 834 self._revisioncache = None
821 835 self._chainbasecache.clear()
822 836 self._chunkcache = (0, b'')
823 837 self._pcache = {}
824 838 self._nodemap_docket = None
825 839 self.index.clearcaches()
826 840 # The python code is the one responsible for validating the docket, so we
827 841 # end up having to refresh it here.
828 842 use_nodemap = (
829 843 not self._inline
830 844 and self._nodemap_file is not None
831 845 and util.safehasattr(self.index, 'update_nodemap_data')
832 846 )
833 847 if use_nodemap:
834 848 nodemap_data = nodemaputil.persisted_data(self)
835 849 if nodemap_data is not None:
836 850 self._nodemap_docket = nodemap_data[0]
837 851 self.index.update_nodemap_data(*nodemap_data)
838 852
839 853 def rev(self, node):
840 854 try:
841 855 return self.index.rev(node)
842 856 except TypeError:
843 857 raise
844 858 except error.RevlogError:
845 859 # parsers.c radix tree lookup failed
846 860 if (
847 861 node == self.nodeconstants.wdirid
848 862 or node in self.nodeconstants.wdirfilenodeids
849 863 ):
850 864 raise error.WdirUnsupported
851 865 raise error.LookupError(node, self.display_id, _(b'no node'))
852 866
853 867 # Accessors for index entries.
854 868
855 869 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
856 870 # are flags.
857 871 def start(self, rev):
858 872 return int(self.index[rev][0] >> 16)
859 873
860 874 def flags(self, rev):
861 875 return self.index[rev][0] & 0xFFFF
862 876
863 877 def length(self, rev):
864 878 return self.index[rev][1]
865 879
866 880 def sidedata_length(self, rev):
867 881 if not self.hassidedata:
868 882 return 0
869 883 return self.index[rev][9]
870 884
871 885 def rawsize(self, rev):
872 886 """return the length of the uncompressed text for a given revision"""
873 887 l = self.index[rev][2]
874 888 if l >= 0:
875 889 return l
876 890
877 891 t = self.rawdata(rev)
878 892 return len(t)
879 893
880 894 def size(self, rev):
881 895 """length of non-raw text (processed by a "read" flag processor)"""
882 896 # fast path: if no "read" flag processor could change the content,
883 897 # size is rawsize. note: ELLIPSIS is known to not change the content.
884 898 flags = self.flags(rev)
885 899 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
886 900 return self.rawsize(rev)
887 901
888 902 return len(self.revision(rev, raw=False))
889 903
890 904 def chainbase(self, rev):
891 905 base = self._chainbasecache.get(rev)
892 906 if base is not None:
893 907 return base
894 908
895 909 index = self.index
896 910 iterrev = rev
897 911 base = index[iterrev][3]
898 912 while base != iterrev:
899 913 iterrev = base
900 914 base = index[iterrev][3]
901 915
902 916 self._chainbasecache[rev] = base
903 917 return base
904 918
905 919 def linkrev(self, rev):
906 920 return self.index[rev][4]
907 921
908 922 def parentrevs(self, rev):
909 923 try:
910 924 entry = self.index[rev]
911 925 except IndexError:
912 926 if rev == wdirrev:
913 927 raise error.WdirUnsupported
914 928 raise
915 929 if entry[5] == nullrev:
916 930 return entry[6], entry[5]
917 931 else:
918 932 return entry[5], entry[6]
919 933
920 934 # fast parentrevs(rev) where rev isn't filtered
921 935 _uncheckedparentrevs = parentrevs
922 936
923 937 def node(self, rev):
924 938 try:
925 939 return self.index[rev][7]
926 940 except IndexError:
927 941 if rev == wdirrev:
928 942 raise error.WdirUnsupported
929 943 raise
930 944
931 945 # Derived from index values.
932 946
933 947 def end(self, rev):
934 948 return self.start(rev) + self.length(rev)
935 949
936 950 def parents(self, node):
937 951 i = self.index
938 952 d = i[self.rev(node)]
939 953 # inline node() to avoid function call overhead
940 954 if d[5] == nullrev:
941 955 return i[d[6]][7], i[d[5]][7]
942 956 else:
943 957 return i[d[5]][7], i[d[6]][7]
944 958
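# Editorial aside, not part of the diff: parentrevs() and parents() above
# both normalize a null first parent into the second slot, so callers can
# rely on the real parent coming first. In rev terms:

def normalized_parents(p1, p2, nullrev=-1):
    return (p2, p1) if p1 == nullrev else (p1, p2)

assert normalized_parents(-1, 7) == (7, -1)
assert normalized_parents(3, -1) == (3, -1)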
945 959 def chainlen(self, rev):
946 960 return self._chaininfo(rev)[0]
947 961
948 962 def _chaininfo(self, rev):
949 963 chaininfocache = self._chaininfocache
950 964 if rev in chaininfocache:
951 965 return chaininfocache[rev]
952 966 index = self.index
953 967 generaldelta = self._generaldelta
954 968 iterrev = rev
955 969 e = index[iterrev]
956 970 clen = 0
957 971 compresseddeltalen = 0
958 972 while iterrev != e[3]:
959 973 clen += 1
960 974 compresseddeltalen += e[1]
961 975 if generaldelta:
962 976 iterrev = e[3]
963 977 else:
964 978 iterrev -= 1
965 979 if iterrev in chaininfocache:
966 980 t = chaininfocache[iterrev]
967 981 clen += t[0]
968 982 compresseddeltalen += t[1]
969 983 break
970 984 e = index[iterrev]
971 985 else:
972 986 # Add text length of base since decompressing that also takes
973 987 # work. For cache hits the length is already included.
974 988 compresseddeltalen += e[1]
975 989 r = (clen, compresseddeltalen)
976 990 chaininfocache[rev] = r
977 991 return r
978 992
979 993 def _deltachain(self, rev, stoprev=None):
980 994 """Obtain the delta chain for a revision.
981 995
982 996 ``stoprev`` specifies a revision to stop at. If not specified, we
983 997 stop at the base of the chain.
984 998
985 999 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
986 1000 revs in ascending order and ``stopped`` is a bool indicating whether
987 1001 ``stoprev`` was hit.
988 1002 """
989 1003 # Try C implementation.
990 1004 try:
991 1005 return self.index.deltachain(rev, stoprev, self._generaldelta)
992 1006 except AttributeError:
993 1007 pass
994 1008
995 1009 chain = []
996 1010
997 1011 # Alias to prevent attribute lookup in tight loop.
998 1012 index = self.index
999 1013 generaldelta = self._generaldelta
1000 1014
1001 1015 iterrev = rev
1002 1016 e = index[iterrev]
1003 1017 while iterrev != e[3] and iterrev != stoprev:
1004 1018 chain.append(iterrev)
1005 1019 if generaldelta:
1006 1020 iterrev = e[3]
1007 1021 else:
1008 1022 iterrev -= 1
1009 1023 e = index[iterrev]
1010 1024
1011 1025 if iterrev == stoprev:
1012 1026 stopped = True
1013 1027 else:
1014 1028 chain.append(iterrev)
1015 1029 stopped = False
1016 1030
1017 1031 chain.reverse()
1018 1032 return chain, stopped
1019 1033
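# Editorial aside, not part of the diff: the pure-python fallback above, run
# against a made-up "index" that keeps only the delta base field (slot 3 of a
# real entry). With generaldelta the chain follows e[3]; without it, it walks
# rev, rev - 1, ... down to the base.

toy_base = {0: 0, 1: 0, 2: 1, 3: 1, 4: 3}  # rev -> delta base rev (made up)

def toy_deltachain(rev, generaldelta=True):
    chain = []
    iterrev = rev
    while iterrev != toy_base[iterrev]:
        chain.append(iterrev)
        iterrev = toy_base[iterrev] if generaldelta else iterrev - 1
    chain.append(iterrev)
    chain.reverse()
    return chain

assert toy_deltachain(4) == [0, 1, 3, 4]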
1020 1034 def ancestors(self, revs, stoprev=0, inclusive=False):
1021 1035 """Generate the ancestors of 'revs' in reverse revision order.
1022 1036 Does not generate revs lower than stoprev.
1023 1037
1024 1038 See the documentation for ancestor.lazyancestors for more details."""
1025 1039
1026 1040 # first, make sure start revisions aren't filtered
1027 1041 revs = list(revs)
1028 1042 checkrev = self.node
1029 1043 for r in revs:
1030 1044 checkrev(r)
1031 1045 # and we're sure ancestors aren't filtered as well
1032 1046
1033 1047 if rustancestor is not None:
1034 1048 lazyancestors = rustancestor.LazyAncestors
1035 1049 arg = self.index
1036 1050 else:
1037 1051 lazyancestors = ancestor.lazyancestors
1038 1052 arg = self._uncheckedparentrevs
1039 1053 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1040 1054
1041 1055 def descendants(self, revs):
1042 1056 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1043 1057
1044 1058 def findcommonmissing(self, common=None, heads=None):
1045 1059 """Return a tuple of the ancestors of common and the ancestors of heads
1046 1060 that are not ancestors of common. In revset terminology, we return the
1047 1061 tuple:
1048 1062
1049 1063 ::common, (::heads) - (::common)
1050 1064
1051 1065 The list is sorted by revision number, meaning it is
1052 1066 topologically sorted.
1053 1067
1054 1068 'heads' and 'common' are both lists of node IDs. If heads is
1055 1069 not supplied, uses all of the revlog's heads. If common is not
1056 1070 supplied, uses nullid."""
1057 1071 if common is None:
1058 1072 common = [self.nullid]
1059 1073 if heads is None:
1060 1074 heads = self.heads()
1061 1075
1062 1076 common = [self.rev(n) for n in common]
1063 1077 heads = [self.rev(n) for n in heads]
1064 1078
1065 1079 # we want the ancestors, but inclusive
1066 1080 class lazyset(object):
1067 1081 def __init__(self, lazyvalues):
1068 1082 self.addedvalues = set()
1069 1083 self.lazyvalues = lazyvalues
1070 1084
1071 1085 def __contains__(self, value):
1072 1086 return value in self.addedvalues or value in self.lazyvalues
1073 1087
1074 1088 def __iter__(self):
1075 1089 added = self.addedvalues
1076 1090 for r in added:
1077 1091 yield r
1078 1092 for r in self.lazyvalues:
1079 1093 if r not in added:
1080 1094 yield r
1081 1095
1082 1096 def add(self, value):
1083 1097 self.addedvalues.add(value)
1084 1098
1085 1099 def update(self, values):
1086 1100 self.addedvalues.update(values)
1087 1101
1088 1102 has = lazyset(self.ancestors(common))
1089 1103 has.add(nullrev)
1090 1104 has.update(common)
1091 1105
1092 1106 # take all ancestors from heads that aren't in has
1093 1107 missing = set()
1094 1108 visit = collections.deque(r for r in heads if r not in has)
1095 1109 while visit:
1096 1110 r = visit.popleft()
1097 1111 if r in missing:
1098 1112 continue
1099 1113 else:
1100 1114 missing.add(r)
1101 1115 for p in self.parentrevs(r):
1102 1116 if p not in has:
1103 1117 visit.append(p)
1104 1118 missing = list(missing)
1105 1119 missing.sort()
1106 1120 return has, [self.node(miss) for miss in missing]
1107 1121
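# Editorial aside, not part of the diff: the second half of
# findcommonmissing() is a breadth-first walk from the heads that stops at
# anything already reachable from common. The same walk on a made-up DAG:

import collections

parents = {0: [], 1: [0], 2: [1], 3: [1], 4: [2, 3]}  # rev -> parent revs
has = {0, 1, 2}      # stands in for ::common
heads = [4]
missing = set()
visit = collections.deque(r for r in heads if r not in has)
while visit:
    r = visit.popleft()
    if r in missing:
        continue
    missing.add(r)
    for p in parents[r]:
        if p not in has:
            visit.append(p)
assert sorted(missing) == [3, 4]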
1108 1122 def incrementalmissingrevs(self, common=None):
1109 1123 """Return an object that can be used to incrementally compute the
1110 1124 revision numbers of the ancestors of arbitrary sets that are not
1111 1125 ancestors of common. This is an ancestor.incrementalmissingancestors
1112 1126 object.
1113 1127
1114 1128 'common' is a list of revision numbers. If common is not supplied, uses
1115 1129 nullrev.
1116 1130 """
1117 1131 if common is None:
1118 1132 common = [nullrev]
1119 1133
1120 1134 if rustancestor is not None:
1121 1135 return rustancestor.MissingAncestors(self.index, common)
1122 1136 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1123 1137
1124 1138 def findmissingrevs(self, common=None, heads=None):
1125 1139 """Return the revision numbers of the ancestors of heads that
1126 1140 are not ancestors of common.
1127 1141
1128 1142 More specifically, return a list of revision numbers corresponding to
1129 1143 nodes N such that every N satisfies the following constraints:
1130 1144
1131 1145 1. N is an ancestor of some node in 'heads'
1132 1146 2. N is not an ancestor of any node in 'common'
1133 1147
1134 1148 The list is sorted by revision number, meaning it is
1135 1149 topologically sorted.
1136 1150
1137 1151 'heads' and 'common' are both lists of revision numbers. If heads is
1138 1152 not supplied, uses all of the revlog's heads. If common is not
1139 1153 supplied, uses nullrev."""
1140 1154 if common is None:
1141 1155 common = [nullrev]
1142 1156 if heads is None:
1143 1157 heads = self.headrevs()
1144 1158
1145 1159 inc = self.incrementalmissingrevs(common=common)
1146 1160 return inc.missingancestors(heads)
1147 1161
1148 1162 def findmissing(self, common=None, heads=None):
1149 1163 """Return the ancestors of heads that are not ancestors of common.
1150 1164
1151 1165 More specifically, return a list of nodes N such that every N
1152 1166 satisfies the following constraints:
1153 1167
1154 1168 1. N is an ancestor of some node in 'heads'
1155 1169 2. N is not an ancestor of any node in 'common'
1156 1170
1157 1171 The list is sorted by revision number, meaning it is
1158 1172 topologically sorted.
1159 1173
1160 1174 'heads' and 'common' are both lists of node IDs. If heads is
1161 1175 not supplied, uses all of the revlog's heads. If common is not
1162 1176 supplied, uses nullid."""
1163 1177 if common is None:
1164 1178 common = [self.nullid]
1165 1179 if heads is None:
1166 1180 heads = self.heads()
1167 1181
1168 1182 common = [self.rev(n) for n in common]
1169 1183 heads = [self.rev(n) for n in heads]
1170 1184
1171 1185 inc = self.incrementalmissingrevs(common=common)
1172 1186 return [self.node(r) for r in inc.missingancestors(heads)]
1173 1187
1174 1188 def nodesbetween(self, roots=None, heads=None):
1175 1189 """Return a topological path from 'roots' to 'heads'.
1176 1190
1177 1191 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1178 1192 topologically sorted list of all nodes N that satisfy both of
1179 1193 these constraints:
1180 1194
1181 1195 1. N is a descendant of some node in 'roots'
1182 1196 2. N is an ancestor of some node in 'heads'
1183 1197
1184 1198 Every node is considered to be both a descendant and an ancestor
1185 1199 of itself, so every reachable node in 'roots' and 'heads' will be
1186 1200 included in 'nodes'.
1187 1201
1188 1202 'outroots' is the list of reachable nodes in 'roots', i.e., the
1189 1203 subset of 'roots' that is returned in 'nodes'. Likewise,
1190 1204 'outheads' is the subset of 'heads' that is also in 'nodes'.
1191 1205
1192 1206 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1193 1207 unspecified, uses nullid as the only root. If 'heads' is
1194 1208 unspecified, uses list of all of the revlog's heads."""
1195 1209 nonodes = ([], [], [])
1196 1210 if roots is not None:
1197 1211 roots = list(roots)
1198 1212 if not roots:
1199 1213 return nonodes
1200 1214 lowestrev = min([self.rev(n) for n in roots])
1201 1215 else:
1202 1216 roots = [self.nullid] # Everybody's a descendant of nullid
1203 1217 lowestrev = nullrev
1204 1218 if (lowestrev == nullrev) and (heads is None):
1205 1219 # We want _all_ the nodes!
1206 1220 return (
1207 1221 [self.node(r) for r in self],
1208 1222 [self.nullid],
1209 1223 list(self.heads()),
1210 1224 )
1211 1225 if heads is None:
1212 1226 # All nodes are ancestors, so the latest ancestor is the last
1213 1227 # node.
1214 1228 highestrev = len(self) - 1
1215 1229 # Set ancestors to None to signal that every node is an ancestor.
1216 1230 ancestors = None
1217 1231 # Set heads to an empty dictionary for later discovery of heads
1218 1232 heads = {}
1219 1233 else:
1220 1234 heads = list(heads)
1221 1235 if not heads:
1222 1236 return nonodes
1223 1237 ancestors = set()
1224 1238 # Turn heads into a dictionary so we can remove 'fake' heads.
1225 1239 # Also, later we will be using it to filter out the heads we can't
1226 1240 # find from roots.
1227 1241 heads = dict.fromkeys(heads, False)
1228 1242 # Start at the top and keep marking parents until we're done.
1229 1243 nodestotag = set(heads)
1230 1244 # Remember where the top was so we can use it as a limit later.
1231 1245 highestrev = max([self.rev(n) for n in nodestotag])
1232 1246 while nodestotag:
1233 1247 # grab a node to tag
1234 1248 n = nodestotag.pop()
1235 1249 # Never tag nullid
1236 1250 if n == self.nullid:
1237 1251 continue
1238 1252 # A node's revision number represents its place in a
1239 1253 # topologically sorted list of nodes.
1240 1254 r = self.rev(n)
1241 1255 if r >= lowestrev:
1242 1256 if n not in ancestors:
1243 1257 # If we are possibly a descendant of one of the roots
1244 1258 # and we haven't already been marked as an ancestor
1245 1259 ancestors.add(n) # Mark as ancestor
1246 1260 # Add non-nullid parents to list of nodes to tag.
1247 1261 nodestotag.update(
1248 1262 [p for p in self.parents(n) if p != self.nullid]
1249 1263 )
1250 1264 elif n in heads: # We've seen it before, is it a fake head?
1251 1265 # So it is, real heads should not be the ancestors of
1252 1266 # any other heads.
1253 1267 heads.pop(n)
1254 1268 if not ancestors:
1255 1269 return nonodes
1256 1270 # Now that we have our set of ancestors, we want to remove any
1257 1271 # roots that are not ancestors.
1258 1272
1259 1273 # If one of the roots was nullid, everything is included anyway.
1260 1274 if lowestrev > nullrev:
1261 1275 # But, since we weren't, let's recompute the lowest rev to not
1262 1276 # include roots that aren't ancestors.
1263 1277
1264 1278 # Filter out roots that aren't ancestors of heads
1265 1279 roots = [root for root in roots if root in ancestors]
1266 1280 # Recompute the lowest revision
1267 1281 if roots:
1268 1282 lowestrev = min([self.rev(root) for root in roots])
1269 1283 else:
1270 1284 # No more roots? Return empty list
1271 1285 return nonodes
1272 1286 else:
1273 1287 # We are descending from nullid, and don't need to care about
1274 1288 # any other roots.
1275 1289 lowestrev = nullrev
1276 1290 roots = [self.nullid]
1277 1291 # Transform our roots list into a set.
1278 1292 descendants = set(roots)
1279 1293 # Also, keep the original roots so we can filter out roots that aren't
1280 1294 # 'real' roots (i.e. are descended from other roots).
1281 1295 roots = descendants.copy()
1282 1296 # Our topologically sorted list of output nodes.
1283 1297 orderedout = []
1284 1298 # Don't start at nullid since we don't want nullid in our output list,
1285 1299 # and if nullid shows up in descendants, empty parents will look like
1286 1300 # they're descendants.
1287 1301 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1288 1302 n = self.node(r)
1289 1303 isdescendant = False
1290 1304 if lowestrev == nullrev: # Everybody is a descendant of nullid
1291 1305 isdescendant = True
1292 1306 elif n in descendants:
1293 1307 # n is already a descendant
1294 1308 isdescendant = True
1295 1309 # This check only needs to be done here because all the roots
1296 1310 # will start being marked as descendants before the loop.
1297 1311 if n in roots:
1298 1312 # If n was a root, check if it's a 'real' root.
1299 1313 p = tuple(self.parents(n))
1300 1314 # If any of its parents are descendants, it's not a root.
1301 1315 if (p[0] in descendants) or (p[1] in descendants):
1302 1316 roots.remove(n)
1303 1317 else:
1304 1318 p = tuple(self.parents(n))
1305 1319 # A node is a descendant if either of its parents are
1306 1320 # descendants. (We seeded the descendants set with the roots
1307 1321 # up there, remember?)
1308 1322 if (p[0] in descendants) or (p[1] in descendants):
1309 1323 descendants.add(n)
1310 1324 isdescendant = True
1311 1325 if isdescendant and ((ancestors is None) or (n in ancestors)):
1312 1326 # Only include nodes that are both descendants and ancestors.
1313 1327 orderedout.append(n)
1314 1328 if (ancestors is not None) and (n in heads):
1315 1329 # We're trying to figure out which heads are reachable
1316 1330 # from roots.
1317 1331 # Mark this head as having been reached
1318 1332 heads[n] = True
1319 1333 elif ancestors is None:
1320 1334 # Otherwise, we're trying to discover the heads.
1321 1335 # Assume this is a head because if it isn't, the next step
1322 1336 # will eventually remove it.
1323 1337 heads[n] = True
1324 1338 # But, obviously its parents aren't.
1325 1339 for p in self.parents(n):
1326 1340 heads.pop(p, None)
1327 1341 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1328 1342 roots = list(roots)
1329 1343 assert orderedout
1330 1344 assert roots
1331 1345 assert heads
1332 1346 return (orderedout, roots, heads)
1333 1347
1334 1348 def headrevs(self, revs=None):
1335 1349 if revs is None:
1336 1350 try:
1337 1351 return self.index.headrevs()
1338 1352 except AttributeError:
1339 1353 return self._headrevs()
1340 1354 if rustdagop is not None:
1341 1355 return rustdagop.headrevs(self.index, revs)
1342 1356 return dagop.headrevs(revs, self._uncheckedparentrevs)
1343 1357
1344 1358 def computephases(self, roots):
1345 1359 return self.index.computephasesmapsets(roots)
1346 1360
1347 1361 def _headrevs(self):
1348 1362 count = len(self)
1349 1363 if not count:
1350 1364 return [nullrev]
1351 1365 # we won't iter over filtered revs so nobody is a head at start
1352 1366 ishead = [0] * (count + 1)
1353 1367 index = self.index
1354 1368 for r in self:
1355 1369 ishead[r] = 1 # I may be a head
1356 1370 e = index[r]
1357 1371 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1358 1372 return [r for r, val in enumerate(ishead) if val]
1359 1373
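# Editorial aside, not part of the diff: _headrevs() above marks every rev as
# a candidate head, then clears the mark on anything that appears as a
# parent. The extra slot at the end absorbs nullrev (-1). On a made-up DAG:

parentrevs = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}
ishead = [0] * (len(parentrevs) + 1)  # index -1 lands on the spare slot
for r in parentrevs:
    ishead[r] = 1                     # r may be a head...
    p1, p2 = parentrevs[r]
    ishead[p1] = ishead[p2] = 0       # ...but its parents are not
assert [r for r, val in enumerate(ishead) if val] == [2, 3]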
1360 1374 def heads(self, start=None, stop=None):
1361 1375 """return the list of all nodes that have no children
1362 1376
1363 1377 if start is specified, only heads that are descendants of
1364 1378 start will be returned
1365 1379 if stop is specified, it will consider all the revs from stop
1366 1380 as if they had no children
1367 1381 """
1368 1382 if start is None and stop is None:
1369 1383 if not len(self):
1370 1384 return [self.nullid]
1371 1385 return [self.node(r) for r in self.headrevs()]
1372 1386
1373 1387 if start is None:
1374 1388 start = nullrev
1375 1389 else:
1376 1390 start = self.rev(start)
1377 1391
1378 1392 stoprevs = {self.rev(n) for n in stop or []}
1379 1393
1380 1394 revs = dagop.headrevssubset(
1381 1395 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1382 1396 )
1383 1397
1384 1398 return [self.node(rev) for rev in revs]
1385 1399
1386 1400 def children(self, node):
1387 1401 """find the children of a given node"""
1388 1402 c = []
1389 1403 p = self.rev(node)
1390 1404 for r in self.revs(start=p + 1):
1391 1405 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1392 1406 if prevs:
1393 1407 for pr in prevs:
1394 1408 if pr == p:
1395 1409 c.append(self.node(r))
1396 1410 elif p == nullrev:
1397 1411 c.append(self.node(r))
1398 1412 return c
1399 1413
1400 1414 def commonancestorsheads(self, a, b):
1401 1415 """calculate all the heads of the common ancestors of nodes a and b"""
1402 1416 a, b = self.rev(a), self.rev(b)
1403 1417 ancs = self._commonancestorsheads(a, b)
1404 1418 return pycompat.maplist(self.node, ancs)
1405 1419
1406 1420 def _commonancestorsheads(self, *revs):
1407 1421 """calculate all the heads of the common ancestors of revs"""
1408 1422 try:
1409 1423 ancs = self.index.commonancestorsheads(*revs)
1410 1424 except (AttributeError, OverflowError): # C implementation failed
1411 1425 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1412 1426 return ancs
1413 1427
1414 1428 def isancestor(self, a, b):
1415 1429 """return True if node a is an ancestor of node b
1416 1430
1417 1431 A revision is considered an ancestor of itself."""
1418 1432 a, b = self.rev(a), self.rev(b)
1419 1433 return self.isancestorrev(a, b)
1420 1434
1421 1435 def isancestorrev(self, a, b):
1422 1436 """return True if revision a is an ancestor of revision b
1423 1437
1424 1438 A revision is considered an ancestor of itself.
1425 1439
1426 1440 The implementation of this is trivial but the use of
1427 1441 reachableroots is not."""
1428 1442 if a == nullrev:
1429 1443 return True
1430 1444 elif a == b:
1431 1445 return True
1432 1446 elif a > b:
1433 1447 return False
1434 1448 return bool(self.reachableroots(a, [b], [a], includepath=False))
1435 1449
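# Illustration, not part of this module: why the `a > b` early return in
# isancestorrev() above is safe. Revision numbers are assigned in
# topological order, so an ancestor always has a smaller number than its
# descendant, and a walk from b only ever needs to visit revs >= a. Toy
# sketch over a hypothetical parents table ((p1, p2) tuples, -1 for
# nullrev); runnable if lifted out of this listing:
#
#     def toy_isancestorrev(parents, a, b):
#         if a == -1 or a == b:
#             return True
#         if a > b:
#             return False
#         stack, seen = [b], {b}
#         while stack:
#             r = stack.pop()
#             for p in parents[r]:
#                 if p == a:
#                     return True
#                 if p > a and p not in seen:
#                     seen.add(p)
#                     stack.append(p)
#         return False
#
#     toy_isancestorrev([(-1, -1), (0, -1), (1, -1)], 0, 2)  ==>  True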
1436 1450 def reachableroots(self, minroot, heads, roots, includepath=False):
1437 1451 """return (heads(::(<roots> and <roots>::<heads>)))
1438 1452
1439 1453 If includepath is True, return (<roots>::<heads>)."""
1440 1454 try:
1441 1455 return self.index.reachableroots2(
1442 1456 minroot, heads, roots, includepath
1443 1457 )
1444 1458 except AttributeError:
1445 1459 return dagop._reachablerootspure(
1446 1460 self.parentrevs, minroot, roots, heads, includepath
1447 1461 )
1448 1462
1449 1463 def ancestor(self, a, b):
1450 1464 """calculate the "best" common ancestor of nodes a and b"""
1451 1465
1452 1466 a, b = self.rev(a), self.rev(b)
1453 1467 try:
1454 1468 ancs = self.index.ancestors(a, b)
1455 1469 except (AttributeError, OverflowError):
1456 1470 ancs = ancestor.ancestors(self.parentrevs, a, b)
1457 1471 if ancs:
1458 1472 # choose a consistent winner when there's a tie
1459 1473 return min(map(self.node, ancs))
1460 1474 return self.nullid
1461 1475
1462 1476 def _match(self, id):
1463 1477 if isinstance(id, int):
1464 1478 # rev
1465 1479 return self.node(id)
1466 1480 if len(id) == self.nodeconstants.nodelen:
1467 1481 # possibly a binary node
1468 1482 # odds of a binary node being all hex in ASCII are 1 in 10**25
1469 1483 try:
1470 1484 node = id
1471 1485 self.rev(node) # quick search the index
1472 1486 return node
1473 1487 except error.LookupError:
1474 1488 pass # may be partial hex id
1475 1489 try:
1476 1490 # str(rev)
1477 1491 rev = int(id)
1478 1492 if b"%d" % rev != id:
1479 1493 raise ValueError
1480 1494 if rev < 0:
1481 1495 rev = len(self) + rev
1482 1496 if rev < 0 or rev >= len(self):
1483 1497 raise ValueError
1484 1498 return self.node(rev)
1485 1499 except (ValueError, OverflowError):
1486 1500 pass
1487 1501 if len(id) == 2 * self.nodeconstants.nodelen:
1488 1502 try:
1489 1503 # a full hex nodeid?
1490 1504 node = bin(id)
1491 1505 self.rev(node)
1492 1506 return node
1493 1507 except (TypeError, error.LookupError):
1494 1508 pass
1495 1509
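# Summary of the resolution order implemented by _match() above (inputs
# are hypothetical):
#   1. int                  -> taken as a revision number directly
#   2. nodelen-byte string  -> tried as a binary nodeid
#   3. decimal bytestring   -> str(rev); negative values count back from
#                              the end of the revlog
#   4. 2*nodelen hex string -> tried as a full hex nodeid
#   5. anything else        -> None; _partialmatch() below then handles
#                              (possibly ambiguous) hex prefixes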
1496 1510 def _partialmatch(self, id):
1497 1511 # we don't care about wdirfilenodeids as they should always be full hashes
1498 1512 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1499 1513 try:
1500 1514 partial = self.index.partialmatch(id)
1501 1515 if partial and self.hasnode(partial):
1502 1516 if maybewdir:
1503 1517 # single 'ff...' match in radix tree, ambiguous with wdir
1504 1518 raise error.RevlogError
1505 1519 return partial
1506 1520 if maybewdir:
1507 1521 # no 'ff...' match in radix tree, wdir identified
1508 1522 raise error.WdirUnsupported
1509 1523 return None
1510 1524 except error.RevlogError:
1511 1525 # parsers.c radix tree lookup gave multiple matches
1512 1526 # fast path: for unfiltered changelog, radix tree is accurate
1513 1527 if not getattr(self, 'filteredrevs', None):
1514 1528 raise error.AmbiguousPrefixLookupError(
1515 1529 id, self.display_id, _(b'ambiguous identifier')
1516 1530 )
1517 1531 # fall through to slow path that filters hidden revisions
1518 1532 except (AttributeError, ValueError):
1519 1533 # we are pure python, or key was too short to search radix tree
1520 1534 pass
1521 1535
1522 1536 if id in self._pcache:
1523 1537 return self._pcache[id]
1524 1538
1525 1539 if len(id) <= 40:
1526 1540 try:
1527 1541 # hex(node)[:...]
1528 1542 l = len(id) // 2 # grab an even number of digits
1529 1543 prefix = bin(id[: l * 2])
1530 1544 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1531 1545 nl = [
1532 1546 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1533 1547 ]
1534 1548 if self.nodeconstants.nullhex.startswith(id):
1535 1549 nl.append(self.nullid)
1536 1550 if len(nl) > 0:
1537 1551 if len(nl) == 1 and not maybewdir:
1538 1552 self._pcache[id] = nl[0]
1539 1553 return nl[0]
1540 1554 raise error.AmbiguousPrefixLookupError(
1541 1555 id, self.display_id, _(b'ambiguous identifier')
1542 1556 )
1543 1557 if maybewdir:
1544 1558 raise error.WdirUnsupported
1545 1559 return None
1546 1560 except TypeError:
1547 1561 pass
1548 1562
1549 1563 def lookup(self, id):
1550 1564 """locate a node based on:
1551 1565 - revision number or str(revision number)
1552 1566 - nodeid or subset of hex nodeid
1553 1567 """
1554 1568 n = self._match(id)
1555 1569 if n is not None:
1556 1570 return n
1557 1571 n = self._partialmatch(id)
1558 1572 if n:
1559 1573 return n
1560 1574
1561 1575 raise error.LookupError(id, self.display_id, _(b'no match found'))
1562 1576
1563 1577 def shortest(self, node, minlength=1):
1564 1578 """Find the shortest unambiguous prefix that matches node."""
1565 1579
1566 1580 def isvalid(prefix):
1567 1581 try:
1568 1582 matchednode = self._partialmatch(prefix)
1569 1583 except error.AmbiguousPrefixLookupError:
1570 1584 return False
1571 1585 except error.WdirUnsupported:
1572 1586 # single 'ff...' match
1573 1587 return True
1574 1588 if matchednode is None:
1575 1589 raise error.LookupError(node, self.display_id, _(b'no node'))
1576 1590 return True
1577 1591
1578 1592 def maybewdir(prefix):
1579 1593 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1580 1594
1581 1595 hexnode = hex(node)
1582 1596
1583 1597 def disambiguate(hexnode, minlength):
1584 1598 """Disambiguate against wdirid."""
1585 1599 for length in range(minlength, len(hexnode) + 1):
1586 1600 prefix = hexnode[:length]
1587 1601 if not maybewdir(prefix):
1588 1602 return prefix
1589 1603
1590 1604 if not getattr(self, 'filteredrevs', None):
1591 1605 try:
1592 1606 length = max(self.index.shortest(node), minlength)
1593 1607 return disambiguate(hexnode, length)
1594 1608 except error.RevlogError:
1595 1609 if node != self.nodeconstants.wdirid:
1596 1610 raise error.LookupError(
1597 1611 node, self.display_id, _(b'no node')
1598 1612 )
1599 1613 except AttributeError:
1600 1614 # Fall through to pure code
1601 1615 pass
1602 1616
1603 1617 if node == self.nodeconstants.wdirid:
1604 1618 for length in range(minlength, len(hexnode) + 1):
1605 1619 prefix = hexnode[:length]
1606 1620 if isvalid(prefix):
1607 1621 return prefix
1608 1622
1609 1623 for length in range(minlength, len(hexnode) + 1):
1610 1624 prefix = hexnode[:length]
1611 1625 if isvalid(prefix):
1612 1626 return disambiguate(hexnode, length)
1613 1627
1614 1628 def cmp(self, node, text):
1615 1629 """compare text with a given file revision
1616 1630
1617 1631 returns True if text is different from what is stored.
1618 1632 """
1619 1633 p1, p2 = self.parents(node)
1620 1634 return storageutil.hashrevisionsha1(text, p1, p2) != node
1621 1635
1622 1636 def _cachesegment(self, offset, data):
1623 1637 """Add a segment to the revlog cache.
1624 1638
1625 1639 Accepts an absolute offset and the data that is at that location.
1626 1640 """
1627 1641 o, d = self._chunkcache
1628 1642 # try to add to existing cache
1629 1643 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1630 1644 self._chunkcache = o, d + data
1631 1645 else:
1632 1646 self._chunkcache = offset, data
1633 1647
1634 1648 def _readsegment(self, offset, length, df=None):
1635 1649 """Load a segment of raw data from the revlog.
1636 1650
1637 1651 Accepts an absolute offset, length to read, and an optional existing
1638 1652 file handle to read from.
1639 1653
1640 1654 If an existing file handle is passed, it will be seeked and the
1641 1655 original seek position will NOT be restored.
1642 1656
1643 1657 Returns a str or buffer of raw byte data.
1644 1658
1645 1659 Raises if the requested number of bytes could not be read.
1646 1660 """
1647 1661 # Cache data both forward and backward around the requested
1648 1662 # data, in a fixed size window. This helps speed up operations
1649 1663 # involving reading the revlog backwards.
1650 1664 cachesize = self._chunkcachesize
1651 1665 realoffset = offset & ~(cachesize - 1)
1652 1666 reallength = (
1653 1667 (offset + length + cachesize) & ~(cachesize - 1)
1654 1668 ) - realoffset
1655 1669 with self._datareadfp(df) as df:
1656 1670 df.seek(realoffset)
1657 1671 d = df.read(reallength)
1658 1672
1659 1673 self._cachesegment(realoffset, d)
1660 1674 if offset != realoffset or reallength != length:
1661 1675 startoffset = offset - realoffset
1662 1676 if len(d) - startoffset < length:
1663 1677 raise error.RevlogError(
1664 1678 _(
1665 1679 b'partial read of revlog %s; expected %d bytes from '
1666 1680 b'offset %d, got %d'
1667 1681 )
1668 1682 % (
1669 1683 self._indexfile if self._inline else self._datafile,
1670 1684 length,
1671 1685 offset,
1672 1686 len(d) - startoffset,
1673 1687 )
1674 1688 )
1675 1689
1676 1690 return util.buffer(d, startoffset, length)
1677 1691
1678 1692 if len(d) < length:
1679 1693 raise error.RevlogError(
1680 1694 _(
1681 1695 b'partial read of revlog %s; expected %d bytes from offset '
1682 1696 b'%d, got %d'
1683 1697 )
1684 1698 % (
1685 1699 self._indexfile if self._inline else self._datafile,
1686 1700 length,
1687 1701 offset,
1688 1702 len(d),
1689 1703 )
1690 1704 )
1691 1705
1692 1706 return d
1693 1707
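# Illustration, not part of this module: the window-alignment arithmetic
# used by _readsegment() above, assuming cachesize is a power of two (a
# requirement enforced elsewhere on _chunkcachesize). Runnable if lifted
# out of this listing:
#
#     def toy_window(offset, length, cachesize=65536):
#         realoffset = offset & ~(cachesize - 1)      # round down to window
#         reallength = (
#             (offset + length + cachesize) & ~(cachesize - 1)
#         ) - realoffset                              # round up past the end
#         return realoffset, reallength
#
#     toy_window(70000, 100)  ==>  (65536, 65536), i.e. one aligned window
#     covering bytes 65536..131072, which contains the requested
#     70000..70100 with slack on both sides.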
1694 1708 def _getsegment(self, offset, length, df=None):
1695 1709 """Obtain a segment of raw data from the revlog.
1696 1710
1697 1711 Accepts an absolute offset, length of bytes to obtain, and an
1698 1712 optional file handle to the already-opened revlog. If the file
1699 1713 handle is used, its original seek position will not be preserved.
1700 1714
1701 1715 Requests for data may be returned from a cache.
1702 1716
1703 1717 Returns a str or a buffer instance of raw byte data.
1704 1718 """
1705 1719 o, d = self._chunkcache
1706 1720 l = len(d)
1707 1721
1708 1722 # is it in the cache?
1709 1723 cachestart = offset - o
1710 1724 cacheend = cachestart + length
1711 1725 if cachestart >= 0 and cacheend <= l:
1712 1726 if cachestart == 0 and cacheend == l:
1713 1727 return d # avoid a copy
1714 1728 return util.buffer(d, cachestart, cacheend - cachestart)
1715 1729
1716 1730 return self._readsegment(offset, length, df=df)
1717 1731
1718 1732 def _getsegmentforrevs(self, startrev, endrev, df=None):
1719 1733 """Obtain a segment of raw data corresponding to a range of revisions.
1720 1734
1721 1735 Accepts the start and end revisions and an optional already-open
1722 1736 file handle to be used for reading. If the file handle is used, its
1723 1737 seek position will not be preserved.
1724 1738
1725 1739 Requests for data may be satisfied by a cache.
1726 1740
1727 1741 Returns a 2-tuple of (offset, data) for the requested range of
1728 1742 revisions. Offset is the integer offset from the beginning of the
1729 1743 revlog and data is a str or buffer of the raw byte data.
1730 1744
1731 1745 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1732 1746 to determine where each revision's data begins and ends.
1733 1747 """
1734 1748 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1735 1749 # (functions are expensive).
1736 1750 index = self.index
1737 1751 istart = index[startrev]
1738 1752 start = int(istart[0] >> 16)
1739 1753 if startrev == endrev:
1740 1754 end = start + istart[1]
1741 1755 else:
1742 1756 iend = index[endrev]
1743 1757 end = int(iend[0] >> 16) + iend[1]
1744 1758
1745 1759 if self._inline:
1746 1760 start += (startrev + 1) * self.index.entry_size
1747 1761 end += (endrev + 1) * self.index.entry_size
1748 1762 length = end - start
1749 1763
1750 1764 return start, self._getsegment(start, length, df=df)
1751 1765
1752 1766 def _chunk(self, rev, df=None):
1753 1767 """Obtain a single decompressed chunk for a revision.
1754 1768
1755 1769 Accepts an integer revision and an optional already-open file handle
1756 1770 to be used for reading. If used, the seek position of the file will not
1757 1771 be preserved.
1758 1772
1759 1773 Returns a str holding uncompressed data for the requested revision.
1760 1774 """
1761 1775 compression_mode = self.index[rev][10]
1762 1776 data = self._getsegmentforrevs(rev, rev, df=df)[1]
1763 1777 if compression_mode == COMP_MODE_PLAIN:
1764 1778 return data
1765 1779 elif compression_mode == COMP_MODE_INLINE:
1766 1780 return self.decompress(data)
1767 1781 else:
1768 1782 msg = 'unknown compression mode %d'
1769 1783 msg %= compression_mode
1770 1784 raise error.RevlogError(msg)
1771 1785
1772 1786 def _chunks(self, revs, df=None, targetsize=None):
1773 1787 """Obtain decompressed chunks for the specified revisions.
1774 1788
1775 1789 Accepts an iterable of numeric revisions that are assumed to be in
1776 1790 ascending order. Also accepts an optional already-open file handle
1777 1791 to be used for reading. If used, the seek position of the file will
1778 1792 not be preserved.
1779 1793
1780 1794 This function is similar to calling ``self._chunk()`` multiple times,
1781 1795 but is faster.
1782 1796
1783 1797 Returns a list with decompressed data for each requested revision.
1784 1798 """
1785 1799 if not revs:
1786 1800 return []
1787 1801 start = self.start
1788 1802 length = self.length
1789 1803 inline = self._inline
1790 1804 iosize = self.index.entry_size
1791 1805 buffer = util.buffer
1792 1806
1793 1807 l = []
1794 1808 ladd = l.append
1795 1809
1796 1810 if not self._withsparseread:
1797 1811 slicedchunks = (revs,)
1798 1812 else:
1799 1813 slicedchunks = deltautil.slicechunk(
1800 1814 self, revs, targetsize=targetsize
1801 1815 )
1802 1816
1803 1817 for revschunk in slicedchunks:
1804 1818 firstrev = revschunk[0]
1805 1819 # Skip trailing revisions with empty diff
1806 1820 for lastrev in revschunk[::-1]:
1807 1821 if length(lastrev) != 0:
1808 1822 break
1809 1823
1810 1824 try:
1811 1825 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1812 1826 except OverflowError:
1813 1827 # issue4215 - we can't cache a run of chunks greater than
1814 1828 # 2G on Windows
1815 1829 return [self._chunk(rev, df=df) for rev in revschunk]
1816 1830
1817 1831 decomp = self.decompress
1818 1832 for rev in revschunk:
1819 1833 chunkstart = start(rev)
1820 1834 if inline:
1821 1835 chunkstart += (rev + 1) * iosize
1822 1836 chunklength = length(rev)
1823 1837 comp_mode = self.index[rev][10]
1824 1838 c = buffer(data, chunkstart - offset, chunklength)
1825 1839 if comp_mode == COMP_MODE_PLAIN:
1826 1840 ladd(c)
1827 1841 elif comp_mode == COMP_MODE_INLINE:
1828 1842 ladd(decomp(c))
1829 1843 else:
1830 1844 msg = 'unknown compression mode %d'
1831 1845 msg %= comp_mode
1832 1846 raise error.RevlogError(msg)
1833 1847
1834 1848 return l
1835 1849
1836 1850 def _chunkclear(self):
1837 1851 """Clear the raw chunk cache."""
1838 1852 self._chunkcache = (0, b'')
1839 1853
1840 1854 def deltaparent(self, rev):
1841 1855 """return deltaparent of the given revision"""
1842 1856 base = self.index[rev][3]
1843 1857 if base == rev:
1844 1858 return nullrev
1845 1859 elif self._generaldelta:
1846 1860 return base
1847 1861 else:
1848 1862 return rev - 1
1849 1863
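# Summary of the two delta-base conventions handled by deltaparent()
# above, reading the index column entry[3] (`base`):
#   - generaldelta revlogs: `base` *is* the delta parent and may be any
#     earlier revision; base == rev means the revision is stored as a
#     full text, hence nullrev is returned.
#   - legacy revlogs: deltas always apply against rev - 1; `base` only
#     marks where the current delta chain started.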
1850 1864 def issnapshot(self, rev):
1851 1865 """tells whether rev is a snapshot"""
1852 1866 if not self._sparserevlog:
1853 1867 return self.deltaparent(rev) == nullrev
1854 1868 elif util.safehasattr(self.index, b'issnapshot'):
1855 1869 # directly assign the method to cache the testing and access
1856 1870 self.issnapshot = self.index.issnapshot
1857 1871 return self.issnapshot(rev)
1858 1872 if rev == nullrev:
1859 1873 return True
1860 1874 entry = self.index[rev]
1861 1875 base = entry[3]
1862 1876 if base == rev:
1863 1877 return True
1864 1878 if base == nullrev:
1865 1879 return True
1866 1880 p1 = entry[5]
1867 1881 p2 = entry[6]
1868 1882 if base == p1 or base == p2:
1869 1883 return False
1870 1884 return self.issnapshot(base)
1871 1885
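# Illustration, not part of this module: the sparse-revlog recursion of
# issnapshot() above on a hypothetical toy index. A revision is a snapshot
# iff its delta base is not one of its parents, all the way down to a
# full-text or null-based revision. Runnable if lifted out of this listing:
#
#     def toy_issnapshot(entries, rev):
#         # entries[rev] == (base, p1, p2), with -1 for nullrev
#         if rev == -1:
#             return True
#         base, p1, p2 = entries[rev]
#         if base == rev or base == -1:
#             return True   # full text, or delta against null
#         if base in (p1, p2):
#             return False  # ordinary delta against a parent
#         return toy_issnapshot(entries, base)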
1872 1886 def snapshotdepth(self, rev):
1873 1887 """number of snapshot in the chain before this one"""
1874 1888 if not self.issnapshot(rev):
1875 1889 raise error.ProgrammingError(b'revision %d not a snapshot')
1876 1890 return len(self._deltachain(rev)[0]) - 1
1877 1891
1878 1892 def revdiff(self, rev1, rev2):
1879 1893 """return or calculate a delta between two revisions
1880 1894
1881 1895 The delta calculated is in binary form and is intended to be written to
1882 1896 revlog data directly. So this function needs raw revision data.
1883 1897 """
1884 1898 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1885 1899 return bytes(self._chunk(rev2))
1886 1900
1887 1901 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1888 1902
1889 1903 def _processflags(self, text, flags, operation, raw=False):
1890 1904 """deprecated entry point to access flag processors"""
1891 1905 msg = b'_processflag(...) use the specialized variant'
1892 1906 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1893 1907 if raw:
1894 1908 return text, flagutil.processflagsraw(self, text, flags)
1895 1909 elif operation == b'read':
1896 1910 return flagutil.processflagsread(self, text, flags)
1897 1911 else: # write operation
1898 1912 return flagutil.processflagswrite(self, text, flags)
1899 1913
1900 1914 def revision(self, nodeorrev, _df=None, raw=False):
1901 1915 """return an uncompressed revision of a given node or revision
1902 1916 number.
1903 1917
1904 1918 _df - an existing file handle to read from. (internal-only)
1905 1919 raw - an optional argument specifying if the revision data is to be
1906 1920 treated as raw data when applying flag transforms. 'raw' should be set
1907 1921 to True when generating changegroups or in debug commands.
1908 1922 """
1909 1923 if raw:
1910 1924 msg = (
1911 1925 b'revlog.revision(..., raw=True) is deprecated, '
1912 1926 b'use revlog.rawdata(...)'
1913 1927 )
1914 1928 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1915 1929 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1916 1930
1917 1931 def sidedata(self, nodeorrev, _df=None):
1918 1932 """a map of extra data related to the changeset but not part of the hash
1919 1933
1920 1934 This function currently returns a dictionary. However, a more advanced
1921 1935 mapping object will likely be used in the future for more
1922 1936 efficient/lazy code.
1923 1937 """
1924 1938 return self._revisiondata(nodeorrev, _df)[1]
1925 1939
1926 1940 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1927 1941 # deal with <nodeorrev> argument type
1928 1942 if isinstance(nodeorrev, int):
1929 1943 rev = nodeorrev
1930 1944 node = self.node(rev)
1931 1945 else:
1932 1946 node = nodeorrev
1933 1947 rev = None
1934 1948
1935 1949 # fast path the special `nullid` rev
1936 1950 if node == self.nullid:
1937 1951 return b"", {}
1938 1952
1939 1953 # ``rawtext`` is the text as stored inside the revlog. Might be the
1940 1954 # revision or might need to be processed to retrieve the revision.
1941 1955 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1942 1956
1943 1957 if self.hassidedata:
1944 1958 if rev is None:
1945 1959 rev = self.rev(node)
1946 1960 sidedata = self._sidedata(rev)
1947 1961 else:
1948 1962 sidedata = {}
1949 1963
1950 1964 if raw and validated:
1951 1965 # if we don't want to process the raw text and that raw
1952 1966 # text is cached, we can exit early.
1953 1967 return rawtext, sidedata
1954 1968 if rev is None:
1955 1969 rev = self.rev(node)
1956 1970 # the revlog's flag for this revision
1957 1971 # (usually alter its state or content)
1958 1972 flags = self.flags(rev)
1959 1973
1960 1974 if validated and flags == REVIDX_DEFAULT_FLAGS:
1961 1975 # no extra flags set, no flag processor runs, text = rawtext
1962 1976 return rawtext, sidedata
1963 1977
1964 1978 if raw:
1965 1979 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1966 1980 text = rawtext
1967 1981 else:
1968 1982 r = flagutil.processflagsread(self, rawtext, flags)
1969 1983 text, validatehash = r
1970 1984 if validatehash:
1971 1985 self.checkhash(text, node, rev=rev)
1972 1986 if not validated:
1973 1987 self._revisioncache = (node, rev, rawtext)
1974 1988
1975 1989 return text, sidedata
1976 1990
1977 1991 def _rawtext(self, node, rev, _df=None):
1978 1992 """return the possibly unvalidated rawtext for a revision
1979 1993
1980 1994 returns (rev, rawtext, validated)
1981 1995 """
1982 1996
1983 1997 # revision in the cache (could be useful to apply delta)
1984 1998 cachedrev = None
1985 1999 # An intermediate text to apply deltas to
1986 2000 basetext = None
1987 2001
1988 2002 # Check if we have the entry in cache
1989 2003 # The cache entry looks like (node, rev, rawtext)
1990 2004 if self._revisioncache:
1991 2005 if self._revisioncache[0] == node:
1992 2006 return (rev, self._revisioncache[2], True)
1993 2007 cachedrev = self._revisioncache[1]
1994 2008
1995 2009 if rev is None:
1996 2010 rev = self.rev(node)
1997 2011
1998 2012 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1999 2013 if stopped:
2000 2014 basetext = self._revisioncache[2]
2001 2015
2002 2016 # drop cache to save memory, the caller is expected to
2003 2017 # update self._revisioncache after validating the text
2004 2018 self._revisioncache = None
2005 2019
2006 2020 targetsize = None
2007 2021 rawsize = self.index[rev][2]
2008 2022 if 0 <= rawsize:
2009 2023 targetsize = 4 * rawsize
2010 2024
2011 2025 bins = self._chunks(chain, df=_df, targetsize=targetsize)
2012 2026 if basetext is None:
2013 2027 basetext = bytes(bins[0])
2014 2028 bins = bins[1:]
2015 2029
2016 2030 rawtext = mdiff.patches(basetext, bins)
2017 2031 del basetext # let us have a chance to free memory early
2018 2032 return (rev, rawtext, False)
2019 2033
2020 2034 def _sidedata(self, rev):
2021 2035 """Return the sidedata for a given revision number."""
2022 2036 index_entry = self.index[rev]
2023 2037 sidedata_offset = index_entry[8]
2024 2038 sidedata_size = index_entry[9]
2025 2039
2026 2040 if self._inline:
2027 2041 sidedata_offset += self.index.entry_size * (1 + rev)
2028 2042 if sidedata_size == 0:
2029 2043 return {}
2030 2044
2031 2045 segment = self._getsegment(sidedata_offset, sidedata_size)
2032 2046 sidedata = sidedatautil.deserialize_sidedata(segment)
2033 2047 return sidedata
2034 2048
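# Note on the inline adjustment in _sidedata() above: sidedata offsets are
# stored as if revision data lived in a standalone .d file, so for an
# inline revlog the physical offset must additionally skip the (1 + rev)
# index entries interleaved before that position.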
2035 2049 def rawdata(self, nodeorrev, _df=None):
2036 2050 """return an uncompressed raw data of a given node or revision number.
2037 2051
2038 2052 _df - an existing file handle to read from. (internal-only)
2039 2053 """
2040 2054 return self._revisiondata(nodeorrev, _df, raw=True)[0]
2041 2055
2042 2056 def hash(self, text, p1, p2):
2043 2057 """Compute a node hash.
2044 2058
2045 2059 Available as a function so that subclasses can replace the hash
2046 2060 as needed.
2047 2061 """
2048 2062 return storageutil.hashrevisionsha1(text, p1, p2)
2049 2063
2050 2064 def checkhash(self, text, node, p1=None, p2=None, rev=None):
2051 2065 """Check node hash integrity.
2052 2066
2053 2067 Available as a function so that subclasses can extend hash mismatch
2054 2068 behaviors as needed.
2055 2069 """
2056 2070 try:
2057 2071 if p1 is None and p2 is None:
2058 2072 p1, p2 = self.parents(node)
2059 2073 if node != self.hash(text, p1, p2):
2060 2074 # Clear the revision cache on hash failure. The revision cache
2061 2075 # only stores the raw revision and clearing the cache does have
2062 2076 # the side-effect that we won't have a cache hit when the raw
2063 2077 # revision data is accessed. But this case should be rare and
2064 2078 # it is extra work to teach the cache about the hash
2065 2079 # verification state.
2066 2080 if self._revisioncache and self._revisioncache[0] == node:
2067 2081 self._revisioncache = None
2068 2082
2069 2083 revornode = rev
2070 2084 if revornode is None:
2071 2085 revornode = templatefilters.short(hex(node))
2072 2086 raise error.RevlogError(
2073 2087 _(b"integrity check failed on %s:%s")
2074 2088 % (self.display_id, pycompat.bytestr(revornode))
2075 2089 )
2076 2090 except error.RevlogError:
2077 2091 if self._censorable and storageutil.iscensoredtext(text):
2078 2092 raise error.CensoredNodeError(self.display_id, node, text)
2079 2093 raise
2080 2094
2081 2095 def _enforceinlinesize(self, tr):
2082 2096 """Check if the revlog is too big for inline and convert if so.
2083 2097
2084 2098 This should be called after revisions are added to the revlog. If the
2085 2099 revlog has grown too large to be an inline revlog, it will convert it
2086 2100 to use multiple index and data files.
2087 2101 """
2088 2102 tiprev = len(self) - 1
2089 2103 total_size = self.start(tiprev) + self.length(tiprev)
2090 2104 if not self._inline or total_size < _maxinline:
2091 2105 return
2092 2106
2093 2107 troffset = tr.findoffset(self._indexfile)
2094 2108 if troffset is None:
2095 2109 raise error.RevlogError(
2096 2110 _(b"%s not found in the transaction") % self._indexfile
2097 2111 )
2098 2112 trindex = 0
2099 2113 tr.add(self._datafile, 0)
2100 2114
2101 2115 existing_handles = False
2102 2116 if self._writinghandles is not None:
2103 2117 existing_handles = True
2104 2118 fp = self._writinghandles[0]
2105 2119 fp.flush()
2106 2120 fp.close()
2107 2121 # We can't use the cached file handle after close(). So prevent
2108 2122 # its usage.
2109 2123 self._writinghandles = None
2110 2124
2111 2125 new_dfh = self._datafp(b'w+')
2112 2126 new_dfh.truncate(0) # drop any potentially existing data
2113 2127 try:
2114 2128 with self._indexfp() as read_ifh:
2115 2129 for r in self:
2116 2130 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2117 2131 if troffset <= self.start(r):
2118 2132 trindex = r
2119 2133 new_dfh.flush()
2120 2134
2121 2135 with self.__index_new_fp() as fp:
2122 2136 self._format_flags &= ~FLAG_INLINE_DATA
2123 2137 self._inline = False
2124 2138 for i in self:
2125 2139 e = self.index.entry_binary(i)
2126 2140 if i == 0 and self._docket is None:
2127 2141 header = self._format_flags | self._format_version
2128 2142 header = self.index.pack_header(header)
2129 2143 e = header + e
2130 2144 fp.write(e)
2131 2145 if self._docket is not None:
2132 2146 self._docket.index_end = fp.tell()
2133 2147 # the temp file replaces the real index when we exit the context
2134 2148 # manager
2135 2149
2136 2150 tr.replace(self._indexfile, trindex * self.index.entry_size)
2137 2151 nodemaputil.setup_persistent_nodemap(tr, self)
2138 2152 self._chunkclear()
2139 2153
2140 2154 if existing_handles:
2141 2155 # switched from inline to conventional; reopen the index
2142 2156 ifh = self.__index_write_fp()
2143 2157 self._writinghandles = (ifh, new_dfh)
2144 2158 new_dfh = None
2145 2159 finally:
2146 2160 if new_dfh is not None:
2147 2161 new_dfh.close()
2148 2162
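# Note on the trigger in _enforceinlinesize() above: an inline revlog is
# split into separate index and data files once the revision data alone
# reaches _maxinline bytes, i.e. once
#     start(tiprev) + length(tiprev) >= _maxinline
# after which _inline is cleared and all further writes go to the .d file.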
2149 2163 def _nodeduplicatecallback(self, transaction, node):
2150 2164 """called when trying to add a node already stored."""
2151 2165
2152 2166 @contextlib.contextmanager
2153 2167 def _writing(self, transaction):
2154 2168 if self._trypending:
2155 2169 msg = b'try to write in a `trypending` revlog: %s'
2156 2170 msg %= self.display_id
2157 2171 raise error.ProgrammingError(msg)
2158 2172 if self._writinghandles is not None:
2159 2173 yield
2160 2174 else:
2161 2175 r = len(self)
2162 2176 dsize = 0
2163 2177 if r:
2164 2178 dsize = self.end(r - 1)
2165 2179 dfh = None
2166 2180 if not self._inline:
2167 2181 try:
2168 2182 dfh = self._datafp(b"r+")
2169 2183 if self._docket is None:
2170 2184 dfh.seek(0, os.SEEK_END)
2171 2185 else:
2172 2186 dfh.seek(self._docket.data_end, os.SEEK_SET)
2173 2187 except IOError as inst:
2174 2188 if inst.errno != errno.ENOENT:
2175 2189 raise
2176 2190 dfh = self._datafp(b"w+")
2177 2191 transaction.add(self._datafile, dsize)
2178 2192 try:
2179 2193 isize = r * self.index.entry_size
2180 2194 ifh = self.__index_write_fp()
2181 2195 if self._inline:
2182 2196 transaction.add(self._indexfile, dsize + isize)
2183 2197 else:
2184 2198 transaction.add(self._indexfile, isize)
2185 2199 try:
2186 2200 self._writinghandles = (ifh, dfh)
2187 2201 try:
2188 2202 yield
2189 2203 if self._docket is not None:
2190 2204 self._write_docket(transaction)
2191 2205 finally:
2192 2206 self._writinghandles = None
2193 2207 finally:
2194 2208 ifh.close()
2195 2209 finally:
2196 2210 if dfh is not None:
2197 2211 dfh.close()
2198 2212
2199 2213 def _write_docket(self, transaction):
2200 2214 """write the current docket on disk
2201 2215
2202 2216 Exists as a method to help the changelog implement transaction logic.
2203 2217
2204 2218 We could also imagine using the same transaction logic for all revlogs
2205 2219 since dockets are cheap."""
2206 2220 self._docket.write(transaction)
2207 2221
2208 2222 def addrevision(
2209 2223 self,
2210 2224 text,
2211 2225 transaction,
2212 2226 link,
2213 2227 p1,
2214 2228 p2,
2215 2229 cachedelta=None,
2216 2230 node=None,
2217 2231 flags=REVIDX_DEFAULT_FLAGS,
2218 2232 deltacomputer=None,
2219 2233 sidedata=None,
2220 2234 ):
2221 2235 """add a revision to the log
2222 2236
2223 2237 text - the revision data to add
2224 2238 transaction - the transaction object used for rollback
2225 2239 link - the linkrev data to add
2226 2240 p1, p2 - the parent nodeids of the revision
2227 2241 cachedelta - an optional precomputed delta
2228 2242 node - nodeid of revision; typically node is not specified, and it is
2229 2243 computed by default as hash(text, p1, p2); however, subclasses might
2230 2244 use a different hashing method (and override checkhash() in that case)
2231 2245 flags - the known flags to set on the revision
2232 2246 deltacomputer - an optional deltacomputer instance shared between
2233 2247 multiple calls
2234 2248 """
2235 2249 if link == nullrev:
2236 2250 raise error.RevlogError(
2237 2251 _(b"attempted to add linkrev -1 to %s") % self.display_id
2238 2252 )
2239 2253
2240 2254 if sidedata is None:
2241 2255 sidedata = {}
2242 2256 elif sidedata and not self.hassidedata:
2243 2257 raise error.ProgrammingError(
2244 2258 _(b"trying to add sidedata to a revlog who don't support them")
2245 2259 )
2246 2260
2247 2261 if flags:
2248 2262 node = node or self.hash(text, p1, p2)
2249 2263
2250 2264 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2251 2265
2252 2266 # If the flag processor modifies the revision data, ignore any provided
2253 2267 # cachedelta.
2254 2268 if rawtext != text:
2255 2269 cachedelta = None
2256 2270
2257 2271 if len(rawtext) > _maxentrysize:
2258 2272 raise error.RevlogError(
2259 2273 _(
2260 2274 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2261 2275 )
2262 2276 % (self.display_id, len(rawtext))
2263 2277 )
2264 2278
2265 2279 node = node or self.hash(rawtext, p1, p2)
2266 2280 rev = self.index.get_rev(node)
2267 2281 if rev is not None:
2268 2282 return rev
2269 2283
2270 2284 if validatehash:
2271 2285 self.checkhash(rawtext, node, p1=p1, p2=p2)
2272 2286
2273 2287 return self.addrawrevision(
2274 2288 rawtext,
2275 2289 transaction,
2276 2290 link,
2277 2291 p1,
2278 2292 p2,
2279 2293 node,
2280 2294 flags,
2281 2295 cachedelta=cachedelta,
2282 2296 deltacomputer=deltacomputer,
2283 2297 sidedata=sidedata,
2284 2298 )
2285 2299
2286 2300 def addrawrevision(
2287 2301 self,
2288 2302 rawtext,
2289 2303 transaction,
2290 2304 link,
2291 2305 p1,
2292 2306 p2,
2293 2307 node,
2294 2308 flags,
2295 2309 cachedelta=None,
2296 2310 deltacomputer=None,
2297 2311 sidedata=None,
2298 2312 ):
2299 2313 """add a raw revision with known flags, node and parents
2300 2314 useful when reusing a revision not stored in this revlog (ex: received
2301 2315 over wire, or read from an external bundle).
2302 2316 """
2303 2317 with self._writing(transaction):
2304 2318 return self._addrevision(
2305 2319 node,
2306 2320 rawtext,
2307 2321 transaction,
2308 2322 link,
2309 2323 p1,
2310 2324 p2,
2311 2325 flags,
2312 2326 cachedelta,
2313 2327 deltacomputer=deltacomputer,
2314 2328 sidedata=sidedata,
2315 2329 )
2316 2330
2317 2331 def compress(self, data):
2318 2332 """Generate a possibly-compressed representation of data."""
2319 2333 if not data:
2320 2334 return b'', data
2321 2335
2322 2336 compressed = self._compressor.compress(data)
2323 2337
2324 2338 if compressed:
2325 2339 # The revlog compressor added the header in the returned data.
2326 2340 return b'', compressed
2327 2341
2328 2342 if data[0:1] == b'\0':
2329 2343 return b'', data
2330 2344 return b'u', data
2331 2345
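# Summary of the chunk-header convention produced by compress() above
# (returned as a (header, data) pair; byte values illustrative):
#   (b'', b'x...')   compressed; the engine's own header (e.g. 'x' for
#                    zlib) travels inside the data itself
#   (b'', b'\0...')  stored raw; a leading NUL can never be mistaken for
#                    a compression header
#   (b'u', b'...')   stored raw behind an explicit 'u' marker, for data
#                    that starts with neither '\0' nor an engine header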
2332 2346 def decompress(self, data):
2333 2347 """Decompress a revlog chunk.
2334 2348
2335 2349 The chunk is expected to begin with a header identifying the
2336 2350 format type so it can be routed to an appropriate decompressor.
2337 2351 """
2338 2352 if not data:
2339 2353 return data
2340 2354
2341 2355 # Revlogs are read much more frequently than they are written and many
2342 2356 # chunks only take microseconds to decompress, so performance is
2343 2357 # important here.
2344 2358 #
2345 2359 # We can make a few assumptions about revlogs:
2346 2360 #
2347 2361 # 1) the majority of chunks will be compressed (as opposed to inline
2348 2362 # raw data).
2349 2363 # 2) decompressing *any* data will likely be at least 10x slower than
2350 2364 # returning raw inline data.
2351 2365 # 3) we want to prioritize common and officially supported compression
2352 2366 # engines
2353 2367 #
2354 2368 # It follows that we want to optimize for "decompress compressed data
2355 2369 # when encoded with common and officially supported compression engines"
2356 2370 # case over "raw data" and "data encoded by less common or non-official
2357 2371 # compression engines." That is why we have the inline lookup first
2358 2372 # followed by the compengines lookup.
2359 2373 #
2360 2374 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2361 2375 # compressed chunks. And this matters for changelog and manifest reads.
2362 2376 t = data[0:1]
2363 2377
2364 2378 if t == b'x':
2365 2379 try:
2366 2380 return _zlibdecompress(data)
2367 2381 except zlib.error as e:
2368 2382 raise error.RevlogError(
2369 2383 _(b'revlog decompress error: %s')
2370 2384 % stringutil.forcebytestr(e)
2371 2385 )
2372 2386 # '\0' is more common than 'u' so it goes first.
2373 2387 elif t == b'\0':
2374 2388 return data
2375 2389 elif t == b'u':
2376 2390 return util.buffer(data, 1)
2377 2391
2378 try:
2379 compressor = self._decompressors[t]
2380 except KeyError:
2381 try:
2382 engine = util.compengines.forrevlogheader(t)
2383 compressor = engine.revlogcompressor(self._compengineopts)
2384 self._decompressors[t] = compressor
2385 except KeyError:
2386 raise error.RevlogError(
2387 _(b'unknown compression type %s') % binascii.hexlify(t)
2388 )
2392 compressor = self._get_decompressor(t)
2389 2393
2390 2394 return compressor.decompress(data)
2391 2395
2392 2396 def _addrevision(
2393 2397 self,
2394 2398 node,
2395 2399 rawtext,
2396 2400 transaction,
2397 2401 link,
2398 2402 p1,
2399 2403 p2,
2400 2404 flags,
2401 2405 cachedelta,
2402 2406 alwayscache=False,
2403 2407 deltacomputer=None,
2404 2408 sidedata=None,
2405 2409 ):
2406 2410 """internal function to add revisions to the log
2407 2411
2408 2412 see addrevision for argument descriptions.
2409 2413
2410 2414 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2411 2415
2412 2416 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2413 2417 be used.
2414 2418
2415 2419 invariants:
2416 2420 - rawtext is optional (can be None); if not set, cachedelta must be set.
2417 2421 if both are set, they must correspond to each other.
2418 2422 """
2419 2423 if node == self.nullid:
2420 2424 raise error.RevlogError(
2421 2425 _(b"%s: attempt to add null revision") % self.display_id
2422 2426 )
2423 2427 if (
2424 2428 node == self.nodeconstants.wdirid
2425 2429 or node in self.nodeconstants.wdirfilenodeids
2426 2430 ):
2427 2431 raise error.RevlogError(
2428 2432 _(b"%s: attempt to add wdir revision") % self.display_id
2429 2433 )
2430 2434 if self._writinghandles is None:
2431 2435 msg = b'adding revision outside `revlog._writing` context'
2432 2436 raise error.ProgrammingError(msg)
2433 2437
2434 2438 if self._inline:
2435 2439 fh = self._writinghandles[0]
2436 2440 else:
2437 2441 fh = self._writinghandles[1]
2438 2442
2439 2443 btext = [rawtext]
2440 2444
2441 2445 curr = len(self)
2442 2446 prev = curr - 1
2443 2447
2444 2448 offset = self._get_data_offset(prev)
2445 2449
2446 2450 if self._concurrencychecker:
2447 2451 ifh, dfh = self._writinghandles
2448 2452 if self._inline:
2449 2453 # offset is "as if" it were in the .d file, so we need to add on
2450 2454 # the size of the entry metadata.
2451 2455 self._concurrencychecker(
2452 2456 ifh, self._indexfile, offset + curr * self.index.entry_size
2453 2457 )
2454 2458 else:
2455 2459 # Entries in the .i are a consistent size.
2456 2460 self._concurrencychecker(
2457 2461 ifh, self._indexfile, curr * self.index.entry_size
2458 2462 )
2459 2463 self._concurrencychecker(dfh, self._datafile, offset)
2460 2464
2461 2465 p1r, p2r = self.rev(p1), self.rev(p2)
2462 2466
2463 2467 # full versions are inserted when the needed deltas
2464 2468 # become comparable to the uncompressed text
2465 2469 if rawtext is None:
2466 2470 # need rawtext size, before changed by flag processors, which is
2467 2471 # the non-raw size. use revlog explicitly to avoid filelog's extra
2468 2472 # logic that might remove metadata size.
2469 2473 textlen = mdiff.patchedsize(
2470 2474 revlog.size(self, cachedelta[0]), cachedelta[1]
2471 2475 )
2472 2476 else:
2473 2477 textlen = len(rawtext)
2474 2478
2475 2479 if deltacomputer is None:
2476 2480 deltacomputer = deltautil.deltacomputer(self)
2477 2481
2478 2482 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2479 2483
2480 2484 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2481 2485
2482 2486 compression_mode = COMP_MODE_INLINE
2483 2487 if self._docket is not None:
2484 2488 h, d = deltainfo.data
2485 2489 if not h and not d:
2486 2490 # no data to store at all... declare it uncompressed
2487 2491 compression_mode = COMP_MODE_PLAIN
2488 2492 elif not h and d[0:1] == b'\0':
2489 2493 compression_mode = COMP_MODE_PLAIN
2490 2494 elif h == b'u':
2491 2495 # we have a more efficient way to declare uncompressed
2492 2496 h = b''
2493 2497 compression_mode = COMP_MODE_PLAIN
2494 2498 deltainfo = deltautil.drop_u_compression(deltainfo)
2495 2499
2496 2500 if sidedata and self.hassidedata:
2497 2501 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2498 2502 sidedata_offset = offset + deltainfo.deltalen
2499 2503 else:
2500 2504 serialized_sidedata = b""
2501 2505 # Don't store the offset if the sidedata is empty, that way
2502 2506 # we can easily detect empty sidedata, and it will be no different
2503 2507 # from the ones we manually add.
2504 2508 sidedata_offset = 0
2505 2509
2506 2510 e = (
2507 2511 offset_type(offset, flags),
2508 2512 deltainfo.deltalen,
2509 2513 textlen,
2510 2514 deltainfo.base,
2511 2515 link,
2512 2516 p1r,
2513 2517 p2r,
2514 2518 node,
2515 2519 sidedata_offset,
2516 2520 len(serialized_sidedata),
2517 2521 compression_mode,
2518 2522 )
2519 2523
2520 2524 self.index.append(e)
2521 2525 entry = self.index.entry_binary(curr)
2522 2526 if curr == 0 and self._docket is None:
2523 2527 header = self._format_flags | self._format_version
2524 2528 header = self.index.pack_header(header)
2525 2529 entry = header + entry
2526 2530 self._writeentry(
2527 2531 transaction,
2528 2532 entry,
2529 2533 deltainfo.data,
2530 2534 link,
2531 2535 offset,
2532 2536 serialized_sidedata,
2533 2537 )
2534 2538
2535 2539 rawtext = btext[0]
2536 2540
2537 2541 if alwayscache and rawtext is None:
2538 2542 rawtext = deltacomputer.buildtext(revinfo, fh)
2539 2543
2540 2544 if type(rawtext) == bytes: # only accept immutable objects
2541 2545 self._revisioncache = (node, curr, rawtext)
2542 2546 self._chainbasecache[curr] = deltainfo.chainbase
2543 2547 return curr
2544 2548
2545 2549 def _get_data_offset(self, prev):
2546 2550 """Returns the current offset in the (in-transaction) data file.
2547 2551 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2548 2552 file to store that information: since sidedata can be rewritten to the
2549 2553 end of the data file within a transaction, you can have cases where, for
2550 2554 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2551 2555 to `n - 1`'s sidedata being written after `n`'s data.
2552 2556
2553 2557 TODO cache this in a docket file before getting out of experimental."""
2554 2558 if self._docket is None:
2555 2559 return self.end(prev)
2556 2560 else:
2557 2561 return self._docket.data_end
2558 2562
2559 2563 def _writeentry(self, transaction, entry, data, link, offset, sidedata):
2560 2564 # Files opened in a+ mode have inconsistent behavior on various
2561 2565 # platforms. Windows requires that a file positioning call be made
2562 2566 # when the file handle transitions between reads and writes. See
2563 2567 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2564 2568 # platforms, Python or the platform itself can be buggy. Some versions
2565 2569 # of Solaris have been observed to not append at the end of the file
2566 2570 # if the file was seeked to before the end. See issue4943 for more.
2567 2571 #
2568 2572 # We work around this issue by inserting a seek() before writing.
2569 2573 # Note: This is likely not necessary on Python 3. However, because
2570 2574 # the file handle is reused for reads and may be seeked there, we need
2571 2575 # to be careful before changing this.
2572 2576 if self._writinghandles is None:
2573 2577 msg = b'adding revision outside `revlog._writing` context'
2574 2578 raise error.ProgrammingError(msg)
2575 2579 ifh, dfh = self._writinghandles
2576 2580 if self._docket is None:
2577 2581 ifh.seek(0, os.SEEK_END)
2578 2582 else:
2579 2583 ifh.seek(self._docket.index_end, os.SEEK_SET)
2580 2584 if dfh:
2581 2585 if self._docket is None:
2582 2586 dfh.seek(0, os.SEEK_END)
2583 2587 else:
2584 2588 dfh.seek(self._docket.data_end, os.SEEK_SET)
2585 2589
2586 2590 curr = len(self) - 1
2587 2591 if not self._inline:
2588 2592 transaction.add(self._datafile, offset)
2589 2593 transaction.add(self._indexfile, curr * len(entry))
2590 2594 if data[0]:
2591 2595 dfh.write(data[0])
2592 2596 dfh.write(data[1])
2593 2597 if sidedata:
2594 2598 dfh.write(sidedata)
2595 2599 ifh.write(entry)
2596 2600 else:
2597 2601 offset += curr * self.index.entry_size
2598 2602 transaction.add(self._indexfile, offset)
2599 2603 ifh.write(entry)
2600 2604 ifh.write(data[0])
2601 2605 ifh.write(data[1])
2602 2606 if sidedata:
2603 2607 ifh.write(sidedata)
2604 2608 self._enforceinlinesize(transaction)
2605 2609 if self._docket is not None:
2606 2610 self._docket.index_end = self._writinghandles[0].tell()
2607 2611 self._docket.data_end = self._writinghandles[1].tell()
2608 2612
2609 2613 nodemaputil.setup_persistent_nodemap(transaction, self)
2610 2614
2611 2615 def addgroup(
2612 2616 self,
2613 2617 deltas,
2614 2618 linkmapper,
2615 2619 transaction,
2616 2620 alwayscache=False,
2617 2621 addrevisioncb=None,
2618 2622 duplicaterevisioncb=None,
2619 2623 ):
2620 2624 """
2621 2625 add a delta group
2622 2626
2623 2627 given a set of deltas, add them to the revision log. the
2624 2628 first delta is against its parent, which should be in our
2625 2629 log, the rest are against the previous delta.
2626 2630
2627 2631 If ``addrevisioncb`` is defined, it will be called with arguments of
2628 2632 this revlog and the node that was added.
2629 2633 """
2630 2634
2631 2635 if self._adding_group:
2632 2636 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2633 2637
2634 2638 self._adding_group = True
2635 2639 empty = True
2636 2640 try:
2637 2641 with self._writing(transaction):
2638 2642 deltacomputer = deltautil.deltacomputer(self)
2639 2643 # loop through our set of deltas
2640 2644 for data in deltas:
2641 2645 (
2642 2646 node,
2643 2647 p1,
2644 2648 p2,
2645 2649 linknode,
2646 2650 deltabase,
2647 2651 delta,
2648 2652 flags,
2649 2653 sidedata,
2650 2654 ) = data
2651 2655 link = linkmapper(linknode)
2652 2656 flags = flags or REVIDX_DEFAULT_FLAGS
2653 2657
2654 2658 rev = self.index.get_rev(node)
2655 2659 if rev is not None:
2656 2660 # this can happen if two branches make the same change
2657 2661 self._nodeduplicatecallback(transaction, rev)
2658 2662 if duplicaterevisioncb:
2659 2663 duplicaterevisioncb(self, rev)
2660 2664 empty = False
2661 2665 continue
2662 2666
2663 2667 for p in (p1, p2):
2664 2668 if not self.index.has_node(p):
2665 2669 raise error.LookupError(
2666 2670 p, self.radix, _(b'unknown parent')
2667 2671 )
2668 2672
2669 2673 if not self.index.has_node(deltabase):
2670 2674 raise error.LookupError(
2671 2675 deltabase, self.display_id, _(b'unknown delta base')
2672 2676 )
2673 2677
2674 2678 baserev = self.rev(deltabase)
2675 2679
2676 2680 if baserev != nullrev and self.iscensored(baserev):
2677 2681 # if base is censored, delta must be full replacement in a
2678 2682 # single patch operation
2679 2683 hlen = struct.calcsize(b">lll")
2680 2684 oldlen = self.rawsize(baserev)
2681 2685 newlen = len(delta) - hlen
2682 2686 if delta[:hlen] != mdiff.replacediffheader(
2683 2687 oldlen, newlen
2684 2688 ):
2685 2689 raise error.CensoredBaseError(
2686 2690 self.display_id, self.node(baserev)
2687 2691 )
2688 2692
2689 2693 if not flags and self._peek_iscensored(baserev, delta):
2690 2694 flags |= REVIDX_ISCENSORED
2691 2695
2692 2696 # We assume consumers of addrevisioncb will want to retrieve
2693 2697 # the added revision, which will require a call to
2694 2698 # revision(). revision() will fast path if there is a cache
2695 2699 # hit. So, we tell _addrevision() to always cache in this case.
2696 2700 # We're only using addgroup() in the context of changegroup
2697 2701 # generation so the revision data can always be handled as raw
2698 2702 # by the flagprocessor.
2699 2703 rev = self._addrevision(
2700 2704 node,
2701 2705 None,
2702 2706 transaction,
2703 2707 link,
2704 2708 p1,
2705 2709 p2,
2706 2710 flags,
2707 2711 (baserev, delta),
2708 2712 alwayscache=alwayscache,
2709 2713 deltacomputer=deltacomputer,
2710 2714 sidedata=sidedata,
2711 2715 )
2712 2716
2713 2717 if addrevisioncb:
2714 2718 addrevisioncb(self, rev)
2715 2719 empty = False
2716 2720 finally:
2717 2721 self._adding_group = False
2718 2722 return not empty
2719 2723
2720 2724 def iscensored(self, rev):
2721 2725 """Check if a file revision is censored."""
2722 2726 if not self._censorable:
2723 2727 return False
2724 2728
2725 2729 return self.flags(rev) & REVIDX_ISCENSORED
2726 2730
2727 2731 def _peek_iscensored(self, baserev, delta):
2728 2732 """Quickly check if a delta produces a censored revision."""
2729 2733 if not self._censorable:
2730 2734 return False
2731 2735
2732 2736 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2733 2737
2734 2738 def getstrippoint(self, minlink):
2735 2739 """find the minimum rev that must be stripped to strip the linkrev
2736 2740
2737 2741 Returns a tuple containing the minimum rev and a set of all revs that
2738 2742 have linkrevs that will be broken by this strip.
2739 2743 """
2740 2744 return storageutil.resolvestripinfo(
2741 2745 minlink,
2742 2746 len(self) - 1,
2743 2747 self.headrevs(),
2744 2748 self.linkrev,
2745 2749 self.parentrevs,
2746 2750 )
2747 2751
2748 2752 def strip(self, minlink, transaction):
2749 2753 """truncate the revlog on the first revision with a linkrev >= minlink
2750 2754
2751 2755 This function is called when we're stripping revision minlink and
2752 2756 its descendants from the repository.
2753 2757
2754 2758 We have to remove all revisions with linkrev >= minlink, because
2755 2759 the equivalent changelog revisions will be renumbered after the
2756 2760 strip.
2757 2761
2758 2762 So we truncate the revlog on the first of these revisions, and
2759 2763 trust that the caller has saved the revisions that shouldn't be
2760 2764 removed and that it'll re-add them after this truncation.
2761 2765 """
2762 2766 if len(self) == 0:
2763 2767 return
2764 2768
2765 2769 rev, _ = self.getstrippoint(minlink)
2766 2770 if rev == len(self):
2767 2771 return
2768 2772
2769 2773 # first truncate the files on disk
2770 2774 data_end = self.start(rev)
2771 2775 if not self._inline:
2772 2776 transaction.add(self._datafile, data_end)
2773 2777 end = rev * self.index.entry_size
2774 2778 else:
2775 2779 end = data_end + (rev * self.index.entry_size)
2776 2780
2777 2781 transaction.add(self._indexfile, end)
2778 2782 if self._docket is not None:
2779 2783 # XXX we could leverage the docket while stripping. However it is
2780 2784 # not powerful enough at the time of this comment
2781 2785 self._docket.index_end = end
2782 2786 self._docket.data_end = data_end
2783 2787 self._docket.write(transaction, stripping=True)
2784 2788
2785 2789 # then reset internal state in memory to forget those revisions
2786 2790 self._revisioncache = None
2787 2791 self._chaininfocache = util.lrucachedict(500)
2788 2792 self._chunkclear()
2789 2793
2790 2794 del self.index[rev:-1]
2791 2795
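# Note on the truncation points computed by strip() above, for the first
# revision `rev` to remove:
#   - separate files: truncate the .d file at start(rev) and the .i file
#     at rev * entry_size
#   - inline: truncate the single .i file at start(rev) + rev * entry_size,
#     since each of the rev surviving revisions contributes one index
#     entry interleaved with its data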
2792 2796 def checksize(self):
2793 2797 """Check size of index and data files
2794 2798
2795 2799 return a (dd, di) tuple.
2796 2800 - dd: extra bytes for the "data" file
2797 2801 - di: extra bytes for the "index" file
2798 2802
2799 2803 A healthy revlog will return (0, 0).
2800 2804 """
2801 2805 expected = 0
2802 2806 if len(self):
2803 2807 expected = max(0, self.end(len(self) - 1))
2804 2808
2805 2809 try:
2806 2810 with self._datafp() as f:
2807 2811 f.seek(0, io.SEEK_END)
2808 2812 actual = f.tell()
2809 2813 dd = actual - expected
2810 2814 except IOError as inst:
2811 2815 if inst.errno != errno.ENOENT:
2812 2816 raise
2813 2817 dd = 0
2814 2818
2815 2819 try:
2816 2820 f = self.opener(self._indexfile)
2817 2821 f.seek(0, io.SEEK_END)
2818 2822 actual = f.tell()
2819 2823 f.close()
2820 2824 s = self.index.entry_size
2821 2825 i = max(0, actual // s)
2822 2826 di = actual - (i * s)
2823 2827 if self._inline:
2824 2828 databytes = 0
2825 2829 for r in self:
2826 2830 databytes += max(0, self.length(r))
2827 2831 dd = 0
2828 2832 di = actual - len(self) * s - databytes
2829 2833 except IOError as inst:
2830 2834 if inst.errno != errno.ENOENT:
2831 2835 raise
2832 2836 di = 0
2833 2837
2834 2838 return (dd, di)
2835 2839
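# Reading the (dd, di) result of checksize() above:
#   (0, 0)   healthy revlog
#   dd > 0   trailing bytes in the data file beyond the last revision
#   di != 0  the index file is not a whole number of entries, or (for
#            inline revlogs) its size does not match the interleaved
#            entries-plus-data layout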
2836 2840 def files(self):
2837 2841 res = [self._indexfile]
2838 2842 if not self._inline:
2839 2843 res.append(self._datafile)
2840 2844 return res
2841 2845
2842 2846 def emitrevisions(
2843 2847 self,
2844 2848 nodes,
2845 2849 nodesorder=None,
2846 2850 revisiondata=False,
2847 2851 assumehaveparentrevisions=False,
2848 2852 deltamode=repository.CG_DELTAMODE_STD,
2849 2853 sidedata_helpers=None,
2850 2854 ):
2851 2855 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2852 2856 raise error.ProgrammingError(
2853 2857 b'unhandled value for nodesorder: %s' % nodesorder
2854 2858 )
2855 2859
2856 2860 if nodesorder is None and not self._generaldelta:
2857 2861 nodesorder = b'storage'
2858 2862
2859 2863 if (
2860 2864 not self._storedeltachains
2861 2865 and deltamode != repository.CG_DELTAMODE_PREV
2862 2866 ):
2863 2867 deltamode = repository.CG_DELTAMODE_FULL
2864 2868
2865 2869 return storageutil.emitrevisions(
2866 2870 self,
2867 2871 nodes,
2868 2872 nodesorder,
2869 2873 revlogrevisiondelta,
2870 2874 deltaparentfn=self.deltaparent,
2871 2875 candeltafn=self.candelta,
2872 2876 rawsizefn=self.rawsize,
2873 2877 revdifffn=self.revdiff,
2874 2878 flagsfn=self.flags,
2875 2879 deltamode=deltamode,
2876 2880 revisiondata=revisiondata,
2877 2881 assumehaveparentrevisions=assumehaveparentrevisions,
2878 2882 sidedata_helpers=sidedata_helpers,
2879 2883 )
2880 2884
2881 2885 DELTAREUSEALWAYS = b'always'
2882 2886 DELTAREUSESAMEREVS = b'samerevs'
2883 2887 DELTAREUSENEVER = b'never'
2884 2888
2885 2889 DELTAREUSEFULLADD = b'fulladd'
2886 2890
2887 2891 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2888 2892
2889 2893 def clone(
2890 2894 self,
2891 2895 tr,
2892 2896 destrevlog,
2893 2897 addrevisioncb=None,
2894 2898 deltareuse=DELTAREUSESAMEREVS,
2895 2899 forcedeltabothparents=None,
2896 2900 sidedata_helpers=None,
2897 2901 ):
2898 2902 """Copy this revlog to another, possibly with format changes.
2899 2903
2900 2904 The destination revlog will contain the same revisions and nodes.
2901 2905 However, it may not be bit-for-bit identical due to e.g. delta encoding
2902 2906 differences.
2903 2907
2904 2908 The ``deltareuse`` argument controls how deltas from the existing revlog
2905 2909 are preserved in the destination revlog. The argument can have the
2906 2910 following values:
2907 2911
2908 2912 DELTAREUSEALWAYS
2909 2913 Deltas will always be reused (if possible), even if the destination
2910 2914 revlog would not select the same revisions for the delta. This is the
2911 2915 fastest mode of operation.
2912 2916 DELTAREUSESAMEREVS
2913 2917 Deltas will be reused if the destination revlog would pick the same
2914 2918 revisions for the delta. This mode strikes a balance between speed
2915 2919 and optimization.
2916 2920 DELTAREUSENEVER
2917 2921 Deltas will never be reused. This is the slowest mode of execution.
2918 2922 This mode can be used to recompute deltas (e.g. if the diff/delta
2919 2923 algorithm changes).
2920 2924 DELTAREUSEFULLADD
2921 2925 Revisions will be re-added as if they were new content. This is
2922 2926 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2923 2927 e.g. large file detection and handling.
2924 2928
2925 2929 Delta computation can be slow, so the choice of delta reuse policy can
2926 2930 significantly affect run time.
2927 2931
2928 2932 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2929 2933 two extremes. Deltas will be reused when appropriate. But if the
2930 2934 destination revlog would pick a better delta base, it will do so. This means if you
2931 2935 are converting a non-generaldelta revlog to a generaldelta revlog,
2932 2936 deltas will be recomputed if the delta's parent isn't a parent of the
2933 2937 revision.
2934 2938
2935 2939 In addition to the delta policy, the ``forcedeltabothparents``
2936 2940 argument controls whether to force computing deltas against both parents
2937 2941 for merges. When None, the destination revlog's current setting is kept.
2938 2942
2939 2943 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2940 2944 `sidedata_helpers`.
2941 2945 """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase control whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedata_helpers,
            )

        finally:
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd

    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedata_helpers,
    ):
2997 3001 """perform the core duty of `revlog.clone` after parameter processing"""
2998 3002 deltacomputer = deltautil.deltacomputer(destrevlog)
2999 3003 index = self.index
3000 3004 for rev in self:
3001 3005 entry = index[rev]
3002 3006
3003 3007 # Some classes override linkrev to take filtered revs into
3004 3008 # account. Use raw entry from index.
3005 3009 flags = entry[0] & 0xFFFF
3006 3010 linkrev = entry[4]
3007 3011 p1 = index[entry[5]][7]
3008 3012 p2 = index[entry[6]][7]
3009 3013 node = entry[7]
3010 3014
3011 3015 # (Possibly) reuse the delta from the revlog if allowed and
3012 3016 # the revlog chunk is a delta.
3013 3017 cachedelta = None
3014 3018 rawtext = None
3015 3019 if deltareuse == self.DELTAREUSEFULLADD:
3016 3020 text, sidedata = self._revisiondata(rev)
3017 3021
3018 3022 if sidedata_helpers is not None:
3019 3023 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3020 3024 self, sidedata_helpers, sidedata, rev
3021 3025 )
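                    # new_flags appears to be a (flags to set, flags to
                    # clear) pair; since `&` binds tighter than `|`, the
                    # next line reads as flags | (new_flags[0] & ~new_flags[1]).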
                    flags = flags | new_flags[0] & ~new_flags[1]

                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                sidedata = None
                if not cachedelta:
                    rawtext, sidedata = self._revisiondata(rev)
                if sidedata is None:
                    sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                with destrevlog._writing(tr):
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

            if addrevisioncb:
                addrevisioncb(self, rev, node)

    def censorrevision(self, tr, censornode, tombstone=b''):
        if self._format_version == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs')
                % self._format_version
            )

        censorrev = self.rev(censornode)
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        if len(tombstone) > self.rawsize(censorrev):
            raise error.Abort(
                _(b'censor tombstone must be no longer than censored data')
            )

        # Rewriting the revlog in place is hard. Our strategy for censoring is
        # to create a new revlog, copy all revisions to it, then replace the
        # revlogs on transaction close.
        #
        # This is a bit dangerous. We could easily have a mismatch of state.
        newrl = revlog(
            self.opener,
            target=self.target,
            radix=self.radix,
            postfix=b'tmpcensored',
            censorable=True,
        )
        newrl._format_version = self._format_version
        newrl._format_flags = self._format_flags
        newrl._generaldelta = self._generaldelta
        newrl._parse_index = self._parse_index

        for rev in self.revs():
            node = self.node(rev)
            p1, p2 = self.parents(node)

            if rev == censorrev:
                newrl.addrawrevision(
                    tombstone,
                    tr,
                    self.linkrev(censorrev),
                    p1,
                    p2,
                    censornode,
                    REVIDX_ISCENSORED,
                )

                if newrl.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'censored revision stored as delta; '
                            b'cannot censor'
                        ),
                        hint=_(
                            b'censoring of revlogs is not '
                            b'fully implemented; please report '
                            b'this bug'
                        ),
                    )
                continue

            if self.iscensored(rev):
                if self.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'cannot censor due to censored '
                            b'revision having delta stored'
                        )
                    )
                rawtext = self._chunk(rev)
            else:
                rawtext = self.rawdata(rev)

            newrl.addrawrevision(
                rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
            )

        tr.addbackup(self._indexfile, location=b'store')
        if not self._inline:
            tr.addbackup(self._datafile, location=b'store')

        self.opener.rename(newrl._indexfile, self._indexfile)
        if not self._inline:
            self.opener.rename(newrl._datafile, self._datafile)

        self.clearcaches()
        self._loadindex()

    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self._format_version

        # The verifier tells us what revlog version we should expect.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.display_id, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta: file content starts with b'\1\n', the metadata
            #         header defined in filelog.py, but without a rename
            #   ext: content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  -------------------------------------------------------
            #  flags()             | 0      | 0      | 0     | not 0
            #  renamed()           | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n'| False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common  | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1      | L1     | L1    | L1
            # size()       | L1      | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2      | L2     | L2    | L2
            # len(text)    | L2      | L2     | L2    | L3
            # len(read())  | L2      | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            # 1. length check: L1 == L2, in all cases.
            # 2. hash check: depending on flag processor, we may need to
            #    use either "text" (external), or "rawtext" (in revlog).
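            # As a concrete illustration (the file names are made up): a
            # renamed filelog revision's rawtext starts with the metadata
            # header
            #
            #   b'\x01\ncopy: old-name\ncopyrev: <40 hex digits>\n\x01\n'
            #
            # followed by the content; LM is the length of that header, so
            # len(read()) == len(rawtext) - LM in the "rename" column above.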

            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)

    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
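        # Usage sketch (hedged illustration; the numbers are made up):
        #
        #     rl.storageinfo(revisionscount=True, trackedsize=True)
        #     # -> {b'revisionscount': 42, b'trackedsize': 16384}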
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
            if not self._inline:
                d[b'exclusivefiles'].append((self.opener, self._datafile))

        if sharedfiles:
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d[b'storedsize'] = sum(
                self.opener.stat(path).st_size for path in self.files()
            )

        return d

    def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
        if not self.hassidedata:
            return
        # revlog formats with sidedata support do not support inline storage
        assert not self._inline
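        # `helpers` is expected to be the triple returned by
        # revlogutil.sidedata.get_sidedata_helpers, i.e. roughly
        # (repo, sidedata computers, sidedata removers); only indices 1 and 2
        # are consulted here.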
        if not helpers[1] and not helpers[2]:
            # Nothing to generate or remove
            return

        new_entries = []
        # append the new sidedata
        with self._writing(transaction):
            ifh, dfh = self._writinghandles
            if self._docket is not None:
                dfh.seek(self._docket.data_end, os.SEEK_SET)
            else:
                dfh.seek(0, os.SEEK_END)

            current_offset = dfh.tell()
            for rev in range(startrev, endrev + 1):
                entry = self.index[rev]
                new_sidedata, flags = sidedatautil.run_sidedata_helpers(
                    store=self,
                    sidedata_helpers=helpers,
                    sidedata={},
                    rev=rev,
                )

                serialized_sidedata = sidedatautil.serialize_sidedata(
                    new_sidedata
                )
                if entry[8] != 0 or entry[9] != 0:
                    # rewriting entries that already have sidedata is not
                    # supported yet, because it introduces garbage data in the
                    # revlog.
                    msg = b"rewriting existing sidedata is not supported yet"
                    raise error.Abort(msg)

                # Apply (potential) flags to add and to remove after running
                # the sidedata helpers
                new_offset_flags = entry[0] | flags[0] & ~flags[1]
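                # entry_update gathers the index fields rewritten below:
                # (sidedata offset in the data file, sidedata length, updated
                # offset+flags field), unpacked into replace_sidedata_info.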
                entry_update = (
                    current_offset,
                    len(serialized_sidedata),
                    new_offset_flags,
                )

                # the sidedata computation might have moved the file cursors around
                dfh.seek(current_offset, os.SEEK_SET)
                dfh.write(serialized_sidedata)
                new_entries.append(entry_update)
                current_offset += len(serialized_sidedata)
                if self._docket is not None:
                    self._docket.data_end = dfh.tell()

            # rewrite the new index entries
            ifh.seek(startrev * self.index.entry_size)
            for i, e in enumerate(new_entries):
                rev = startrev + i
                self.index.replace_sidedata_info(rev, *e)
                packed = self.index.entry_binary(rev)
                if rev == 0 and self._docket is None:
                    header = self._format_flags | self._format_version
                    header = self.index.pack_header(header)
                    packed = header + packed
                ifh.write(packed)