# (Imported from a hosted diff view; original changeset metadata follows.)
# changelogv2: use a dedicated on disk format for changelogv2
# marmoute - r48044:25ce16bf default
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        # warn, in deterministic order, about every item the extension
        # redefines on top of an already-known one
        for key in sorted(set(knownitems) & set(items)):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            ui.develwarn(msg % (extname, section, key), config=b'warn-config')
        knownitems.update(items)
31 31
32 32
class configitem(object):
    """Description of a single known config item.

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        # copy into a list so the shared tuple default is never mutated
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        # only generic items are matched by regexp; concrete ones never are
        self._re = re.compile(self.name) if generic else None
63 63
64 64
class itemregister(dict):
    """A dict of config items that can also resolve wild-card selections."""

    def __init__(self):
        super(itemregister, self).__init__()
        # generic (regexp-matched) items, tracked separately for lookups
        self._generics = set()

    def update(self, other):
        """Merge another itemregister, including its generic entries."""
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        """Return the item registered for ``key``.

        A concrete (non-generic) registration wins outright; otherwise the
        generic items are tried, lowest priority first.
        """
        exact = super(itemregister, self).get(key)
        if exact is not None and not exact.generic:
            return exact

        # Scan generic items; sorting by (priority, name) keeps the answer
        # deterministic when several patterns could apply.
        for candidate in sorted(
            self._generics, key=lambda i: (i.priority, i.name)
        ):
            # 'match' (anchored at the start) rather than 'search' keeps
            # behavior predictable for people unfamiliar with regular
            # expressions: a simple pattern such as "color\..*" only applies
            # to the "color" section instead of firing on any key that merely
            # contains "color." somewhere.  The trade-off is requiring ".*"
            # in some patterns, which is less error prone than the "^" anchor.
            if candidate._re.match(key):
                return candidate

        return None
104 104
105 105
106 106 coreitems = {}
107 107
108 108
def _register(configtable, *args, **kwargs):
    """Build a configitem from the arguments and file it in ``configtable``.

    Registering the same section/name twice is a programming error.
    """
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        msg %= (item.section, item.name)
        raise error.ProgrammingError(msg)
    section[item.name] = item
116 116
117 117
# special value for case where the default is derived from other values
# rather than being a literal stored here (consumers must resolve it)
dynamicdefault = object()

# Registering actual config items

123 123
def getitemregister(configtable):
    """Return a registration function bound to ``configtable``.

    The returned callable takes the same arguments as ``configitem`` and
    records the resulting item in ``configtable``.
    """
    register = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    register.dynamicdefault = dynamicdefault
    return register
129 129
130 130
131 131 coreconfigitem = getitemregister(coreitems)
132 132
133 133
def _registerdiffopts(section, configprefix=b''):
    """Declare the standard family of diff options under ``section``.

    Each option name is prefixed with ``configprefix`` so the same family can
    be registered under several namespaces (e.g. ``commit.interactive.``).
    """
    # (name suffix, default) pairs, registered in the historical order
    for suffix, default in (
        (b'nodates', False),
        (b'showfunc', False),
        (b'unified', None),
        (b'git', False),
        (b'ignorews', False),
        (b'ignorewsamount', False),
        (b'ignoreblanklines', False),
        (b'ignorewseol', False),
        (b'nobinary', False),
        (b'noprefix', False),
        (b'word-diff', False),
    ):
        coreconfigitem(
            section,
            configprefix + suffix,
            default=default,
        )
191 191
# Declaration of the core config items, grouped by section (roughly
# alphabetical).  Each call registers one item into ``coreitems``.
coreconfigitem(
    b'alias',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'auth',
    b'cookiefile',
    default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks',
    b'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle',
    b'mainreporoot',
    default=b'',
)
coreconfigitem(
    b'censor',
    b'policy',
    default=b'abort',
    experimental=True,
)
coreconfigitem(
    b'chgserver',
    b'idletimeout',
    default=3600,
)
coreconfigitem(
    b'chgserver',
    b'skiphash',
    default=False,
)
coreconfigitem(
    b'cmdserver',
    b'log',
    default=None,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-files',
    default=7,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-size',
    default=b'1 MB',
)
coreconfigitem(
    b'cmdserver',
    b'max-repo-cache',
    default=0,
    experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'message-encodings',
    default=list,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'cmdserver',
    b'shutdown-on-interrupt',
    default=True,
)
coreconfigitem(
    b'color',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'color',
    b'mode',
    default=b'auto',
)
coreconfigitem(
    b'color',
    b'pagermode',
    default=dynamicdefault,
)
coreconfigitem(
    b'command-templates',
    b'graphnode',
    default=None,
    alias=[(b'ui', b'graphnodetemplate')],
)
coreconfigitem(
    b'command-templates',
    b'log',
    default=None,
    alias=[(b'ui', b'logtemplate')],
)
coreconfigitem(
    b'command-templates',
    b'mergemarker',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
    alias=[(b'ui', b'mergemarkertemplate')],
)
coreconfigitem(
    b'command-templates',
    b'pre-merge-tool-output',
    default=None,
    alias=[(b'ui', b'pre-merge-tool-output-template')],
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary',
    default=None,
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary.*',
    default=dynamicdefault,
    generic=True,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands',
    b'commit.post-status',
    default=False,
)
coreconfigitem(
    b'commands',
    b'grep.all-files',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'merge.require-rev',
    default=False,
)
coreconfigitem(
    b'commands',
    b'push.require-revs',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.confirm',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.explicit-re-merge',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.mark-check',
    default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands',
    b'show.aliasprefix',
    default=list,
)
coreconfigitem(
    b'commands',
    b'status.relative',
    default=False,
)
coreconfigitem(
    b'commands',
    b'status.skipstates',
    default=[],
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'status.terse',
    default=b'',
)
coreconfigitem(
    b'commands',
    b'status.verbose',
    default=False,
)
coreconfigitem(
    b'commands',
    b'update.check',
    default=None,
)
coreconfigitem(
    b'commands',
    b'update.requiredest',
    default=False,
)
coreconfigitem(
    b'committemplate',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'convert',
    b'bzr.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.cache',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.fuzz',
    default=60,
)
coreconfigitem(
    b'convert',
    b'cvsps.logencoding',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergefrom',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergeto',
    default=None,
)
coreconfigitem(
    b'convert',
    b'git.committeractions',
    default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert',
    b'git.extrakeys',
    default=list,
)
coreconfigitem(
    b'convert',
    b'git.findcopiesharder',
    default=False,
)
coreconfigitem(
    b'convert',
    b'git.remoteprefix',
    default=b'remote',
)
coreconfigitem(
    b'convert',
    b'git.renamelimit',
    default=400,
)
coreconfigitem(
    b'convert',
    b'git.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'git.similarity',
    default=50,
)
coreconfigitem(
    b'convert',
    b'git.skipsubmodules',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.clonebranches',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.ignoreerrors',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.preserve-hash',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.revs',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.saverev',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.sourcename',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.startrev',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.tagsbranch',
    default=b'default',
)
coreconfigitem(
    b'convert',
    b'hg.usebranchnames',
    default=True,
)
coreconfigitem(
    b'convert',
    b'ignoreancestorcheck',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'convert',
    b'localtimezone',
    default=False,
)
coreconfigitem(
    b'convert',
    b'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem(
    b'convert',
    b'p4.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'skiptags',
    default=False,
)
coreconfigitem(
    b'convert',
    b'svn.debugsvnlog',
    default=True,
)
coreconfigitem(
    b'convert',
    b'svn.trunk',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.tags',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.branches',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'svn.dangerous-set-commit-dates',
    default=False,
)
coreconfigitem(
    b'debug',
    b'dirstate.delaywrite',
    default=0,
)
coreconfigitem(
    b'debug',
    b'revlog.verifyposition.changelog',
    default=b'',
)
coreconfigitem(
    b'defaults',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'devel',
    b'all-warnings',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle2.debug',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle.delta',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'cache-vfs',
    default=None,
)
coreconfigitem(
    b'devel',
    b'check-locks',
    default=False,
)
coreconfigitem(
    b'devel',
    b'check-relroot',
    default=False,
)
# Track copy information for all file, not just "added" one (very slow)
coreconfigitem(
    b'devel',
    b'copy-tracing.trace-all-files',
    default=False,
)
coreconfigitem(
    b'devel',
    b'default-date',
    default=None,
)
coreconfigitem(
    b'devel',
    b'deprec-warn',
    default=False,
)
coreconfigitem(
    b'devel',
    b'disableloaddefaultcerts',
    default=False,
)
coreconfigitem(
    b'devel',
    b'warn-empty-changegroup',
    default=False,
)
coreconfigitem(
    b'devel',
    b'legacy.exchange',
    default=list,
)
# When True, revlogs use a special reference version of the nodemap, that is not
# performant but is "known" to behave properly.
coreconfigitem(
    b'devel',
    b'persistent-nodemap',
    default=False,
)
coreconfigitem(
    b'devel',
    b'servercafile',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverexactprotocol',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverrequirecert',
    default=False,
)
coreconfigitem(
    b'devel',
    b'strip-obsmarkers',
    default=True,
)
coreconfigitem(
    b'devel',
    b'warn-config',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-default',
    default=None,
)
coreconfigitem(
    b'devel',
    b'user.obsmarker',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-unknown',
    default=None,
)
coreconfigitem(
    b'devel',
    b'debug.copies',
    default=False,
)
coreconfigitem(
    b'devel',
    b'copy-tracing.multi-thread',
    default=True,
)
coreconfigitem(
    b'devel',
    b'debug.extensions',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.repo-filters',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.peer-request',
    default=False,
)
# If discovery.exchange-heads is False, the discovery will not start with
# remote head fetching and local head querying.
coreconfigitem(
    b'devel',
    b'discovery.exchange-heads',
    default=True,
)
# If discovery.grow-sample is False, the sample size used in set discovery will
# not be increased through the process
coreconfigitem(
    b'devel',
    b'discovery.grow-sample',
    default=True,
)
# When discovery.grow-sample.dynamic is True, the default, the sample size is
# adapted to the shape of the undecided set (it is set to the max of:
# <target-size>, len(roots(undecided)), len(heads(undecided)
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.dynamic',
    default=True,
)
# discovery.grow-sample.rate control the rate at which the sample grow
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.rate',
    default=1.05,
)
# If discovery.randomize is False, random sampling during discovery are
# deterministic. It is meant for integration tests.
coreconfigitem(
    b'devel',
    b'discovery.randomize',
    default=True,
)
# Control the initial size of the discovery sample
coreconfigitem(
    b'devel',
    b'discovery.sample-size',
    default=200,
)
# Control the initial size of the discovery for initial change
coreconfigitem(
    b'devel',
    b'discovery.sample-size.initial',
    default=100,
)
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'diff',
    b'merge',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'email',
    b'bcc',
    default=None,
)
coreconfigitem(
    b'email',
    b'cc',
    default=None,
)
coreconfigitem(
    b'email',
    b'charsets',
    default=list,
)
coreconfigitem(
    b'email',
    b'from',
    default=None,
)
coreconfigitem(
    b'email',
    b'method',
    default=b'smtp',
)
coreconfigitem(
    b'email',
    b'reply-to',
    default=None,
)
coreconfigitem(
    b'email',
    b'to',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'archivemetatemplate',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'auto-publish',
    default=b'publish',
)
coreconfigitem(
    b'experimental',
    b'bundle-phases',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2-advertise',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'bundle2-output-capture',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2.pushback',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2lazylocking',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'changegroup3',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'changegroup4',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'cleanup-as-archived',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'clientcompressionengines',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'copytrace',
    default=b'on',
)
coreconfigitem(
    b'experimental',
    b'copytrace.movecandidateslimit',
    default=100,
)
coreconfigitem(
    b'experimental',
    b'copytrace.sourcecommitlimit',
    default=100,
)
coreconfigitem(
    b'experimental',
    b'copies.read-from',
    default=b"filelog-only",
)
coreconfigitem(
    b'experimental',
    b'copies.write-to',
    default=b'filelog-only',
)
coreconfigitem(
    b'experimental',
    b'crecordtest',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'directaccess',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'directaccess.revnums',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'dirstate-tree.in-memory',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'editortmpinhg',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'evolution.allowdivergence',
    default=False,
    alias=[(b'experimental', b'allowdivergence')],
)
coreconfigitem(
    b'experimental',
    b'evolution.allowunstable',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.createmarkers',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.effect-flags',
    default=True,
    alias=[(b'experimental', b'effect-flags')],
)
coreconfigitem(
    b'experimental',
    b'evolution.exchange',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.bundle-obsmarker',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution.bundle-obsmarker:mandatory',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'log.topo',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution.report-instabilities',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'evolution.track-operation',
    default=True,
)
# repo-level config to exclude a revset visibility
#
# The target use case is to use `share` to expose different subset of the same
# repository, especially server side. See also `server.view`.
coreconfigitem(
    b'experimental',
    b'extra-filter-revs',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'maxdeltachainspan',
    default=-1,
)
# tracks files which were undeleted (merge might delete them but we explicitly
# kept/undeleted them) and creates new filenodes for them
coreconfigitem(
    b'experimental',
    b'merge-track-salvaged',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'mergetempdirprefix',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'mmapindexthreshold',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'narrow',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'nonnormalparanoidcheck',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'exportableenviron',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'extendedheader.index',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'extendedheader.similarity',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'graphshorten',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.parent',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.missing',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.grandparent',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'hook-track-tags',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'httppeer.advertise-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'httppeer.v2-encoder-order',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'httppostargs',
    default=False,
)
coreconfigitem(b'experimental', b'nointerrupt', default=False)
coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)

coreconfigitem(
    b'experimental',
    b'obsmarkers-exchange-debug',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'remotenames',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'removeemptydirs',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'revert.interactive.select-to-keep',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'revisions.prefixhexnode',
    default=False,
)
# "out of experimental" todo list.
#
# * include management of a persistent nodemap in the main docket
# * enforce a "no-truncate" policy for mmap safety
#       - for censoring operation
#       - for stripping operation
#       - for rollback operation
# * proper streaming (race free) of the docket file
# * track garbage data to evemtually allow rewriting -existing- sidedata.
# * Exchange-wise, we will also need to do something more efficient than
#   keeping references to the affected revlogs, especially memory-wise when
#   rewriting sidedata.
# * sidedata compression
# * introduce a proper solution to reduce the number of filelog related files.
# * Improvement to consider
#   - avoid compression header in chunk using the default compression?
#   - forbid "inline" compression mode entirely?
#   - split the data offset and flag field (the 2 bytes save are mostly trouble)
#   - keep track of uncompressed -chunk- size (to preallocate memory better)
#   - keep track of chain base or size (probably not that useful anymore)
#   - store data and sidedata in different files
coreconfigitem(
    b'experimental',
    b'revlogv2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'revisions.disambiguatewithin',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'rust.index',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'server.filesdata.recommended-batch-size',
    default=50000,
)
coreconfigitem(
    b'experimental',
    b'server.manifestdata.recommended-batch-size',
    default=100000,
)
coreconfigitem(
    b'experimental',
    b'server.stream-narrow-clones',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:account-closed-heads',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:public-changes-only',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sshserver.support-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sparse-read',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sparse-read.density-threshold',
    default=0.50,
)
coreconfigitem(
    b'experimental',
    b'sparse-read.min-gap-size',
    default=b'65K',
)
coreconfigitem(
    b'experimental',
    b'treemanifest',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'update.atomic-file',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sshpeer.advertise-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'web.apiserver',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'web.api.http-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'web.api.debugreflect',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'worker.wdir-get-thread-safe',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'worker.repository-upgrade',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'xdiff',
    default=False,
)
coreconfigitem(
    b'extensions',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'extdata',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'format',
    b'bookmarks-in-store',
    default=False,
)
coreconfigitem(
    b'format',
    b'chunkcachesize',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'dotencode',
    default=True,
)
coreconfigitem(
    b'format',
    b'generaldelta',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'manifestcachesize',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'maxchainlen',
    default=dynamicdefault,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'obsstore-version',
    default=None,
)
coreconfigitem(
    b'format',
    b'sparse-revlog',
    default=True,
)
coreconfigitem(
    b'format',
    b'revlog-compression',
    default=lambda: [b'zstd', b'zlib'],
    alias=[(b'experimental', b'format.compression')],
)
1344 1344 # Experimental TODOs:
1345 1345 #
1346 1346 # * Same as for revlogv2 (but for the reduction of the number of files)
1347 # * drop the storage of the base
1348 1347 # * Improvement to investigate
1349 1348 # - storing .hgtags fnode
1350 1349 # - storing `rank` of changesets
1351 1350 # - storing branch related identifier
1352 1351
1353 1352 coreconfigitem(
1354 1353 b'format',
1355 1354 b'exp-use-changelog-v2',
1356 1355 default=None,
1357 1356 experimental=True,
1358 1357 )
1359 1358 coreconfigitem(
1360 1359 b'format',
1361 1360 b'usefncache',
1362 1361 default=True,
1363 1362 )
1364 1363 coreconfigitem(
1365 1364 b'format',
1366 1365 b'usegeneraldelta',
1367 1366 default=True,
1368 1367 )
1369 1368 coreconfigitem(
1370 1369 b'format',
1371 1370 b'usestore',
1372 1371 default=True,
1373 1372 )
1374 1373
1375 1374
def _persistent_nodemap_default():
    """Compute the default value of `format.use-persistent-nodemap`.

    The feature stays off unless a fast (Rust) revlog implementation can
    be imported, since the pure-Python path is too slow to enable by
    default.
    """
    from . import policy

    has_fast_impl = policy.importrust('revlog') is not None
    return has_fast_impl
1384 1383
1385 1384
1386 1385 coreconfigitem(
1387 1386 b'format',
1388 1387 b'use-persistent-nodemap',
1389 1388 default=_persistent_nodemap_default,
1390 1389 )
1391 1390 coreconfigitem(
1392 1391 b'format',
1393 1392 b'exp-use-copies-side-data-changeset',
1394 1393 default=False,
1395 1394 experimental=True,
1396 1395 )
1397 1396 coreconfigitem(
1398 1397 b'format',
1399 1398 b'use-share-safe',
1400 1399 default=False,
1401 1400 )
1402 1401 coreconfigitem(
1403 1402 b'format',
1404 1403 b'internal-phase',
1405 1404 default=False,
1406 1405 experimental=True,
1407 1406 )
1408 1407 coreconfigitem(
1409 1408 b'fsmonitor',
1410 1409 b'warn_when_unused',
1411 1410 default=True,
1412 1411 )
1413 1412 coreconfigitem(
1414 1413 b'fsmonitor',
1415 1414 b'warn_update_file_count',
1416 1415 default=50000,
1417 1416 )
1418 1417 coreconfigitem(
1419 1418 b'fsmonitor',
1420 1419 b'warn_update_file_count_rust',
1421 1420 default=400000,
1422 1421 )
1423 1422 coreconfigitem(
1424 1423 b'help',
1425 1424 br'hidden-command\..*',
1426 1425 default=False,
1427 1426 generic=True,
1428 1427 )
1429 1428 coreconfigitem(
1430 1429 b'help',
1431 1430 br'hidden-topic\..*',
1432 1431 default=False,
1433 1432 generic=True,
1434 1433 )
1435 1434 coreconfigitem(
1436 1435 b'hooks',
1437 1436 b'[^:]*',
1438 1437 default=dynamicdefault,
1439 1438 generic=True,
1440 1439 )
1441 1440 coreconfigitem(
1442 1441 b'hooks',
1443 1442 b'.*:run-with-plain',
1444 1443 default=True,
1445 1444 generic=True,
1446 1445 )
1447 1446 coreconfigitem(
1448 1447 b'hgweb-paths',
1449 1448 b'.*',
1450 1449 default=list,
1451 1450 generic=True,
1452 1451 )
1453 1452 coreconfigitem(
1454 1453 b'hostfingerprints',
1455 1454 b'.*',
1456 1455 default=list,
1457 1456 generic=True,
1458 1457 )
1459 1458 coreconfigitem(
1460 1459 b'hostsecurity',
1461 1460 b'ciphers',
1462 1461 default=None,
1463 1462 )
1464 1463 coreconfigitem(
1465 1464 b'hostsecurity',
1466 1465 b'minimumprotocol',
1467 1466 default=dynamicdefault,
1468 1467 )
1469 1468 coreconfigitem(
1470 1469 b'hostsecurity',
1471 1470 b'.*:minimumprotocol$',
1472 1471 default=dynamicdefault,
1473 1472 generic=True,
1474 1473 )
1475 1474 coreconfigitem(
1476 1475 b'hostsecurity',
1477 1476 b'.*:ciphers$',
1478 1477 default=dynamicdefault,
1479 1478 generic=True,
1480 1479 )
1481 1480 coreconfigitem(
1482 1481 b'hostsecurity',
1483 1482 b'.*:fingerprints$',
1484 1483 default=list,
1485 1484 generic=True,
1486 1485 )
1487 1486 coreconfigitem(
1488 1487 b'hostsecurity',
1489 1488 b'.*:verifycertsfile$',
1490 1489 default=None,
1491 1490 generic=True,
1492 1491 )
1493 1492
1494 1493 coreconfigitem(
1495 1494 b'http_proxy',
1496 1495 b'always',
1497 1496 default=False,
1498 1497 )
1499 1498 coreconfigitem(
1500 1499 b'http_proxy',
1501 1500 b'host',
1502 1501 default=None,
1503 1502 )
1504 1503 coreconfigitem(
1505 1504 b'http_proxy',
1506 1505 b'no',
1507 1506 default=list,
1508 1507 )
1509 1508 coreconfigitem(
1510 1509 b'http_proxy',
1511 1510 b'passwd',
1512 1511 default=None,
1513 1512 )
1514 1513 coreconfigitem(
1515 1514 b'http_proxy',
1516 1515 b'user',
1517 1516 default=None,
1518 1517 )
1519 1518
1520 1519 coreconfigitem(
1521 1520 b'http',
1522 1521 b'timeout',
1523 1522 default=None,
1524 1523 )
1525 1524
1526 1525 coreconfigitem(
1527 1526 b'logtoprocess',
1528 1527 b'commandexception',
1529 1528 default=None,
1530 1529 )
1531 1530 coreconfigitem(
1532 1531 b'logtoprocess',
1533 1532 b'commandfinish',
1534 1533 default=None,
1535 1534 )
1536 1535 coreconfigitem(
1537 1536 b'logtoprocess',
1538 1537 b'command',
1539 1538 default=None,
1540 1539 )
1541 1540 coreconfigitem(
1542 1541 b'logtoprocess',
1543 1542 b'develwarn',
1544 1543 default=None,
1545 1544 )
1546 1545 coreconfigitem(
1547 1546 b'logtoprocess',
1548 1547 b'uiblocked',
1549 1548 default=None,
1550 1549 )
1551 1550 coreconfigitem(
1552 1551 b'merge',
1553 1552 b'checkunknown',
1554 1553 default=b'abort',
1555 1554 )
1556 1555 coreconfigitem(
1557 1556 b'merge',
1558 1557 b'checkignored',
1559 1558 default=b'abort',
1560 1559 )
1561 1560 coreconfigitem(
1562 1561 b'experimental',
1563 1562 b'merge.checkpathconflicts',
1564 1563 default=False,
1565 1564 )
1566 1565 coreconfigitem(
1567 1566 b'merge',
1568 1567 b'followcopies',
1569 1568 default=True,
1570 1569 )
1571 1570 coreconfigitem(
1572 1571 b'merge',
1573 1572 b'on-failure',
1574 1573 default=b'continue',
1575 1574 )
1576 1575 coreconfigitem(
1577 1576 b'merge',
1578 1577 b'preferancestor',
1579 1578 default=lambda: [b'*'],
1580 1579 experimental=True,
1581 1580 )
1582 1581 coreconfigitem(
1583 1582 b'merge',
1584 1583 b'strict-capability-check',
1585 1584 default=False,
1586 1585 )
1587 1586 coreconfigitem(
1588 1587 b'merge-tools',
1589 1588 b'.*',
1590 1589 default=None,
1591 1590 generic=True,
1592 1591 )
1593 1592 coreconfigitem(
1594 1593 b'merge-tools',
1595 1594 br'.*\.args$',
1596 1595 default=b"$local $base $other",
1597 1596 generic=True,
1598 1597 priority=-1,
1599 1598 )
1600 1599 coreconfigitem(
1601 1600 b'merge-tools',
1602 1601 br'.*\.binary$',
1603 1602 default=False,
1604 1603 generic=True,
1605 1604 priority=-1,
1606 1605 )
1607 1606 coreconfigitem(
1608 1607 b'merge-tools',
1609 1608 br'.*\.check$',
1610 1609 default=list,
1611 1610 generic=True,
1612 1611 priority=-1,
1613 1612 )
1614 1613 coreconfigitem(
1615 1614 b'merge-tools',
1616 1615 br'.*\.checkchanged$',
1617 1616 default=False,
1618 1617 generic=True,
1619 1618 priority=-1,
1620 1619 )
1621 1620 coreconfigitem(
1622 1621 b'merge-tools',
1623 1622 br'.*\.executable$',
1624 1623 default=dynamicdefault,
1625 1624 generic=True,
1626 1625 priority=-1,
1627 1626 )
1628 1627 coreconfigitem(
1629 1628 b'merge-tools',
1630 1629 br'.*\.fixeol$',
1631 1630 default=False,
1632 1631 generic=True,
1633 1632 priority=-1,
1634 1633 )
1635 1634 coreconfigitem(
1636 1635 b'merge-tools',
1637 1636 br'.*\.gui$',
1638 1637 default=False,
1639 1638 generic=True,
1640 1639 priority=-1,
1641 1640 )
1642 1641 coreconfigitem(
1643 1642 b'merge-tools',
1644 1643 br'.*\.mergemarkers$',
1645 1644 default=b'basic',
1646 1645 generic=True,
1647 1646 priority=-1,
1648 1647 )
1649 1648 coreconfigitem(
1650 1649 b'merge-tools',
1651 1650 br'.*\.mergemarkertemplate$',
1652 1651 default=dynamicdefault, # take from command-templates.mergemarker
1653 1652 generic=True,
1654 1653 priority=-1,
1655 1654 )
1656 1655 coreconfigitem(
1657 1656 b'merge-tools',
1658 1657 br'.*\.priority$',
1659 1658 default=0,
1660 1659 generic=True,
1661 1660 priority=-1,
1662 1661 )
1663 1662 coreconfigitem(
1664 1663 b'merge-tools',
1665 1664 br'.*\.premerge$',
1666 1665 default=dynamicdefault,
1667 1666 generic=True,
1668 1667 priority=-1,
1669 1668 )
1670 1669 coreconfigitem(
1671 1670 b'merge-tools',
1672 1671 br'.*\.symlink$',
1673 1672 default=False,
1674 1673 generic=True,
1675 1674 priority=-1,
1676 1675 )
1677 1676 coreconfigitem(
1678 1677 b'pager',
1679 1678 b'attend-.*',
1680 1679 default=dynamicdefault,
1681 1680 generic=True,
1682 1681 )
1683 1682 coreconfigitem(
1684 1683 b'pager',
1685 1684 b'ignore',
1686 1685 default=list,
1687 1686 )
1688 1687 coreconfigitem(
1689 1688 b'pager',
1690 1689 b'pager',
1691 1690 default=dynamicdefault,
1692 1691 )
1693 1692 coreconfigitem(
1694 1693 b'patch',
1695 1694 b'eol',
1696 1695 default=b'strict',
1697 1696 )
1698 1697 coreconfigitem(
1699 1698 b'patch',
1700 1699 b'fuzz',
1701 1700 default=2,
1702 1701 )
1703 1702 coreconfigitem(
1704 1703 b'paths',
1705 1704 b'default',
1706 1705 default=None,
1707 1706 )
1708 1707 coreconfigitem(
1709 1708 b'paths',
1710 1709 b'default-push',
1711 1710 default=None,
1712 1711 )
1713 1712 coreconfigitem(
1714 1713 b'paths',
1715 1714 b'.*',
1716 1715 default=None,
1717 1716 generic=True,
1718 1717 )
1719 1718 coreconfigitem(
1720 1719 b'phases',
1721 1720 b'checksubrepos',
1722 1721 default=b'follow',
1723 1722 )
1724 1723 coreconfigitem(
1725 1724 b'phases',
1726 1725 b'new-commit',
1727 1726 default=b'draft',
1728 1727 )
1729 1728 coreconfigitem(
1730 1729 b'phases',
1731 1730 b'publish',
1732 1731 default=True,
1733 1732 )
1734 1733 coreconfigitem(
1735 1734 b'profiling',
1736 1735 b'enabled',
1737 1736 default=False,
1738 1737 )
1739 1738 coreconfigitem(
1740 1739 b'profiling',
1741 1740 b'format',
1742 1741 default=b'text',
1743 1742 )
1744 1743 coreconfigitem(
1745 1744 b'profiling',
1746 1745 b'freq',
1747 1746 default=1000,
1748 1747 )
1749 1748 coreconfigitem(
1750 1749 b'profiling',
1751 1750 b'limit',
1752 1751 default=30,
1753 1752 )
1754 1753 coreconfigitem(
1755 1754 b'profiling',
1756 1755 b'nested',
1757 1756 default=0,
1758 1757 )
1759 1758 coreconfigitem(
1760 1759 b'profiling',
1761 1760 b'output',
1762 1761 default=None,
1763 1762 )
1764 1763 coreconfigitem(
1765 1764 b'profiling',
1766 1765 b'showmax',
1767 1766 default=0.999,
1768 1767 )
1769 1768 coreconfigitem(
1770 1769 b'profiling',
1771 1770 b'showmin',
1772 1771 default=dynamicdefault,
1773 1772 )
1774 1773 coreconfigitem(
1775 1774 b'profiling',
1776 1775 b'showtime',
1777 1776 default=True,
1778 1777 )
1779 1778 coreconfigitem(
1780 1779 b'profiling',
1781 1780 b'sort',
1782 1781 default=b'inlinetime',
1783 1782 )
1784 1783 coreconfigitem(
1785 1784 b'profiling',
1786 1785 b'statformat',
1787 1786 default=b'hotpath',
1788 1787 )
1789 1788 coreconfigitem(
1790 1789 b'profiling',
1791 1790 b'time-track',
1792 1791 default=dynamicdefault,
1793 1792 )
1794 1793 coreconfigitem(
1795 1794 b'profiling',
1796 1795 b'type',
1797 1796 default=b'stat',
1798 1797 )
1799 1798 coreconfigitem(
1800 1799 b'progress',
1801 1800 b'assume-tty',
1802 1801 default=False,
1803 1802 )
1804 1803 coreconfigitem(
1805 1804 b'progress',
1806 1805 b'changedelay',
1807 1806 default=1,
1808 1807 )
1809 1808 coreconfigitem(
1810 1809 b'progress',
1811 1810 b'clear-complete',
1812 1811 default=True,
1813 1812 )
1814 1813 coreconfigitem(
1815 1814 b'progress',
1816 1815 b'debug',
1817 1816 default=False,
1818 1817 )
1819 1818 coreconfigitem(
1820 1819 b'progress',
1821 1820 b'delay',
1822 1821 default=3,
1823 1822 )
1824 1823 coreconfigitem(
1825 1824 b'progress',
1826 1825 b'disable',
1827 1826 default=False,
1828 1827 )
1829 1828 coreconfigitem(
1830 1829 b'progress',
1831 1830 b'estimateinterval',
1832 1831 default=60.0,
1833 1832 )
1834 1833 coreconfigitem(
1835 1834 b'progress',
1836 1835 b'format',
1837 1836 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1838 1837 )
1839 1838 coreconfigitem(
1840 1839 b'progress',
1841 1840 b'refresh',
1842 1841 default=0.1,
1843 1842 )
1844 1843 coreconfigitem(
1845 1844 b'progress',
1846 1845 b'width',
1847 1846 default=dynamicdefault,
1848 1847 )
1849 1848 coreconfigitem(
1850 1849 b'pull',
1851 1850 b'confirm',
1852 1851 default=False,
1853 1852 )
1854 1853 coreconfigitem(
1855 1854 b'push',
1856 1855 b'pushvars.server',
1857 1856 default=False,
1858 1857 )
1859 1858 coreconfigitem(
1860 1859 b'rewrite',
1861 1860 b'backup-bundle',
1862 1861 default=True,
1863 1862 alias=[(b'ui', b'history-editing-backup')],
1864 1863 )
1865 1864 coreconfigitem(
1866 1865 b'rewrite',
1867 1866 b'update-timestamp',
1868 1867 default=False,
1869 1868 )
1870 1869 coreconfigitem(
1871 1870 b'rewrite',
1872 1871 b'empty-successor',
1873 1872 default=b'skip',
1874 1873 experimental=True,
1875 1874 )
1876 1875 coreconfigitem(
1877 1876 b'storage',
1878 1877 b'new-repo-backend',
1879 1878 default=b'revlogv1',
1880 1879 experimental=True,
1881 1880 )
1882 1881 coreconfigitem(
1883 1882 b'storage',
1884 1883 b'revlog.optimize-delta-parent-choice',
1885 1884 default=True,
1886 1885 alias=[(b'format', b'aggressivemergedeltas')],
1887 1886 )
1888 1887 # experimental as long as rust is experimental (or a C version is implemented)
1889 1888 coreconfigitem(
1890 1889 b'storage',
1891 1890 b'revlog.persistent-nodemap.mmap',
1892 1891 default=True,
1893 1892 )
1894 1893 # experimental as long as format.use-persistent-nodemap is.
1895 1894 coreconfigitem(
1896 1895 b'storage',
1897 1896 b'revlog.persistent-nodemap.slow-path',
1898 1897 default=b"abort",
1899 1898 )
1900 1899
1901 1900 coreconfigitem(
1902 1901 b'storage',
1903 1902 b'revlog.reuse-external-delta',
1904 1903 default=True,
1905 1904 )
1906 1905 coreconfigitem(
1907 1906 b'storage',
1908 1907 b'revlog.reuse-external-delta-parent',
1909 1908 default=None,
1910 1909 )
1911 1910 coreconfigitem(
1912 1911 b'storage',
1913 1912 b'revlog.zlib.level',
1914 1913 default=None,
1915 1914 )
1916 1915 coreconfigitem(
1917 1916 b'storage',
1918 1917 b'revlog.zstd.level',
1919 1918 default=None,
1920 1919 )
1921 1920 coreconfigitem(
1922 1921 b'server',
1923 1922 b'bookmarks-pushkey-compat',
1924 1923 default=True,
1925 1924 )
1926 1925 coreconfigitem(
1927 1926 b'server',
1928 1927 b'bundle1',
1929 1928 default=True,
1930 1929 )
1931 1930 coreconfigitem(
1932 1931 b'server',
1933 1932 b'bundle1gd',
1934 1933 default=None,
1935 1934 )
1936 1935 coreconfigitem(
1937 1936 b'server',
1938 1937 b'bundle1.pull',
1939 1938 default=None,
1940 1939 )
1941 1940 coreconfigitem(
1942 1941 b'server',
1943 1942 b'bundle1gd.pull',
1944 1943 default=None,
1945 1944 )
1946 1945 coreconfigitem(
1947 1946 b'server',
1948 1947 b'bundle1.push',
1949 1948 default=None,
1950 1949 )
1951 1950 coreconfigitem(
1952 1951 b'server',
1953 1952 b'bundle1gd.push',
1954 1953 default=None,
1955 1954 )
1956 1955 coreconfigitem(
1957 1956 b'server',
1958 1957 b'bundle2.stream',
1959 1958 default=True,
1960 1959 alias=[(b'experimental', b'bundle2.stream')],
1961 1960 )
1962 1961 coreconfigitem(
1963 1962 b'server',
1964 1963 b'compressionengines',
1965 1964 default=list,
1966 1965 )
1967 1966 coreconfigitem(
1968 1967 b'server',
1969 1968 b'concurrent-push-mode',
1970 1969 default=b'check-related',
1971 1970 )
1972 1971 coreconfigitem(
1973 1972 b'server',
1974 1973 b'disablefullbundle',
1975 1974 default=False,
1976 1975 )
1977 1976 coreconfigitem(
1978 1977 b'server',
1979 1978 b'maxhttpheaderlen',
1980 1979 default=1024,
1981 1980 )
1982 1981 coreconfigitem(
1983 1982 b'server',
1984 1983 b'pullbundle',
1985 1984 default=False,
1986 1985 )
1987 1986 coreconfigitem(
1988 1987 b'server',
1989 1988 b'preferuncompressed',
1990 1989 default=False,
1991 1990 )
1992 1991 coreconfigitem(
1993 1992 b'server',
1994 1993 b'streamunbundle',
1995 1994 default=False,
1996 1995 )
1997 1996 coreconfigitem(
1998 1997 b'server',
1999 1998 b'uncompressed',
2000 1999 default=True,
2001 2000 )
2002 2001 coreconfigitem(
2003 2002 b'server',
2004 2003 b'uncompressedallowsecret',
2005 2004 default=False,
2006 2005 )
2007 2006 coreconfigitem(
2008 2007 b'server',
2009 2008 b'view',
2010 2009 default=b'served',
2011 2010 )
2012 2011 coreconfigitem(
2013 2012 b'server',
2014 2013 b'validate',
2015 2014 default=False,
2016 2015 )
2017 2016 coreconfigitem(
2018 2017 b'server',
2019 2018 b'zliblevel',
2020 2019 default=-1,
2021 2020 )
2022 2021 coreconfigitem(
2023 2022 b'server',
2024 2023 b'zstdlevel',
2025 2024 default=3,
2026 2025 )
2027 2026 coreconfigitem(
2028 2027 b'share',
2029 2028 b'pool',
2030 2029 default=None,
2031 2030 )
2032 2031 coreconfigitem(
2033 2032 b'share',
2034 2033 b'poolnaming',
2035 2034 default=b'identity',
2036 2035 )
2037 2036 coreconfigitem(
2038 2037 b'share',
2039 2038 b'safe-mismatch.source-not-safe',
2040 2039 default=b'abort',
2041 2040 )
2042 2041 coreconfigitem(
2043 2042 b'share',
2044 2043 b'safe-mismatch.source-safe',
2045 2044 default=b'abort',
2046 2045 )
2047 2046 coreconfigitem(
2048 2047 b'share',
2049 2048 b'safe-mismatch.source-not-safe.warn',
2050 2049 default=True,
2051 2050 )
2052 2051 coreconfigitem(
2053 2052 b'share',
2054 2053 b'safe-mismatch.source-safe.warn',
2055 2054 default=True,
2056 2055 )
2057 2056 coreconfigitem(
2058 2057 b'shelve',
2059 2058 b'maxbackups',
2060 2059 default=10,
2061 2060 )
2062 2061 coreconfigitem(
2063 2062 b'smtp',
2064 2063 b'host',
2065 2064 default=None,
2066 2065 )
2067 2066 coreconfigitem(
2068 2067 b'smtp',
2069 2068 b'local_hostname',
2070 2069 default=None,
2071 2070 )
2072 2071 coreconfigitem(
2073 2072 b'smtp',
2074 2073 b'password',
2075 2074 default=None,
2076 2075 )
2077 2076 coreconfigitem(
2078 2077 b'smtp',
2079 2078 b'port',
2080 2079 default=dynamicdefault,
2081 2080 )
2082 2081 coreconfigitem(
2083 2082 b'smtp',
2084 2083 b'tls',
2085 2084 default=b'none',
2086 2085 )
2087 2086 coreconfigitem(
2088 2087 b'smtp',
2089 2088 b'username',
2090 2089 default=None,
2091 2090 )
2092 2091 coreconfigitem(
2093 2092 b'sparse',
2094 2093 b'missingwarning',
2095 2094 default=True,
2096 2095 experimental=True,
2097 2096 )
2098 2097 coreconfigitem(
2099 2098 b'subrepos',
2100 2099 b'allowed',
2101 2100 default=dynamicdefault, # to make backporting simpler
2102 2101 )
2103 2102 coreconfigitem(
2104 2103 b'subrepos',
2105 2104 b'hg:allowed',
2106 2105 default=dynamicdefault,
2107 2106 )
2108 2107 coreconfigitem(
2109 2108 b'subrepos',
2110 2109 b'git:allowed',
2111 2110 default=dynamicdefault,
2112 2111 )
2113 2112 coreconfigitem(
2114 2113 b'subrepos',
2115 2114 b'svn:allowed',
2116 2115 default=dynamicdefault,
2117 2116 )
2118 2117 coreconfigitem(
2119 2118 b'templates',
2120 2119 b'.*',
2121 2120 default=None,
2122 2121 generic=True,
2123 2122 )
2124 2123 coreconfigitem(
2125 2124 b'templateconfig',
2126 2125 b'.*',
2127 2126 default=dynamicdefault,
2128 2127 generic=True,
2129 2128 )
2130 2129 coreconfigitem(
2131 2130 b'trusted',
2132 2131 b'groups',
2133 2132 default=list,
2134 2133 )
2135 2134 coreconfigitem(
2136 2135 b'trusted',
2137 2136 b'users',
2138 2137 default=list,
2139 2138 )
2140 2139 coreconfigitem(
2141 2140 b'ui',
2142 2141 b'_usedassubrepo',
2143 2142 default=False,
2144 2143 )
2145 2144 coreconfigitem(
2146 2145 b'ui',
2147 2146 b'allowemptycommit',
2148 2147 default=False,
2149 2148 )
2150 2149 coreconfigitem(
2151 2150 b'ui',
2152 2151 b'archivemeta',
2153 2152 default=True,
2154 2153 )
2155 2154 coreconfigitem(
2156 2155 b'ui',
2157 2156 b'askusername',
2158 2157 default=False,
2159 2158 )
2160 2159 coreconfigitem(
2161 2160 b'ui',
2162 2161 b'available-memory',
2163 2162 default=None,
2164 2163 )
2165 2164
2166 2165 coreconfigitem(
2167 2166 b'ui',
2168 2167 b'clonebundlefallback',
2169 2168 default=False,
2170 2169 )
2171 2170 coreconfigitem(
2172 2171 b'ui',
2173 2172 b'clonebundleprefers',
2174 2173 default=list,
2175 2174 )
2176 2175 coreconfigitem(
2177 2176 b'ui',
2178 2177 b'clonebundles',
2179 2178 default=True,
2180 2179 )
2181 2180 coreconfigitem(
2182 2181 b'ui',
2183 2182 b'color',
2184 2183 default=b'auto',
2185 2184 )
2186 2185 coreconfigitem(
2187 2186 b'ui',
2188 2187 b'commitsubrepos',
2189 2188 default=False,
2190 2189 )
2191 2190 coreconfigitem(
2192 2191 b'ui',
2193 2192 b'debug',
2194 2193 default=False,
2195 2194 )
2196 2195 coreconfigitem(
2197 2196 b'ui',
2198 2197 b'debugger',
2199 2198 default=None,
2200 2199 )
2201 2200 coreconfigitem(
2202 2201 b'ui',
2203 2202 b'editor',
2204 2203 default=dynamicdefault,
2205 2204 )
2206 2205 coreconfigitem(
2207 2206 b'ui',
2208 2207 b'detailed-exit-code',
2209 2208 default=False,
2210 2209 experimental=True,
2211 2210 )
2212 2211 coreconfigitem(
2213 2212 b'ui',
2214 2213 b'fallbackencoding',
2215 2214 default=None,
2216 2215 )
2217 2216 coreconfigitem(
2218 2217 b'ui',
2219 2218 b'forcecwd',
2220 2219 default=None,
2221 2220 )
2222 2221 coreconfigitem(
2223 2222 b'ui',
2224 2223 b'forcemerge',
2225 2224 default=None,
2226 2225 )
2227 2226 coreconfigitem(
2228 2227 b'ui',
2229 2228 b'formatdebug',
2230 2229 default=False,
2231 2230 )
2232 2231 coreconfigitem(
2233 2232 b'ui',
2234 2233 b'formatjson',
2235 2234 default=False,
2236 2235 )
2237 2236 coreconfigitem(
2238 2237 b'ui',
2239 2238 b'formatted',
2240 2239 default=None,
2241 2240 )
2242 2241 coreconfigitem(
2243 2242 b'ui',
2244 2243 b'interactive',
2245 2244 default=None,
2246 2245 )
2247 2246 coreconfigitem(
2248 2247 b'ui',
2249 2248 b'interface',
2250 2249 default=None,
2251 2250 )
2252 2251 coreconfigitem(
2253 2252 b'ui',
2254 2253 b'interface.chunkselector',
2255 2254 default=None,
2256 2255 )
2257 2256 coreconfigitem(
2258 2257 b'ui',
2259 2258 b'large-file-limit',
2260 2259 default=10000000,
2261 2260 )
2262 2261 coreconfigitem(
2263 2262 b'ui',
2264 2263 b'logblockedtimes',
2265 2264 default=False,
2266 2265 )
2267 2266 coreconfigitem(
2268 2267 b'ui',
2269 2268 b'merge',
2270 2269 default=None,
2271 2270 )
2272 2271 coreconfigitem(
2273 2272 b'ui',
2274 2273 b'mergemarkers',
2275 2274 default=b'basic',
2276 2275 )
2277 2276 coreconfigitem(
2278 2277 b'ui',
2279 2278 b'message-output',
2280 2279 default=b'stdio',
2281 2280 )
2282 2281 coreconfigitem(
2283 2282 b'ui',
2284 2283 b'nontty',
2285 2284 default=False,
2286 2285 )
2287 2286 coreconfigitem(
2288 2287 b'ui',
2289 2288 b'origbackuppath',
2290 2289 default=None,
2291 2290 )
2292 2291 coreconfigitem(
2293 2292 b'ui',
2294 2293 b'paginate',
2295 2294 default=True,
2296 2295 )
2297 2296 coreconfigitem(
2298 2297 b'ui',
2299 2298 b'patch',
2300 2299 default=None,
2301 2300 )
2302 2301 coreconfigitem(
2303 2302 b'ui',
2304 2303 b'portablefilenames',
2305 2304 default=b'warn',
2306 2305 )
2307 2306 coreconfigitem(
2308 2307 b'ui',
2309 2308 b'promptecho',
2310 2309 default=False,
2311 2310 )
2312 2311 coreconfigitem(
2313 2312 b'ui',
2314 2313 b'quiet',
2315 2314 default=False,
2316 2315 )
2317 2316 coreconfigitem(
2318 2317 b'ui',
2319 2318 b'quietbookmarkmove',
2320 2319 default=False,
2321 2320 )
2322 2321 coreconfigitem(
2323 2322 b'ui',
2324 2323 b'relative-paths',
2325 2324 default=b'legacy',
2326 2325 )
2327 2326 coreconfigitem(
2328 2327 b'ui',
2329 2328 b'remotecmd',
2330 2329 default=b'hg',
2331 2330 )
2332 2331 coreconfigitem(
2333 2332 b'ui',
2334 2333 b'report_untrusted',
2335 2334 default=True,
2336 2335 )
2337 2336 coreconfigitem(
2338 2337 b'ui',
2339 2338 b'rollback',
2340 2339 default=True,
2341 2340 )
2342 2341 coreconfigitem(
2343 2342 b'ui',
2344 2343 b'signal-safe-lock',
2345 2344 default=True,
2346 2345 )
2347 2346 coreconfigitem(
2348 2347 b'ui',
2349 2348 b'slash',
2350 2349 default=False,
2351 2350 )
2352 2351 coreconfigitem(
2353 2352 b'ui',
2354 2353 b'ssh',
2355 2354 default=b'ssh',
2356 2355 )
2357 2356 coreconfigitem(
2358 2357 b'ui',
2359 2358 b'ssherrorhint',
2360 2359 default=None,
2361 2360 )
2362 2361 coreconfigitem(
2363 2362 b'ui',
2364 2363 b'statuscopies',
2365 2364 default=False,
2366 2365 )
2367 2366 coreconfigitem(
2368 2367 b'ui',
2369 2368 b'strict',
2370 2369 default=False,
2371 2370 )
2372 2371 coreconfigitem(
2373 2372 b'ui',
2374 2373 b'style',
2375 2374 default=b'',
2376 2375 )
2377 2376 coreconfigitem(
2378 2377 b'ui',
2379 2378 b'supportcontact',
2380 2379 default=None,
2381 2380 )
2382 2381 coreconfigitem(
2383 2382 b'ui',
2384 2383 b'textwidth',
2385 2384 default=78,
2386 2385 )
2387 2386 coreconfigitem(
2388 2387 b'ui',
2389 2388 b'timeout',
2390 2389 default=b'600',
2391 2390 )
2392 2391 coreconfigitem(
2393 2392 b'ui',
2394 2393 b'timeout.warn',
2395 2394 default=0,
2396 2395 )
2397 2396 coreconfigitem(
2398 2397 b'ui',
2399 2398 b'timestamp-output',
2400 2399 default=False,
2401 2400 )
2402 2401 coreconfigitem(
2403 2402 b'ui',
2404 2403 b'traceback',
2405 2404 default=False,
2406 2405 )
2407 2406 coreconfigitem(
2408 2407 b'ui',
2409 2408 b'tweakdefaults',
2410 2409 default=False,
2411 2410 )
2412 2411 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2413 2412 coreconfigitem(
2414 2413 b'ui',
2415 2414 b'verbose',
2416 2415 default=False,
2417 2416 )
2418 2417 coreconfigitem(
2419 2418 b'verify',
2420 2419 b'skipflags',
2421 2420 default=None,
2422 2421 )
2423 2422 coreconfigitem(
2424 2423 b'web',
2425 2424 b'allowbz2',
2426 2425 default=False,
2427 2426 )
2428 2427 coreconfigitem(
2429 2428 b'web',
2430 2429 b'allowgz',
2431 2430 default=False,
2432 2431 )
2433 2432 coreconfigitem(
2434 2433 b'web',
2435 2434 b'allow-pull',
2436 2435 alias=[(b'web', b'allowpull')],
2437 2436 default=True,
2438 2437 )
2439 2438 coreconfigitem(
2440 2439 b'web',
2441 2440 b'allow-push',
2442 2441 alias=[(b'web', b'allow_push')],
2443 2442 default=list,
2444 2443 )
2445 2444 coreconfigitem(
2446 2445 b'web',
2447 2446 b'allowzip',
2448 2447 default=False,
2449 2448 )
2450 2449 coreconfigitem(
2451 2450 b'web',
2452 2451 b'archivesubrepos',
2453 2452 default=False,
2454 2453 )
2455 2454 coreconfigitem(
2456 2455 b'web',
2457 2456 b'cache',
2458 2457 default=True,
2459 2458 )
2460 2459 coreconfigitem(
2461 2460 b'web',
2462 2461 b'comparisoncontext',
2463 2462 default=5,
2464 2463 )
2465 2464 coreconfigitem(
2466 2465 b'web',
2467 2466 b'contact',
2468 2467 default=None,
2469 2468 )
2470 2469 coreconfigitem(
2471 2470 b'web',
2472 2471 b'deny_push',
2473 2472 default=list,
2474 2473 )
2475 2474 coreconfigitem(
2476 2475 b'web',
2477 2476 b'guessmime',
2478 2477 default=False,
2479 2478 )
2480 2479 coreconfigitem(
2481 2480 b'web',
2482 2481 b'hidden',
2483 2482 default=False,
2484 2483 )
2485 2484 coreconfigitem(
2486 2485 b'web',
2487 2486 b'labels',
2488 2487 default=list,
2489 2488 )
2490 2489 coreconfigitem(
2491 2490 b'web',
2492 2491 b'logoimg',
2493 2492 default=b'hglogo.png',
2494 2493 )
2495 2494 coreconfigitem(
2496 2495 b'web',
2497 2496 b'logourl',
2498 2497 default=b'https://mercurial-scm.org/',
2499 2498 )
2500 2499 coreconfigitem(
2501 2500 b'web',
2502 2501 b'accesslog',
2503 2502 default=b'-',
2504 2503 )
2505 2504 coreconfigitem(
2506 2505 b'web',
2507 2506 b'address',
2508 2507 default=b'',
2509 2508 )
2510 2509 coreconfigitem(
2511 2510 b'web',
2512 2511 b'allow-archive',
2513 2512 alias=[(b'web', b'allow_archive')],
2514 2513 default=list,
2515 2514 )
2516 2515 coreconfigitem(
2517 2516 b'web',
2518 2517 b'allow_read',
2519 2518 default=list,
2520 2519 )
2521 2520 coreconfigitem(
2522 2521 b'web',
2523 2522 b'baseurl',
2524 2523 default=None,
2525 2524 )
2526 2525 coreconfigitem(
2527 2526 b'web',
2528 2527 b'cacerts',
2529 2528 default=None,
2530 2529 )
2531 2530 coreconfigitem(
2532 2531 b'web',
2533 2532 b'certificate',
2534 2533 default=None,
2535 2534 )
2536 2535 coreconfigitem(
2537 2536 b'web',
2538 2537 b'collapse',
2539 2538 default=False,
2540 2539 )
2541 2540 coreconfigitem(
2542 2541 b'web',
2543 2542 b'csp',
2544 2543 default=None,
2545 2544 )
2546 2545 coreconfigitem(
2547 2546 b'web',
2548 2547 b'deny_read',
2549 2548 default=list,
2550 2549 )
2551 2550 coreconfigitem(
2552 2551 b'web',
2553 2552 b'descend',
2554 2553 default=True,
2555 2554 )
2556 2555 coreconfigitem(
2557 2556 b'web',
2558 2557 b'description',
2559 2558 default=b"",
2560 2559 )
2561 2560 coreconfigitem(
2562 2561 b'web',
2563 2562 b'encoding',
2564 2563 default=lambda: encoding.encoding,
2565 2564 )
2566 2565 coreconfigitem(
2567 2566 b'web',
2568 2567 b'errorlog',
2569 2568 default=b'-',
2570 2569 )
2571 2570 coreconfigitem(
2572 2571 b'web',
2573 2572 b'ipv6',
2574 2573 default=False,
2575 2574 )
2576 2575 coreconfigitem(
2577 2576 b'web',
2578 2577 b'maxchanges',
2579 2578 default=10,
2580 2579 )
2581 2580 coreconfigitem(
2582 2581 b'web',
2583 2582 b'maxfiles',
2584 2583 default=10,
2585 2584 )
2586 2585 coreconfigitem(
2587 2586 b'web',
2588 2587 b'maxshortchanges',
2589 2588 default=60,
2590 2589 )
2591 2590 coreconfigitem(
2592 2591 b'web',
2593 2592 b'motd',
2594 2593 default=b'',
2595 2594 )
2596 2595 coreconfigitem(
2597 2596 b'web',
2598 2597 b'name',
2599 2598 default=dynamicdefault,
2600 2599 )
2601 2600 coreconfigitem(
2602 2601 b'web',
2603 2602 b'port',
2604 2603 default=8000,
2605 2604 )
2606 2605 coreconfigitem(
2607 2606 b'web',
2608 2607 b'prefix',
2609 2608 default=b'',
2610 2609 )
2611 2610 coreconfigitem(
2612 2611 b'web',
2613 2612 b'push_ssl',
2614 2613 default=True,
2615 2614 )
2616 2615 coreconfigitem(
2617 2616 b'web',
2618 2617 b'refreshinterval',
2619 2618 default=20,
2620 2619 )
2621 2620 coreconfigitem(
2622 2621 b'web',
2623 2622 b'server-header',
2624 2623 default=None,
2625 2624 )
2626 2625 coreconfigitem(
2627 2626 b'web',
2628 2627 b'static',
2629 2628 default=None,
2630 2629 )
2631 2630 coreconfigitem(
2632 2631 b'web',
2633 2632 b'staticurl',
2634 2633 default=None,
2635 2634 )
2636 2635 coreconfigitem(
2637 2636 b'web',
2638 2637 b'stripes',
2639 2638 default=1,
2640 2639 )
2641 2640 coreconfigitem(
2642 2641 b'web',
2643 2642 b'style',
2644 2643 default=b'paper',
2645 2644 )
2646 2645 coreconfigitem(
2647 2646 b'web',
2648 2647 b'templates',
2649 2648 default=None,
2650 2649 )
2651 2650 coreconfigitem(
2652 2651 b'web',
2653 2652 b'view',
2654 2653 default=b'served',
2655 2654 experimental=True,
2656 2655 )
2657 2656 coreconfigitem(
2658 2657 b'worker',
2659 2658 b'backgroundclose',
2660 2659 default=dynamicdefault,
2661 2660 )
2662 2661 # Windows defaults to a limit of 512 open files. A buffer of 128
2663 2662 # should give us enough headway.
2664 2663 coreconfigitem(
2665 2664 b'worker',
2666 2665 b'backgroundclosemaxqueue',
2667 2666 default=384,
2668 2667 )
2669 2668 coreconfigitem(
2670 2669 b'worker',
2671 2670 b'backgroundcloseminfilecount',
2672 2671 default=2048,
2673 2672 )
2674 2673 coreconfigitem(
2675 2674 b'worker',
2676 2675 b'backgroundclosethreadcount',
2677 2676 default=4,
2678 2677 )
2679 2678 coreconfigitem(
2680 2679 b'worker',
2681 2680 b'enabled',
2682 2681 default=True,
2683 2682 )
2684 2683 coreconfigitem(
2685 2684 b'worker',
2686 2685 b'numcpus',
2687 2686 default=None,
2688 2687 )
2689 2688
2690 2689 # Rebase related configuration moved to core because other extension are doing
2691 2690 # strange things. For example, shelve import the extensions to reuse some bit
2692 2691 # without formally loading it.
2693 2692 coreconfigitem(
2694 2693 b'commands',
2695 2694 b'rebase.requiredest',
2696 2695 default=False,
2697 2696 )
2698 2697 coreconfigitem(
2699 2698 b'experimental',
2700 2699 b'rebaseskipobsolete',
2701 2700 default=True,
2702 2701 )
2703 2702 coreconfigitem(
2704 2703 b'rebase',
2705 2704 b'singletransaction',
2706 2705 default=False,
2707 2706 )
2708 2707 coreconfigitem(
2709 2708 b'rebase',
2710 2709 b'experimental.inmemory',
2711 2710 default=False,
2712 2711 )
@@ -1,408 +1,432
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import (
14 14 nullrev,
15 15 sha1nodeconstants,
16 16 )
17 17 from .. import (
18 18 error,
19 19 pycompat,
20 20 util,
21 21 )
22 22
23 23 from ..revlogutils import nodemap as nodemaputil
24 24 from ..revlogutils import constants as revlog_constants
25 25
26 26 stringio = pycompat.bytesio
27 27
28 28
29 29 _pack = struct.pack
30 30 _unpack = struct.unpack
31 31 _compress = zlib.compress
32 32 _decompress = zlib.decompress
33 33
# Some code below makes tuples directly because it's more convenient. However,
# code outside this module should always use dirstatetuple.
def dirstatetuple(*x):
    """Return the given arguments as a tuple; dirstate entries are plain
    tuples, and this is the canonical constructor for code outside this
    module."""
    # x is a tuple
    return x
39 39
40 40
def gettype(q):
    """Extract the 16-bit type/flags value stored in the low bits of *q*."""
    low_bits = q & 0xFFFF
    return int(low_bits)
43 43
44 44
def offset_type(offset, type):
    """Pack *offset* and a 16-bit *type* value into a single integer field."""
    shifted = int(offset) << 16
    return int(shifted | type)
47 47
48 48
class BaseIndexObject(object):
    """Common base for the pure-Python revlog index implementations.

    Subclasses provide the storage layout (separate index file vs. inlined
    data); this class implements entry packing/unpacking and the lazily
    built node -> rev mapping.
    """

    # Can I be passed to an algorithme implemented in Rust ?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        # byte size of one on-disk index entry
        return self.index_format.size

    @property
    def nodemap(self):
        # deprecated public accessor kept for compatibility; warns callers
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily built node -> rev mapping, seeded with the null node
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]  # entry item 7 is the node id
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # drop nodes of revs >= start from the cached nodemap, if it exists
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        # discard the lazily built node -> rev mapping
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        # on-disk entries plus entries appended in memory
        return self._lgt + len(self._extra)

    def append(self, tup):
        # keep the nodemap in sync if it has already been built
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        # v1 entries have no sidedata, so those items must be zero
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        # validate that *i* is a usable revision number for this index
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        """return the 12-item entry tuple for revision *i* (-1 -> null_item)"""
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            # entry appended in memory, already packed
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # the first on-disk entry stores the version header where the
            # offset normally lives; report offset 0 but keep the flags
            r = (offset_type(0, gettype(r[0])),) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        # pad with sidedata info (none in v1) and inline compression modes
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # rev 0's first bytes are replaced by the index header on disk
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
173 173
174 174
class IndexObject(BaseIndexObject):
    """Index backed by a non-inline index file: one flat array of fixed-size
    entries."""

    def __init__(self, data):
        # the buffer must be an exact multiple of the entry size
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        # fixed-size entries: byte offset is a simple multiplication
        return i * self.entry_size

    def __delitem__(self, i):
        # only truncation from some rev to the end is supported (strip)
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            # truncating into the on-disk portion drops all in-memory entries
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]
201 201
202 202
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return data describing an incremental update to the persistent
        nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            # no in-memory nodemap state to serialize incrementally
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the cached state is consumed by this call; a new
        # `update_nodemap_data` is needed before the next increment
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
245 245
246 246
class InlinedIndexObject(BaseIndexObject):
    """Index for an inline revlog, where index entries and revision data
    interleave in a single file."""

    def __init__(self, data, inline=0):
        self._data = data
        # first scan counts the entries, second records their byte offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        """walk the interleaved data and return the number of entries

        When *lgt* is not None, also fill ``self._offsets`` (pre-sized to
        *lgt*) with the byte position of each entry."""
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            # the compressed-length field tells us how much revision data
            # to skip to reach the next entry
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            # we must land exactly on the end of the buffer
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only truncation from some rev to the end is supported (strip)
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # offsets were recorded by _inline_scan
        return self._offsets[i]
288 288
289 289
def parse_index2(data, inline, revlogv2=False):
    """Build an index object for *data* and return ``(index, cache)``.

    Non-inline revlogs get no cache; inline ones return ``(0, data)`` so
    callers can reuse the bytes already read from disk.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    if revlogv2:
        return IndexObject2(data), None
    return IndexObject(data), None
296 296
297 297
def parse_index_cl_v2(data):
    # changelog-v2 entries use a dedicated on-disk layout; no cache returned
    return IndexChangelogV2(data), None
300
301
class IndexObject2(IndexObject):
    """Index for revlog-v2: entries carry sidedata location and per-chunk
    compression modes."""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # on-disk entries are immutable; only in-memory (pending) entries
            # created by the current transaction may be rewritten
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        # the last unpacked item packs both compression modes, two bits each:
        # data mode in bits 0-1, sidedata mode in bits 2-3
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        # fold both two-bit compression modes into a single packed value
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        # v2 stores the version in the docket file, never in the index
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
356 360
357 361
class IndexChangelogV2(IndexObject2):
    """Index for the dedicated changelog-v2 on-disk format.

    The changelog never stores base rev nor link rev (both always equal the
    revision itself), so the entry layout drops them.
    """

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        # NOTE(review): `r` is unused and absent from the base class
        # signature — presumably leftover; confirm before removing.
        items = self.index_format.unpack(data)
        # reinsert base rev and link rev, which always equal `rev` here
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # base rev (entry[3]) and link rev (entry[4]) must equal the revision
        # number itself; they are dropped from the on-disk form
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
380
381
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None
361 385
362 386
def parse_dirstate(dmap, copymap, st):
    """Decode the dirstate blob *st* into *dmap* and *copymap*.

    Fills ``dmap`` with ``filename -> (state, mode, size, mtime)`` and
    ``copymap`` with ``filename -> copy source``; returns the two parent
    nodes stored in the first 40 bytes.
    """
    parents = [st[:20], st[20:40]]
    # bind lookups locally: this loop runs once per tracked file
    entry_size = struct.calcsize(b">cllll")
    unpack = struct.unpack
    total = len(st)
    cursor = 40

    while cursor < total:
        header_end = cursor + entry_size
        # state (1 byte) + mode, size, mtime, filename length (4 ints)
        fields = unpack(b">cllll", st[cursor:header_end])
        cursor = header_end + fields[4]
        name = st[header_end:cursor]
        if b'\0' in name:
            # "<dest>\0<source>" encodes a copy/rename
            name, source = name.split(b'\0')
            copymap[name] = source
        dmap[name] = fields[:4]
    return parents
382 406
383 407
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize *dmap*/*copymap* into a dirstate blob.

    ``pl`` is the pair of parent nodes, written first.  Normal-state entries
    whose mtime equals *now* are stored with mtime -1 — and ``dmap`` is
    updated in place — so later 'status' calls compare file contents instead
    of trusting a timestamp written within the same second.
    """
    now = int(now)
    chunks = [b"".join(pl)]
    for filename, item in dmap.items():
        if item[0] == b'n' and item[3] == now:
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). The user could change
            # the file without changing its size within the same second.
            # Invalidate the file's mtime in dirstate, forcing future
            # 'status' calls to compare the contents of the file if the
            # size is the same. This prevents mistakenly treating such
            # files as clean.
            item = (item[0], item[1], item[2], -1)
            dmap[filename] = item

        if filename in copymap:
            # copies are encoded as "<dest>\0<source>" in the name field
            filename = b"%s\0%s" % (filename, copymap[filename])
        chunks.append(
            _pack(b">cllll", item[0], item[1], item[2], item[3], len(filename))
        )
        chunks.append(filename)
    return b"".join(chunks)
@@ -1,3445 +1,3454
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import binascii
17 17 import collections
18 18 import contextlib
19 19 import errno
20 20 import io
21 21 import os
22 22 import struct
23 23 import zlib
24 24
25 25 # import stuff from node for others to import from revlog
26 26 from .node import (
27 27 bin,
28 28 hex,
29 29 nullrev,
30 30 sha1nodeconstants,
31 31 short,
32 32 wdirrev,
33 33 )
34 34 from .i18n import _
35 35 from .pycompat import getattr
36 36 from .revlogutils.constants import (
37 37 ALL_KINDS,
38 38 CHANGELOGV2,
39 39 COMP_MODE_DEFAULT,
40 40 COMP_MODE_INLINE,
41 41 COMP_MODE_PLAIN,
42 42 FEATURES_BY_VERSION,
43 43 FLAG_GENERALDELTA,
44 44 FLAG_INLINE_DATA,
45 45 INDEX_HEADER,
46 46 KIND_CHANGELOG,
47 47 REVLOGV0,
48 48 REVLOGV1,
49 49 REVLOGV1_FLAGS,
50 50 REVLOGV2,
51 51 REVLOGV2_FLAGS,
52 52 REVLOG_DEFAULT_FLAGS,
53 53 REVLOG_DEFAULT_FORMAT,
54 54 REVLOG_DEFAULT_VERSION,
55 55 SUPPORTED_FLAGS,
56 56 )
57 57 from .revlogutils.flagutil import (
58 58 REVIDX_DEFAULT_FLAGS,
59 59 REVIDX_ELLIPSIS,
60 60 REVIDX_EXTSTORED,
61 61 REVIDX_FLAGS_ORDER,
62 62 REVIDX_HASCOPIESINFO,
63 63 REVIDX_ISCENSORED,
64 64 REVIDX_RAWTEXT_CHANGING_FLAGS,
65 65 )
66 66 from .thirdparty import attr
67 67 from . import (
68 68 ancestor,
69 69 dagop,
70 70 error,
71 71 mdiff,
72 72 policy,
73 73 pycompat,
74 74 templatefilters,
75 75 util,
76 76 )
77 77 from .interfaces import (
78 78 repository,
79 79 util as interfaceutil,
80 80 )
81 81 from .revlogutils import (
82 82 deltas as deltautil,
83 83 docket as docketutil,
84 84 flagutil,
85 85 nodemap as nodemaputil,
86 86 revlogv0,
87 87 sidedata as sidedatautil,
88 88 )
89 89 from .utils import (
90 90 storageutil,
91 91 stringutil,
92 92 )
93 93
94 94 # blanked usage of all the name to prevent pyflakes constraints
95 95 # We need these name available in the module for extensions.
96 96
97 97 REVLOGV0
98 98 REVLOGV1
99 99 REVLOGV2
100 100 FLAG_INLINE_DATA
101 101 FLAG_GENERALDELTA
102 102 REVLOG_DEFAULT_FLAGS
103 103 REVLOG_DEFAULT_FORMAT
104 104 REVLOG_DEFAULT_VERSION
105 105 REVLOGV1_FLAGS
106 106 REVLOGV2_FLAGS
107 107 REVIDX_ISCENSORED
108 108 REVIDX_ELLIPSIS
109 109 REVIDX_HASCOPIESINFO
110 110 REVIDX_EXTSTORED
111 111 REVIDX_DEFAULT_FLAGS
112 112 REVIDX_FLAGS_ORDER
113 113 REVIDX_RAWTEXT_CHANGING_FLAGS
114 114
115 115 parsers = policy.importmod('parsers')
116 116 rustancestor = policy.importrust('ancestor')
117 117 rustdagop = policy.importrust('dagop')
118 118 rustrevlog = policy.importrust('revlog')
119 119
120 120 # Aliased for performance.
121 121 _zlibdecompress = zlib.decompress
122 122
123 123 # max size of revlog with inline data
124 124 _maxinline = 131072
125 125 _chunksize = 1048576
126 126
127 127 # Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    """Flag processor (read side) for REVIDX_ELLIPSIS: text passes through,
    and is not validatable (second item False)."""
    return (text, False)
130 130
131 131
def ellipsiswriteprocessor(rl, text):
    """Flag processor (write side) for REVIDX_ELLIPSIS: text passes through,
    and is not validatable (second item False)."""
    return (text, False)
134 134
135 135
def ellipsisrawprocessor(rl, text):
    """Flag processor (raw side) for REVIDX_ELLIPSIS: raw text of an ellipsis
    revision cannot be verified against its hash."""
    return False
138 138
139 139
# (read, write, raw) flag-processor triple registered for REVIDX_ELLIPSIS
ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)
145 145
146 146
def offset_type(offset, type):
    """Pack *offset* and the 16-bit flag field *type* into a single integer.

    Raises ValueError if *type* carries bits outside the known revlog flags.
    """
    unknown_bits = type & ~flagutil.REVIDX_KNOWN_FLAGS
    if unknown_bits != 0:
        raise ValueError(b'unknown revlog index flags')
    return int(int(offset) << 16 | type)
151 151
152 152
153 153 def _verify_revision(rl, skipflags, state, node):
154 154 """Verify the integrity of the given revlog ``node`` while providing a hook
155 155 point for extensions to influence the operation."""
156 156 if skipflags:
157 157 state[b'skipread'].add(node)
158 158 else:
159 159 # Side-effect: read content and verify hash.
160 160 rl.revision(node)
161 161
162 162
# True if a fast implementation for persistent-nodemap is available
#
# We also consider we have a "fast" implementation in "pure" python because
# people using pure don't really have performance consideration (and a
# wheelbarrow of other slowness source)
#
# NOTE(review): `BaseIndexObject` presumably only exists on the pure-Python
# parsers module, making its presence a proxy for "running pure" — confirm.
HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
    parsers, 'BaseIndexObject'
)
171 171
172 172
@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext
    node: expected hash of the revision
    p1, p2: parent revs of the revision
    btext: built text cache consisting of a one-element list
    cachedelta: (baserev, uncompressed_delta) or None
    flags: flags associated to the revision storage

    One of btext[0] or cachedelta must be set.
    """

    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    textlen = attr.ib()  # fulltext length (inferred from name; TODO confirm)
    cachedelta = attr.ib()
    flags = attr.ib()
192 192
193 193
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
    """Concrete ``irevisiondelta``: revision/delta payload emitted by a
    revlog (see ``repository.irevisiondelta`` for the attribute contract)."""

    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    sidedata = attr.ib()
    protocol_flags = attr.ib()
    linknode = attr.ib(default=None)
208 208
209 209
@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem(object):
    """An integrity problem report implementing ``iverifyproblem``; exactly
    one of ``warning``/``error`` is expected to carry the message."""

    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)
216 216
217 217
def parse_index_v1(data, inline):
    """Parse a v1 index via the policy-selected ``parsers`` module and
    return ``(index, cache)``."""
    return parsers.parse_index2(data, inline)
222 222
223 223
def parse_index_v2(data, inline):
    """Parse a revlog-v2 index via the policy-selected ``parsers`` module
    and return ``(index, cache)``."""
    return parsers.parse_index2(data, inline, revlogv2=True)
228 228
229 229
def parse_index_cl_v2(data, inline):
    # changelog-v2 has no C parser yet; use the pure-Python implementation.
    # The import is local, presumably to avoid an import cycle — confirm.
    assert not inline
    from .pure.parsers import parse_index_cl_v2

    index, cache = parse_index_cl_v2(data)
    return index, cache
237
238
if util.safehasattr(parsers, 'parse_index_devel_nodemap'):

    def parse_index_v1_nodemap(data, inline):
        # debug variant that returns a PersistentNodeMapIndexObject
        index, cache = parsers.parse_index_devel_nodemap(data, inline)
        return index, cache


else:
    # the policy-selected parsers module has no nodemap debug support
    parse_index_v1_nodemap = None
239 248
240 249
def parse_index_v1_mixed(data, inline):
    # wrap the default v1 index in the Rust MixedIndex wrapper
    index, cache = parse_index_v1(data, inline)
    return rustrevlog.MixedIndex(index), cache
244 253
245 254
246 255 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
247 256 # signed integer)
248 257 _maxentrysize = 0x7FFFFFFF
249 258
250 259
251 260 class revlog(object):
252 261 """
253 262 the underlying revision storage object
254 263
255 264 A revlog consists of two parts, an index and the revision data.
256 265
257 266 The index is a file with a fixed record size containing
258 267 information on each revision, including its nodeid (hash), the
259 268 nodeids of its parents, the position and offset of its data within
260 269 the data file, and the revision it's based on. Finally, each entry
261 270 contains a linkrev entry that can serve as a pointer to external
262 271 data.
263 272
264 273 The revision data itself is a linear collection of data chunks.
265 274 Each chunk represents a revision and is usually represented as a
266 275 delta against the previous chunk. To bound lookup time, runs of
267 276 deltas are limited to about 2 times the length of the original
268 277 version data. This makes retrieval of a version proportional to
269 278 its size, or O(1) relative to the number of revisions.
270 279
271 280 Both pieces of the revlog are written to in an append-only
272 281 fashion, which means we never need to rewrite a file to insert or
273 282 remove data, and can use some simple techniques to avoid the need
274 283 for locking while reading.
275 284
276 285 If checkambig, indexfile is opened with checkambig=True at
277 286 writing, to avoid file stat ambiguity.
278 287
279 288 If mmaplargeindex is True, and an mmapindexthreshold is set, the
280 289 index will be mmapped rather than read if it is larger than the
281 290 configured threshold.
282 291
283 292 If censorable is True, the revlog can have censored revisions.
284 293
285 294 If `upperboundcomp` is not None, this is the expected maximal gain from
286 295 compression for the data content.
287 296
288 297 `concurrencychecker` is an optional function that receives 3 arguments: a
289 298 file handle, a filename, and an expected position. It should check whether
290 299 the current position in the file handle is valid, and log/warn/fail (by
291 300 raising).
292 301
293 302
294 303 Internal details
295 304 ----------------
296 305
297 306 A large part of the revlog logic deals with revisions' "index entries", tuple
298 307 objects that contains the same "items" whatever the revlog version.
299 308 Different versions will have different ways of storing these items (sometimes
300 309 not having them at all), but the tuple will always be the same. New fields
301 310 are usually added at the end to avoid breaking existing code that relies
302 311 on the existing order. The field are defined as follows:
303 312
304 313 [0] offset:
305 314 The byte index of the start of revision data chunk.
306 315 That value is shifted up by 16 bits. use "offset = field >> 16" to
307 316 retrieve it.
308 317
309 318 flags:
310 319 A flag field that carries special information or changes the behavior
311 320 of the revision. (see `REVIDX_*` constants for details)
312 321 The flag field only occupies the first 16 bits of this field,
313 322 use "flags = field & 0xFFFF" to retrieve the value.
314 323
315 324 [1] compressed length:
316 325 The size, in bytes, of the chunk on disk
317 326
318 327 [2] uncompressed length:
319 328 The size, in bytes, of the full revision once reconstructed.
320 329
321 330 [3] base rev:
322 331 Either the base of the revision delta chain (without general
323 332 delta), or the base of the delta (stored in the data chunk)
324 333 with general delta.
325 334
326 335 [4] link rev:
327 336 Changelog revision number of the changeset introducing this
328 337 revision.
329 338
330 339 [5] parent 1 rev:
331 340 Revision number of the first parent
332 341
333 342 [6] parent 2 rev:
334 343 Revision number of the second parent
335 344
336 345 [7] node id:
337 346 The node id of the current revision
338 347
339 348 [8] sidedata offset:
340 349 The byte index of the start of the revision's side-data chunk.
341 350
342 351 [9] sidedata chunk length:
343 352 The size, in bytes, of the revision's side-data chunk.
344 353
345 354 [10] data compression mode:
346 355 two bits that detail the way the data chunk is compressed on disk.
347 356 (see "COMP_MODE_*" constants for details). For revlog version 0 and
348 357 1 this will always be COMP_MODE_INLINE.
349 358
350 359 [11] side-data compression mode:
351 360 two bits that detail the way the sidedata chunk is compressed on disk.
352 361 (see "COMP_MODE_*" constants for details)
353 362 """
354 363
355 364 _flagserrorclass = error.RevlogError
356 365
    def __init__(
        self,
        opener,
        target,
        radix,
        postfix=None,  # only exist for `tmpcensored` now
        checkambig=False,
        mmaplargeindex=False,
        censorable=False,
        upperboundcomp=None,
        persistentnodemap=False,
        concurrencychecker=None,
        trypending=False,
    ):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        `target`: a (KIND, ID) tuple that identify the content stored in
        this revlog. It help the rest of the code to understand what the revlog
        is about without having to resort to heuristic and index filename
        analysis. Note: that this must be reliably be set by normal code, but
        that test, debug, or performance measurement code might not set this to
        accurate value.
        """
        self.upperboundcomp = upperboundcomp

        self.radix = radix

        # file names/handles are resolved later in _loadindex()
        self._docket_file = None
        self._indexfile = None
        self._datafile = None
        self._nodemap_file = None
        self.postfix = postfix
        self._trypending = trypending
        self.opener = opener
        if persistentnodemap:
            self._nodemap_file = nodemaputil.get_nodemap_file(self)

        assert target[0] in ALL_KINDS
        assert len(target) == 2
        self.target = target
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        self._mmaplargeindex = mmaplargeindex
        self._censorable = censorable
        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, b'')
        # How much data to read and cache into the raw revlog data cache.
        # Defaults below may be overridden from opener options in _init_opts().
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._deltabothparents = True
        self.index = None
        self._docket = None
        self._nodemap_docket = None
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        # NOTE(review): the comment above looks stale — no such mapping is
        # initialized here.
        self._compengine = b'zlib'
        self._compengineopts = {}
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._sparserevlog = False
        self.hassidedata = False
        self._srdensitythreshold = 0.50
        self._srmingapsize = 262144

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)

        # 2-tuple of file handles being used for active writing.
        self._writinghandles = None
        # prevent nesting of addgroup
        self._adding_group = None

        # read options and the on-disk index/docket
        self._loadindex()

        self._concurrencychecker = concurrencychecker
443 452
    def _init_opts(self):
        """process options (from above/config) to setup associated default revlog mode

        These values might be affected when actually reading on disk information.

        The relevant values are returned for use in _loadindex().

        * newversionflags:
            version header to use if we need to create a new revlog

        * mmapindexthreshold:
            minimal index size for start to use mmap

        * force_nodemap:
            force the usage of a "development" version of the nodemap code
        """
        mmapindexthreshold = None
        opts = self.opener.options

        # pick the header for newly created revlogs; the changelog gets its
        # dedicated format when `changelogv2` is enabled
        if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
            new_header = CHANGELOGV2
        elif b'revlogv2' in opts:
            new_header = REVLOGV2
        elif b'revlogv1' in opts:
            new_header = REVLOGV1 | FLAG_INLINE_DATA
            if b'generaldelta' in opts:
                new_header |= FLAG_GENERALDELTA
        elif b'revlogv0' in self.opener.options:
            new_header = REVLOGV0
        else:
            new_header = REVLOG_DEFAULT_VERSION

        # override the defaults set in __init__ from opener options
        if b'chunkcachesize' in opts:
            self._chunkcachesize = opts[b'chunkcachesize']
        if b'maxchainlen' in opts:
            self._maxchainlen = opts[b'maxchainlen']
        if b'deltabothparents' in opts:
            self._deltabothparents = opts[b'deltabothparents']
        self._lazydelta = bool(opts.get(b'lazydelta', True))
        self._lazydeltabase = False
        if self._lazydelta:
            self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
        if b'compengine' in opts:
            self._compengine = opts[b'compengine']
        if b'zlib.level' in opts:
            self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
        if b'zstd.level' in opts:
            self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
        if b'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts[b'maxdeltachainspan']
        if self._mmaplargeindex and b'mmapindexthreshold' in opts:
            mmapindexthreshold = opts[b'mmapindexthreshold']
        self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
        withsparseread = bool(opts.get(b'with-sparse-read', False))
        # sparse-revlog forces sparse-read
        self._withsparseread = self._sparserevlog or withsparseread
        if b'sparse-read-density-threshold' in opts:
            self._srdensitythreshold = opts[b'sparse-read-density-threshold']
        if b'sparse-read-min-gap-size' in opts:
            self._srmingapsize = opts[b'sparse-read-min-gap-size']
        if opts.get(b'enableellipsis'):
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in pycompat.iteritems(
            opts.get(b'flagprocessors', {})
        ):
            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

        # validate the cache size: strictly positive and a power of two
        if self._chunkcachesize <= 0:
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not greater than 0')
                % self._chunkcachesize
            )
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not a power of 2')
                % self._chunkcachesize
            )
        force_nodemap = opts.get(b'devel-force-nodemap', False)
        return new_header, mmapindexthreshold, force_nodemap
525 534
    def _get_data(self, filepath, mmap_threshold, size=None):
        """return a file content with or without mmap

        If the file is missing return the empty string.

        :filepath: vfs-relative path of the file to read
        :mmap_threshold: when not None, files at least this large are
            mmap-ed instead of read into memory
        :size: optional maximum number of bytes to return
        """
        try:
            with self.opener(filepath) as fp:
                if mmap_threshold is not None:
                    file_size = self.opener.fstat(fp).st_size
                    if file_size >= mmap_threshold:
                        if size is not None:
                            # avoid potential mmap crash: never map past the
                            # actual end of the file
                            size = min(file_size, size)
                        # TODO: should .close() to release resources without
                        # relying on Python GC
                        if size is None:
                            return util.buffer(util.mmapread(fp))
                        else:
                            return util.buffer(util.mmapread(fp, size))
                # small file (or no mmap threshold): plain read
                if size is None:
                    return fp.read()
                else:
                    return fp.read(size)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # missing file is not an error for callers; report empty content
            return b''
552 561
    def _loadindex(self):
        """Read and parse the revlog entry point (index or docket).

        Determines the on-disk format version/flags, selects the matching
        index parser, loads the index data (directly for v0/v1, through the
        docket for docket-based formats) and initializes the related caches.
        """

        new_header, mmapindexthreshold, force_nodemap = self._init_opts()

        # pick the entry point file: explicit postfix, pending file during a
        # transaction, or the plain `.i` file
        if self.postfix is not None:
            entry_point = b'%s.i.%s' % (self.radix, self.postfix)
        elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
            entry_point = b'%s.i.a' % self.radix
        else:
            entry_point = b'%s.i' % self.radix

        entry_data = b''
        self._initempty = True
        entry_data = self._get_data(entry_point, mmapindexthreshold)
        if len(entry_data) > 0:
            header = INDEX_HEADER.unpack(entry_data[:4])[0]
            self._initempty = False
        else:
            # empty/missing file: fall back to the configured default format
            header = new_header

        # high 16 bits are format flags, low 16 bits the format version
        self._format_flags = header & ~0xFFFF
        self._format_version = header & 0xFFFF

        supported_flags = SUPPORTED_FLAGS.get(self._format_version)
        if supported_flags is None:
            msg = _(b'unknown version (%d) in revlog %s')
            msg %= (self._format_version, self.display_id)
            raise error.RevlogError(msg)
        elif self._format_flags & ~supported_flags:
            msg = _(b'unknown flags (%#04x) in version %d revlog %s')
            display_flag = self._format_flags >> 16
            msg %= (display_flag, self._format_version, self.display_id)
            raise error.RevlogError(msg)

        features = FEATURES_BY_VERSION[self._format_version]
        self._inline = features[b'inline'](self._format_flags)
        self._generaldelta = features[b'generaldelta'](self._format_flags)
        self.hassidedata = features[b'sidedata']

        if not features[b'docket']:
            # v0/v1: the entry point *is* the index
            self._indexfile = entry_point
            index_data = entry_data
        else:
            # docket-based formats: the entry point is a small docket that
            # points at the real index file
            self._docket_file = entry_point
            if self._initempty:
                self._docket = docketutil.default_docket(self, header)
            else:
                self._docket = docketutil.parse_docket(
                    self, entry_data, use_pending=self._trypending
                )
            self._indexfile = self._docket.index_filepath()
            index_data = b''
            index_size = self._docket.index_end
            if index_size > 0:
                index_data = self._get_data(
                    self._indexfile, mmapindexthreshold, size=index_size
                )
                if len(index_data) < index_size:
                    msg = _(b'too few index data for %s: got %d, expected %d')
                    msg %= (self.display_id, len(index_data), index_size)
                    raise error.RevlogError(msg)

            self._inline = False
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True
            # the logic for persistent nodemap will be dealt with within the
            # main docket, so disable it for now.
            self._nodemap_file = None

        if self.postfix is None:
            self._datafile = b'%s.d' % self.radix
        else:
            self._datafile = b'%s.d.%s' % (self.radix, self.postfix)

        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid

        # sparse-revlog can't be on without general-delta (issue6056)
        if not self._generaldelta:
            self._sparserevlog = False

        self._storedeltachains = True

        devel_nodemap = (
            self._nodemap_file
            and force_nodemap
            and parse_index_v1_nodemap is not None
        )

        use_rust_index = False
        if rustrevlog is not None:
            if self._nodemap_file is not None:
                use_rust_index = True
            else:
                use_rust_index = self.opener.options.get(b'rust.index')

        # select the index parser matching the detected format
        self._parse_index = parse_index_v1
        if self._format_version == REVLOGV0:
            self._parse_index = revlogv0.parse_index_v0
        elif self._format_version == REVLOGV2:
            self._parse_index = parse_index_v2
        elif self._format_version == CHANGELOGV2:
            # changelog-v2 has its own dedicated on-disk format
            self._parse_index = parse_index_cl_v2
        elif devel_nodemap:
            self._parse_index = parse_index_v1_nodemap
        elif use_rust_index:
            self._parse_index = parse_index_v1_mixed
        try:
            d = self._parse_index(index_data, self._inline)
            index, _chunkcache = d
            use_nodemap = (
                not self._inline
                and self._nodemap_file is not None
                and util.safehasattr(index, 'update_nodemap_data')
            )
            if use_nodemap:
                nodemap_data = nodemaputil.persisted_data(self)
                if nodemap_data is not None:
                    docket = nodemap_data[0]
                    if (
                        len(d[0]) > docket.tip_rev
                        and d[0][docket.tip_rev][7] == docket.tip_node
                    ):
                        # no changelog tampering
                        self._nodemap_docket = docket
                        index.update_nodemap_data(*nodemap_data)
        except (ValueError, IndexError):
            raise error.RevlogError(
                _(b"index %s is corrupted") % self.display_id
            )
        self.index, self._chunkcache = d
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = util.lrucachedict(500)
        # revlog header -> revlog compressor
        self._decompressors = {}
690 699
    @util.propertycache
    def revlog_kind(self):
        """The kind of this revlog (first component of ``self.target``)."""
        return self.target[0]
694 703
    @util.propertycache
    def display_id(self):
        """The public facing "ID" of the revlog that we use in message"""
        # Maybe we should build a user facing representation of
        # revlog.target instead of using `self.radix`
        return self.radix
701 710
702 711 def _get_decompressor(self, t):
703 712 try:
704 713 compressor = self._decompressors[t]
705 714 except KeyError:
706 715 try:
707 716 engine = util.compengines.forrevlogheader(t)
708 717 compressor = engine.revlogcompressor(self._compengineopts)
709 718 self._decompressors[t] = compressor
710 719 except KeyError:
711 720 raise error.RevlogError(
712 721 _(b'unknown compression type %s') % binascii.hexlify(t)
713 722 )
714 723 return compressor
715 724
    @util.propertycache
    def _compressor(self):
        """the compressor engine configured for this revlog"""
        engine = util.compengines[self._compengine]
        return engine.revlogcompressor(self._compengineopts)
720 729
    @util.propertycache
    def _decompressor(self):
        """the default decompressor"""
        # only docket-based formats record a default compression header
        if self._docket is None:
            return None
        t = self._docket.default_compression_header
        c = self._get_decompressor(t)
        return c.decompress
729 738
    def _indexfp(self):
        """file object for the revlog's index file"""
        return self.opener(self._indexfile, mode=b"r")
733 742
    def __index_write_fp(self):
        """Return a file object positioned for appending to the index.

        Falls back to creating the file when it does not exist yet.
        """
        # You should not use this directly and use `_writing` instead
        try:
            f = self.opener(
                self._indexfile, mode=b"r+", checkambig=self._checkambig
            )
            if self._docket is None:
                # no docket: append at the physical end of the file
                f.seek(0, os.SEEK_END)
            else:
                # docket formats track the logical end of the index themselves
                f.seek(self._docket.index_end, os.SEEK_SET)
            return f
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return self.opener(
                self._indexfile, mode=b"w+", checkambig=self._checkambig
            )
751 760
    def __index_new_fp(self):
        """Return an atomictemp file object for rewriting the index from
        scratch."""
        # You should not use this unless you are upgrading from inline revlog
        return self.opener(
            self._indexfile,
            mode=b"w",
            checkambig=self._checkambig,
            atomictemp=True,
        )
760 769
    def _datafp(self, mode=b'r'):
        """file object for the revlog's data file"""
        return self.opener(self._datafile, mode=mode)
764 773
    @contextlib.contextmanager
    def _datareadfp(self, existingfp=None):
        """file object suitable to read data"""
        # Use explicit file handle, if given.
        if existingfp is not None:
            yield existingfp

        # Use a file handle being actively used for writes, if available.
        # There is some danger to doing this because reads will seek the
        # file. However, _writeentry() performs a SEEK_END before all writes,
        # so we should be safe.
        elif self._writinghandles:
            if self._inline:
                yield self._writinghandles[0]
            else:
                yield self._writinghandles[1]

        # Otherwise open a new file handle.
        else:
            if self._inline:
                func = self._indexfp
            else:
                func = self._datafp
            with func() as fp:
                yield fp
790 799
791 800 def tiprev(self):
792 801 return len(self.index) - 1
793 802
    def tip(self):
        """Return the node id of the tip revision."""
        return self.node(self.tiprev())
796 805
    def __contains__(self, rev):
        """True if ``rev`` is a valid revision number for this revlog."""
        return 0 <= rev < len(self)
799 808
    def __len__(self):
        """Number of revisions stored in this revlog."""
        return len(self.index)
802 811
    def __iter__(self):
        """Iterate over all revision numbers in ascending order."""
        return iter(pycompat.xrange(len(self)))
805 814
    def revs(self, start=0, stop=None):
        """iterate over all rev in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)
809 818
    @property
    def nodemap(self):
        """Deprecated accessor; emits a devel warning and forwards to
        ``self.index.nodemap``."""
        msg = (
            b"revlog.nodemap is deprecated, "
            b"use revlog.index.[has_node|rev|get_rev]"
        )
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap
818 827
    @property
    def _nodecache(self):
        """Deprecated accessor; emits a devel warning and forwards to
        ``self.index.nodemap``."""
        msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap
824 833
825 834 def hasnode(self, node):
826 835 try:
827 836 self.rev(node)
828 837 return True
829 838 except KeyError:
830 839 return False
831 840
    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such flag processor can alter
        # the rawtext content that the delta will be based on, and two clients
        # could have a same revlog node with different flags (i.e. different
        # rawtext contents) and the delta could be incompatible.
        if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
            self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
        ):
            return False
        return True
844 853
    def update_caches(self, transaction):
        """Update on-disk caches (the persistent nodemap).

        When ``transaction`` is None the nodemap is written right away,
        otherwise the write is attached to the transaction.
        """
        if self._nodemap_file is not None:
            if transaction is None:
                nodemaputil.update_persistent_nodemap(self)
            else:
                nodemaputil.setup_persistent_nodemap(transaction, self)
851 860
    def clearcaches(self):
        """Drop all in-memory caches, then reload the persistent nodemap
        data when available."""
        self._revisioncache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, b'')
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The python code is the one responsible for validating the docket, we
        # end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self._nodemap_file is not None
            and util.safehasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)
871 880
    def rev(self, node):
        """Return the revision number bound to ``node``.

        Raises ``WdirUnsupported`` for the working-directory pseudo nodes
        and ``LookupError`` for unknown nodes.
        """
        try:
            return self.index.rev(node)
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if (
                node == self.nodeconstants.wdirid
                or node in self.nodeconstants.wdirfilenodeids
            ):
                raise error.WdirUnsupported
            raise error.LookupError(node, self.display_id, _(b'no node'))
885 894
886 895 # Accessors for index entries.
887 896
888 897 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
889 898 # are flags.
890 899 def start(self, rev):
891 900 return int(self.index[rev][0] >> 16)
892 901
893 902 def flags(self, rev):
894 903 return self.index[rev][0] & 0xFFFF
895 904
896 905 def length(self, rev):
897 906 return self.index[rev][1]
898 907
899 908 def sidedata_length(self, rev):
900 909 if not self.hassidedata:
901 910 return 0
902 911 return self.index[rev][9]
903 912
904 913 def rawsize(self, rev):
905 914 """return the length of the uncompressed text for a given revision"""
906 915 l = self.index[rev][2]
907 916 if l >= 0:
908 917 return l
909 918
910 919 t = self.rawdata(rev)
911 920 return len(t)
912 921
    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        # slow path: run the revision through the flag processors
        return len(self.revision(rev, raw=False))
922 931
923 932 def chainbase(self, rev):
924 933 base = self._chainbasecache.get(rev)
925 934 if base is not None:
926 935 return base
927 936
928 937 index = self.index
929 938 iterrev = rev
930 939 base = index[iterrev][3]
931 940 while base != iterrev:
932 941 iterrev = base
933 942 base = index[iterrev][3]
934 943
935 944 self._chainbasecache[rev] = base
936 945 return base
937 946
938 947 def linkrev(self, rev):
939 948 return self.index[rev][4]
940 949
    def parentrevs(self, rev):
        """Return the two parent revisions of ``rev``.

        When the first parent is null but the second is not, the non-null
        parent is reported first so the null parent always comes last.
        Raises ``WdirUnsupported`` for the working-directory pseudo rev.
        """
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise
        if entry[5] == nullrev:
            return entry[6], entry[5]
        else:
            return entry[5], entry[6]

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs
955 964
    def node(self, rev):
        """Return the node id of revision ``rev``.

        Raises ``WdirUnsupported`` for the working-directory pseudo rev.
        """
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise
963 972
    # Derived from index values.

    def end(self, rev):
        """Return the data-file byte offset just past revision ``rev``."""
        return self.start(rev) + self.length(rev)
968 977
969 978 def parents(self, node):
970 979 i = self.index
971 980 d = i[self.rev(node)]
972 981 # inline node() to avoid function call overhead
973 982 if d[5] == self.nullid:
974 983 return i[d[6]][7], i[d[5]][7]
975 984 else:
976 985 return i[d[5]][7], i[d[6]][7]
977 986
    def chainlen(self, rev):
        """Return the length of the delta chain for ``rev``."""
        return self._chaininfo(rev)[0]
980 989
    def _chaininfo(self, rev):
        """Return ``(chain-length, sum-of-compressed-delta-lengths)`` for
        ``rev``, splicing in cached results for intermediate revisions."""
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        # walk down the delta chain until the base (or a cached prefix)
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r
1011 1020
1012 1021 def _deltachain(self, rev, stoprev=None):
1013 1022 """Obtain the delta chain for a revision.
1014 1023
1015 1024 ``stoprev`` specifies a revision to stop at. If not specified, we
1016 1025 stop at the base of the chain.
1017 1026
1018 1027 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
1019 1028 revs in ascending order and ``stopped`` is a bool indicating whether
1020 1029 ``stoprev`` was hit.
1021 1030 """
1022 1031 # Try C implementation.
1023 1032 try:
1024 1033 return self.index.deltachain(rev, stoprev, self._generaldelta)
1025 1034 except AttributeError:
1026 1035 pass
1027 1036
1028 1037 chain = []
1029 1038
1030 1039 # Alias to prevent attribute lookup in tight loop.
1031 1040 index = self.index
1032 1041 generaldelta = self._generaldelta
1033 1042
1034 1043 iterrev = rev
1035 1044 e = index[iterrev]
1036 1045 while iterrev != e[3] and iterrev != stoprev:
1037 1046 chain.append(iterrev)
1038 1047 if generaldelta:
1039 1048 iterrev = e[3]
1040 1049 else:
1041 1050 iterrev -= 1
1042 1051 e = index[iterrev]
1043 1052
1044 1053 if iterrev == stoprev:
1045 1054 stopped = True
1046 1055 else:
1047 1056 chain.append(iterrev)
1048 1057 stopped = False
1049 1058
1050 1059 chain.reverse()
1051 1060 return chain, stopped
1052 1061
    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        # prefer the Rust implementation when the index is compatible
        if rustancestor is not None and self.index.rust_ext_compat:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1073 1082
    def descendants(self, revs):
        """Generate the descendant revisions of ``revs``."""
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1076 1085
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

        ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            # set-like wrapper that unions lazily-computed values with
            # explicitly added ones
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if not r in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]
1140 1149
    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        # prefer the Rust implementation when the index is compatible
        if rustancestor is not None and self.index.rust_ext_compat:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)
1156 1165
    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)
1180 1189
    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]
1206 1215
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'.  Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs.  If 'roots' is
        unspecified, uses nullid as the only root.  If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [self.nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return (
                [self.node(r) for r in self],
                [self.nullid],
                list(self.heads()),
            )
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == self.nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != self.nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [self.nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked is descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
                else:
                    p = tuple(self.parents(n))
                    # A node is a descendant if either of its parents are
                    # descendants.  (We seeded the dependents list with the roots
                    # up there, remember?)
                    if (p[0] in descendants) or (p[1] in descendants):
                        descendants.add(n)
                        isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in pycompat.iteritems(heads) if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
1366 1375
    def headrevs(self, revs=None):
        """Return the head revisions of the whole revlog, or of the subset
        ``revs`` when given."""
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                # index implementation without native headrevs support
                return self._headrevs()
        if rustdagop is not None and self.index.rust_ext_compat:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)
1376 1385
    def computephases(self, roots):
        """Compute phase maps and sets from phase ``roots`` (delegates to
        the index implementation)."""
        return self.index.computephasesmapsets(roots)
1379 1388
    def _headrevs(self):
        """Pure-python fallback computing the head revisions."""
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iter over filtered rev so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be an head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parent are not
        return [r for r, val in enumerate(ishead) if val]
1392 1401
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [self.nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]
1418 1427
1419 1428 def children(self, node):
1420 1429 """find the children of a given node"""
1421 1430 c = []
1422 1431 p = self.rev(node)
1423 1432 for r in self.revs(start=p + 1):
1424 1433 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1425 1434 if prevs:
1426 1435 for pr in prevs:
1427 1436 if pr == p:
1428 1437 c.append(self.node(r))
1429 1438 elif p == nullrev:
1430 1439 c.append(self.node(r))
1431 1440 return c
1432 1441
1433 1442 def commonancestorsheads(self, a, b):
1434 1443 """calculate all the heads of the common ancestors of nodes a and b"""
1435 1444 a, b = self.rev(a), self.rev(b)
1436 1445 ancs = self._commonancestorsheads(a, b)
1437 1446 return pycompat.maplist(self.node, ancs)
1438 1447
1439 1448 def _commonancestorsheads(self, *revs):
1440 1449 """calculate all the heads of the common ancestors of revs"""
1441 1450 try:
1442 1451 ancs = self.index.commonancestorsheads(*revs)
1443 1452 except (AttributeError, OverflowError): # C implementation failed
1444 1453 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1445 1454 return ancs
1446 1455
1447 1456 def isancestor(self, a, b):
1448 1457 """return True if node a is an ancestor of node b
1449 1458
1450 1459 A revision is considered an ancestor of itself."""
1451 1460 a, b = self.rev(a), self.rev(b)
1452 1461 return self.isancestorrev(a, b)
1453 1462
1454 1463 def isancestorrev(self, a, b):
1455 1464 """return True if revision a is an ancestor of revision b
1456 1465
1457 1466 A revision is considered an ancestor of itself.
1458 1467
1459 1468 The implementation of this is trivial but the use of
1460 1469 reachableroots is not."""
1461 1470 if a == nullrev:
1462 1471 return True
1463 1472 elif a == b:
1464 1473 return True
1465 1474 elif a > b:
1466 1475 return False
1467 1476 return bool(self.reachableroots(a, [b], [a], includepath=False))
1468 1477
1469 1478 def reachableroots(self, minroot, heads, roots, includepath=False):
1470 1479 """return (heads(::(<roots> and <roots>::<heads>)))
1471 1480
1472 1481 If includepath is True, return (<roots>::<heads>)."""
1473 1482 try:
1474 1483 return self.index.reachableroots2(
1475 1484 minroot, heads, roots, includepath
1476 1485 )
1477 1486 except AttributeError:
1478 1487 return dagop._reachablerootspure(
1479 1488 self.parentrevs, minroot, roots, heads, includepath
1480 1489 )
1481 1490
1482 1491 def ancestor(self, a, b):
1483 1492 """calculate the "best" common ancestor of nodes a and b"""
1484 1493
1485 1494 a, b = self.rev(a), self.rev(b)
1486 1495 try:
1487 1496 ancs = self.index.ancestors(a, b)
1488 1497 except (AttributeError, OverflowError):
1489 1498 ancs = ancestor.ancestors(self.parentrevs, a, b)
1490 1499 if ancs:
1491 1500 # choose a consistent winner when there's a tie
1492 1501 return min(map(self.node, ancs))
1493 1502 return self.nullid
1494 1503
    def _match(self, id):
        """Try to resolve ``id`` to a binary node via exact-ish matches.

        ``id`` may be a revision number (int), a binary node, the string
        form of a revision number, or a full hex nodeid.  Returns the
        binary node on success, ``None`` otherwise (prefix resolution is
        handled separately by ``_partialmatch``).
        """
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == self.nodeconstants.nodelen:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            # reject strings like b'01' whose int round-trip differs
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                # negative numbers count from the end
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 2 * self.nodeconstants.nodelen:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass
1528 1537
    def _partialmatch(self, id):
        """Resolve a hex nodeid prefix to a full binary node.

        Returns the node, or ``None`` when nothing matches.  Raises
        ``AmbiguousPrefixLookupError`` when several nodes match and
        ``WdirUnsupported`` when the prefix designates the virtual
        working-directory node.
        """
        # we don't care wdirfilenodeids as they should be always full hash
        maybewdir = self.nodeconstants.wdirhex.startswith(id)
        try:
            # fast path: the C radix tree, when available
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.display_id, _(b'ambiguous identifier')
                )
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        # slow path: consult the prefix cache, then scan the whole index
        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[: l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if self.nodeconstants.nullhex.startswith(id):
                    nl.append(self.nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        # unique match: remember it for next time
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.display_id, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                # bin() choked: id was not valid hex
                pass
1581 1590
1582 1591 def lookup(self, id):
1583 1592 """locate a node based on:
1584 1593 - revision number or str(revision number)
1585 1594 - nodeid or subset of hex nodeid
1586 1595 """
1587 1596 n = self._match(id)
1588 1597 if n is not None:
1589 1598 return n
1590 1599 n = self._partialmatch(id)
1591 1600 if n:
1592 1601 return n
1593 1602
1594 1603 raise error.LookupError(id, self.display_id, _(b'no match found'))
1595 1604
    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            # a prefix is valid when it resolves to exactly one node
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.display_id, _(b'no node'))
            return True

        def maybewdir(prefix):
            # all-'f' prefixes could also designate the wdir pseudo-node
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            # unfiltered: the index can compute the shortest prefix natively
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != self.nodeconstants.wdirid:
                    raise error.LookupError(
                        node, self.display_id, _(b'no node')
                    )
            except AttributeError:
                # Fall through to pure code
                pass

        if node == self.nodeconstants.wdirid:
            # wdir itself: any valid prefix will do
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        # pure-python fallback: grow the prefix until it is unambiguous
        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)
1646 1655
1647 1656 def cmp(self, node, text):
1648 1657 """compare text with a given file revision
1649 1658
1650 1659 returns True if text is different than what is stored.
1651 1660 """
1652 1661 p1, p2 = self.parents(node)
1653 1662 return storageutil.hashrevisionsha1(text, p1, p2) != node
1654 1663
1655 1664 def _cachesegment(self, offset, data):
1656 1665 """Add a segment to the revlog cache.
1657 1666
1658 1667 Accepts an absolute offset and the data that is at that location.
1659 1668 """
1660 1669 o, d = self._chunkcache
1661 1670 # try to add to existing cache
1662 1671 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1663 1672 self._chunkcache = o, d + data
1664 1673 else:
1665 1674 self._chunkcache = offset, data
1666 1675
    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        # round the window down/up to cache-size alignment (cachesize is a
        # power of two, so the mask arithmetic is exact)
        realoffset = offset & ~(cachesize - 1)
        reallength = (
            (offset + length + cachesize) & ~(cachesize - 1)
        ) - realoffset
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            # the window was enlarged: carve the requested span out of it
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _(
                        b'partial read of revlog %s; expected %d bytes from '
                        b'offset %d, got %d'
                    )
                    % (
                        self._indexfile if self._inline else self._datafile,
                        length,
                        offset,
                        len(d) - startoffset,
                    )
                )

            # zero-copy view into the cached window
            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _(
                    b'partial read of revlog %s; expected %d bytes from offset '
                    b'%d, got %d'
                )
                % (
                    self._indexfile if self._inline else self._datafile,
                    length,
                    offset,
                    len(d),
                )
            )

        return d
1726 1735
1727 1736 def _getsegment(self, offset, length, df=None):
1728 1737 """Obtain a segment of raw data from the revlog.
1729 1738
1730 1739 Accepts an absolute offset, length of bytes to obtain, and an
1731 1740 optional file handle to the already-opened revlog. If the file
1732 1741 handle is used, it's original seek position will not be preserved.
1733 1742
1734 1743 Requests for data may be returned from a cache.
1735 1744
1736 1745 Returns a str or a buffer instance of raw byte data.
1737 1746 """
1738 1747 o, d = self._chunkcache
1739 1748 l = len(d)
1740 1749
1741 1750 # is it in the cache?
1742 1751 cachestart = offset - o
1743 1752 cacheend = cachestart + length
1744 1753 if cachestart >= 0 and cacheend <= l:
1745 1754 if cachestart == 0 and cacheend == l:
1746 1755 return d # avoid a copy
1747 1756 return util.buffer(d, cachestart, cacheend - cachestart)
1748 1757
1749 1758 return self._readsegment(offset, length, df=df)
1750 1759
    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        # entry slot 0 packs offset<<16 | flags; shift to get the offset
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            # inline revlogs interleave index entries with the data, so
            # every revision's data is shifted by the entries before it
            start += (startrev + 1) * self.index.entry_size
            end += (endrev + 1) * self.index.entry_size
        length = end - start

        return start, self._getsegment(start, length, df=df)
1784 1793
1785 1794 def _chunk(self, rev, df=None):
1786 1795 """Obtain a single decompressed chunk for a revision.
1787 1796
1788 1797 Accepts an integer revision and an optional already-open file handle
1789 1798 to be used for reading. If used, the seek position of the file will not
1790 1799 be preserved.
1791 1800
1792 1801 Returns a str holding uncompressed data for the requested revision.
1793 1802 """
1794 1803 compression_mode = self.index[rev][10]
1795 1804 data = self._getsegmentforrevs(rev, rev, df=df)[1]
1796 1805 if compression_mode == COMP_MODE_PLAIN:
1797 1806 return data
1798 1807 elif compression_mode == COMP_MODE_DEFAULT:
1799 1808 return self._decompressor(data)
1800 1809 elif compression_mode == COMP_MODE_INLINE:
1801 1810 return self.decompress(data)
1802 1811 else:
1803 1812 msg = 'unknown compression mode %d'
1804 1813 msg %= compression_mode
1805 1814 raise error.RevlogError(msg)
1806 1815
    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        # hoist attribute lookups out of the hot loop
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self.index.entry_size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            # split the revs into dense slices to avoid reading huge gaps
            slicedchunks = deltautil.slicechunk(
                self, revs, targetsize=targetsize
            )

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                # one contiguous read covering the whole slice
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            # self._decompressor might be None, but will not be used in that case
            def_decomp = self._decompressor
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    # account for the interleaved index entries
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                # index slot 10 is this revision's compression mode
                comp_mode = self.index[rev][10]
                c = buffer(data, chunkstart - offset, chunklength)
                if comp_mode == COMP_MODE_PLAIN:
                    ladd(c)
                elif comp_mode == COMP_MODE_INLINE:
                    ladd(decomp(c))
                elif comp_mode == COMP_MODE_DEFAULT:
                    ladd(def_decomp(c))
                else:
                    msg = 'unknown compression mode %d'
                    msg %= comp_mode
                    raise error.RevlogError(msg)

        return l
1874 1883
1875 1884 def _chunkclear(self):
1876 1885 """Clear the raw chunk cache."""
1877 1886 self._chunkcache = (0, b'')
1878 1887
1879 1888 def deltaparent(self, rev):
1880 1889 """return deltaparent of the given revision"""
1881 1890 base = self.index[rev][3]
1882 1891 if base == rev:
1883 1892 return nullrev
1884 1893 elif self._generaldelta:
1885 1894 return base
1886 1895 else:
1887 1896 return rev - 1
1888 1897
    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        if not self._sparserevlog:
            # without sparse-revlog only full-text revisions are snapshots
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, b'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            # delta base is itself: a full snapshot
            return True
        if base == nullrev:
            # delta against null: an intermediate snapshot
            return True
        p1 = entry[5]
        p2 = entry[6]
        if base == p1 or base == p2:
            # delta against a parent: an ordinary delta, not a snapshot
            return False
        # a snapshot can only be based on another snapshot, so recurse
        return self.issnapshot(base)
1910 1919
1911 1920 def snapshotdepth(self, rev):
1912 1921 """number of snapshot in the chain before this one"""
1913 1922 if not self.issnapshot(rev):
1914 1923 raise error.ProgrammingError(b'revision %d not a snapshot')
1915 1924 return len(self._deltachain(rev)[0]) - 1
1916 1925
1917 1926 def revdiff(self, rev1, rev2):
1918 1927 """return or calculate a delta between two revisions
1919 1928
1920 1929 The delta calculated is in binary form and is intended to be written to
1921 1930 revlog data directly. So this function needs raw revision data.
1922 1931 """
1923 1932 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1924 1933 return bytes(self._chunk(rev2))
1925 1934
1926 1935 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1927 1936
1928 1937 def _processflags(self, text, flags, operation, raw=False):
1929 1938 """deprecated entry point to access flag processors"""
1930 1939 msg = b'_processflag(...) use the specialized variant'
1931 1940 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1932 1941 if raw:
1933 1942 return text, flagutil.processflagsraw(self, text, flags)
1934 1943 elif operation == b'read':
1935 1944 return flagutil.processflagsread(self, text, flags)
1936 1945 else: # write operation
1937 1946 return flagutil.processflagswrite(self, text, flags)
1938 1947
1939 1948 def revision(self, nodeorrev, _df=None, raw=False):
1940 1949 """return an uncompressed revision of a given node or revision
1941 1950 number.
1942 1951
1943 1952 _df - an existing file handle to read from. (internal-only)
1944 1953 raw - an optional argument specifying if the revision data is to be
1945 1954 treated as raw data when applying flag transforms. 'raw' should be set
1946 1955 to True when generating changegroups or in debug commands.
1947 1956 """
1948 1957 if raw:
1949 1958 msg = (
1950 1959 b'revlog.revision(..., raw=True) is deprecated, '
1951 1960 b'use revlog.rawdata(...)'
1952 1961 )
1953 1962 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1954 1963 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1955 1964
1956 1965 def sidedata(self, nodeorrev, _df=None):
1957 1966 """a map of extra data related to the changeset but not part of the hash
1958 1967
1959 1968 This function currently return a dictionary. However, more advanced
1960 1969 mapping object will likely be used in the future for a more
1961 1970 efficient/lazy code.
1962 1971 """
1963 1972 return self._revisiondata(nodeorrev, _df)[1]
1964 1973
    def _revisiondata(self, nodeorrev, _df=None, raw=False):
        """Return ``(text, sidedata)`` for a node or revision number.

        When ``raw`` is True the stored rawtext is returned without running
        read-side flag processors (it is still hash-checked when needed).
        """
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == self.nullid:
            return b"", {}

        # ``rawtext`` is the text as stored inside the revlog. Might be the
        # revision or might need to be processed to retrieve the revision.
        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)

        if self.hassidedata:
            if rev is None:
                rev = self.rev(node)
            sidedata = self._sidedata(rev)
        else:
            sidedata = {}

        if raw and validated:
            # if we don't want to process the raw text and that raw
            # text is cached, we can exit early.
            return rawtext, sidedata
        if rev is None:
            rev = self.rev(node)
        # the revlog's flag for this revision
        # (usually alter its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext, sidedata

        if raw:
            # raw access: only verify the hash, do not transform the text
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            # run the read-side flag processors (may rewrite the text)
            r = flagutil.processflagsread(self, rawtext, flags)
            text, validatehash = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            # cache the now-validated rawtext for subsequent accesses
            self._revisioncache = (node, rev, rawtext)

        return text, sidedata
2015 2024
    def _rawtext(self, node, rev, _df=None):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            if self._revisioncache[0] == node:
                # cache hit: the cached rawtext was already validated
                return (rev, self._revisioncache[2], True)
            cachedrev = self._revisioncache[1]

        if rev is None:
            rev = self.rev(node)

        # walk the delta chain, stopping early if the cached revision is
        # part of it (its text can then serve as the base)
        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._revisioncache after validating the text
        self._revisioncache = None

        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            # hint for sparse reads: cap segments around 4x the final size
            targetsize = 4 * rawsize

        bins = self._chunks(chain, df=_df, targetsize=targetsize)
        if basetext is None:
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext  # let us have a chance to free memory early
        # the reconstructed text has not been hash-checked yet
        return (rev, rawtext, False)
2058 2067
2059 2068 def _sidedata(self, rev):
2060 2069 """Return the sidedata for a given revision number."""
2061 2070 index_entry = self.index[rev]
2062 2071 sidedata_offset = index_entry[8]
2063 2072 sidedata_size = index_entry[9]
2064 2073
2065 2074 if self._inline:
2066 2075 sidedata_offset += self.index.entry_size * (1 + rev)
2067 2076 if sidedata_size == 0:
2068 2077 return {}
2069 2078
2070 2079 comp_segment = self._getsegment(sidedata_offset, sidedata_size)
2071 2080 comp = self.index[rev][11]
2072 2081 if comp == COMP_MODE_PLAIN:
2073 2082 segment = comp_segment
2074 2083 elif comp == COMP_MODE_DEFAULT:
2075 2084 segment = self._decompressor(comp_segment)
2076 2085 elif comp == COMP_MODE_INLINE:
2077 2086 segment = self.decompress(comp_segment)
2078 2087 else:
2079 2088 msg = 'unknown compression mode %d'
2080 2089 msg %= comp
2081 2090 raise error.RevlogError(msg)
2082 2091
2083 2092 sidedata = sidedatautil.deserialize_sidedata(segment)
2084 2093 return sidedata
2085 2094
2086 2095 def rawdata(self, nodeorrev, _df=None):
2087 2096 """return an uncompressed raw data of a given node or revision number.
2088 2097
2089 2098 _df - an existing file handle to read from. (internal-only)
2090 2099 """
2091 2100 return self._revisiondata(nodeorrev, _df, raw=True)[0]
2092 2101
2093 2102 def hash(self, text, p1, p2):
2094 2103 """Compute a node hash.
2095 2104
2096 2105 Available as a function so that subclasses can replace the hash
2097 2106 as needed.
2098 2107 """
2099 2108 return storageutil.hashrevisionsha1(text, p1, p2)
2100 2109
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.

        Raises RevlogError on mismatch, or CensoredNodeError when the
        stored text is a censor tombstone.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    # fall back to a short hex form for the error message
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(
                    _(b"integrity check failed on %s:%s")
                    % (self.display_id, pycompat.bytestr(revornode))
                )
        except error.RevlogError:
            # a censored revision intentionally fails the hash check; report
            # it as such instead of as corruption
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.display_id, node, text)
            raise
2131 2140
    def _enforceinlinesize(self, tr):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        total_size = self.start(tiprev) + self.length(tiprev)
        if not self._inline or total_size < _maxinline:
            # nothing to do: not inline, or still small enough
            return

        troffset = tr.findoffset(self._indexfile)
        if troffset is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self._indexfile
            )
        trindex = 0
        tr.add(self._datafile, 0)

        existing_handles = False
        if self._writinghandles is not None:
            existing_handles = True
            fp = self._writinghandles[0]
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        new_dfh = self._datafp(b'w+')
        new_dfh.truncate(0)  # drop any potentially existing data
        try:
            with self._indexfp() as read_ifh:
                for r in self:
                    # copy each revision's data chunk into the new data file
                    new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
                    if troffset <= self.start(r):
                        trindex = r
                new_dfh.flush()

            with self.__index_new_fp() as fp:
                self._format_flags &= ~FLAG_INLINE_DATA
                self._inline = False
                for i in self:
                    e = self.index.entry_binary(i)
                    if i == 0 and self._docket is None:
                        # a docket-less revlog stores the format header in
                        # its first index entry
                        header = self._format_flags | self._format_version
                        header = self.index.pack_header(header)
                        e = header + e
                    fp.write(e)
                if self._docket is not None:
                    self._docket.index_end = fp.tell()
                # the temp file replace the real index when we exit the context
                # manager

            tr.replace(self._indexfile, trindex * self.index.entry_size)
            nodemaputil.setup_persistent_nodemap(tr, self)
            # the cached chunks referenced inline offsets; discard them
            self._chunkclear()

            if existing_handles:
                # switched from inline to conventional reopen the index
                ifh = self.__index_write_fp()
                self._writinghandles = (ifh, new_dfh)
                new_dfh = None
        finally:
            if new_dfh is not None:
                new_dfh.close()
2199 2208
2200 2209 def _nodeduplicatecallback(self, transaction, node):
2201 2210 """called when trying to add a node already stored."""
2202 2211
    @contextlib.contextmanager
    def _writing(self, transaction):
        """Context manager opening (and tracking) the revlog files for write.

        Nested use is supported: when handles are already open the inner
        call yields immediately.  On exit the index handle is closed and,
        when a docket is in use, the docket is persisted.
        """
        if self._trypending:
            msg = b'try to write in a `trypending` revlog: %s'
            msg %= self.display_id
            raise error.ProgrammingError(msg)
        if self._writinghandles is not None:
            # already inside a _writing block: reuse the open handles
            yield
        else:
            r = len(self)
            dsize = 0
            if r:
                dsize = self.end(r - 1)
            dfh = None
            if not self._inline:
                try:
                    dfh = self._datafp(b"r+")
                    if self._docket is None:
                        dfh.seek(0, os.SEEK_END)
                    else:
                        # with a docket, valid data may end before EOF
                        dfh.seek(self._docket.data_end, os.SEEK_SET)
                except IOError as inst:
                    if inst.errno != errno.ENOENT:
                        raise
                    # the data file does not exist yet: create it
                    dfh = self._datafp(b"w+")
                transaction.add(self._datafile, dsize)
            try:
                isize = r * self.index.entry_size
                ifh = self.__index_write_fp()
                if self._inline:
                    transaction.add(self._indexfile, dsize + isize)
                else:
                    transaction.add(self._indexfile, isize)
                try:
                    self._writinghandles = (ifh, dfh)
                    try:
                        yield
                        if self._docket is not None:
                            self._write_docket(transaction)
                    finally:
                        # never leave stale handles visible to other calls
                        self._writinghandles = None
                finally:
                    ifh.close()
            finally:
                if dfh is not None:
                    dfh.close()
2249 2258
2250 2259 def _write_docket(self, transaction):
2251 2260 """write the current docket on disk
2252 2261
2253 2262 Exist as a method to help changelog to implement transaction logic
2254 2263
2255 2264 We could also imagine using the same transaction logic for all revlog
2256 2265 since docket are cheap."""
2257 2266 self._docket.write(transaction)
2258 2267
    def addrevision(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=REVIDX_DEFAULT_FLAGS,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use different hashing method (and override checkhash() in such case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
            multiple calls
        sidedata - optional sidedata mapping to store with the revision; only
            allowed when the revlog has sidedata support

        Returns the revision number of the (possibly pre-existing) revision.

        Raises RevlogError for an invalid linkrev or oversized text, and
        ProgrammingError when sidedata is passed to an unsupporting revlog.
        """
        if link == nullrev:
            raise error.RevlogError(
                _(b"attempted to add linkrev -1 to %s") % self.display_id
            )

        if sidedata is None:
            sidedata = {}
        elif sidedata and not self.hassidedata:
            raise error.ProgrammingError(
                _(b"trying to add sidedata to a revlog who don't support them")
            )

        if flags:
            # flag processors may need the node; compute it from the
            # pre-processing text so the hash matches what callers expect
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(self, text, flags)

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _(
                    b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
                )
                % (self.display_id, len(rawtext))
            )

        node = node or self.hash(rawtext, p1, p2)
        rev = self.index.get_rev(node)
        if rev is not None:
            # revision already present: nothing to add
            return rev

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(
            rawtext,
            transaction,
            link,
            p1,
            p2,
            node,
            flags,
            cachedelta=cachedelta,
            deltacomputer=deltacomputer,
            sidedata=sidedata,
        )
2336 2345
2337 2346 def addrawrevision(
2338 2347 self,
2339 2348 rawtext,
2340 2349 transaction,
2341 2350 link,
2342 2351 p1,
2343 2352 p2,
2344 2353 node,
2345 2354 flags,
2346 2355 cachedelta=None,
2347 2356 deltacomputer=None,
2348 2357 sidedata=None,
2349 2358 ):
2350 2359 """add a raw revision with known flags, node and parents
2351 2360 useful when reusing a revision not stored in this revlog (ex: received
2352 2361 over wire, or read from an external bundle).
2353 2362 """
2354 2363 with self._writing(transaction):
2355 2364 return self._addrevision(
2356 2365 node,
2357 2366 rawtext,
2358 2367 transaction,
2359 2368 link,
2360 2369 p1,
2361 2370 p2,
2362 2371 flags,
2363 2372 cachedelta,
2364 2373 deltacomputer=deltacomputer,
2365 2374 sidedata=sidedata,
2366 2375 )
2367 2376
2368 2377 def compress(self, data):
2369 2378 """Generate a possibly-compressed representation of data."""
2370 2379 if not data:
2371 2380 return b'', data
2372 2381
2373 2382 compressed = self._compressor.compress(data)
2374 2383
2375 2384 if compressed:
2376 2385 # The revlog compressor added the header in the returned data.
2377 2386 return b'', compressed
2378 2387
2379 2388 if data[0:1] == b'\0':
2380 2389 return b'', data
2381 2390 return b'u', data
2382 2391
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.

        Returns the decompressed bytes (a zero-copy buffer view for
        b'u'-tagged chunks).  Raises RevlogError when zlib data is corrupt;
        unknown headers are delegated to ``_get_decompressor``.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely by at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == b'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(
                    _(b'revlog decompress error: %s')
                    % stringutil.forcebytestr(e)
                )
        # '\0' is more common than 'u' so it goes first.
        elif t == b'\0':
            return data
        elif t == b'u':
            # stored uncompressed: strip the one-byte header without copying
            return util.buffer(data, 1)

        compressor = self._get_decompressor(t)

        return compressor.decompress(data)
2432 2441
    def _addrevision(
        self,
        node,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        flags,
        cachedelta,
        alwayscache=False,
        deltacomputer=None,
        sidedata=None,
    ):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.

        Must be called inside a ``_writing`` context.  Returns the revision
        number of the newly added revision.
        """
        if node == self.nullid:
            raise error.RevlogError(
                _(b"%s: attempt to add null revision") % self.display_id
            )
        if (
            node == self.nodeconstants.wdirid
            or node in self.nodeconstants.wdirfilenodeids
        ):
            raise error.RevlogError(
                _(b"%s: attempt to add wdir revision") % self.display_id
            )
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)

        # pick the handle holding revision data: index file when inline,
        # data file otherwise
        if self._inline:
            fh = self._writinghandles[0]
        else:
            fh = self._writinghandles[1]

        # one-element list so the delta computer can hand back the built text
        btext = [rawtext]

        curr = len(self)
        prev = curr - 1

        offset = self._get_data_offset(prev)

        if self._concurrencychecker:
            ifh, dfh = self._writinghandles
            if self._inline:
                # offset is "as if" it were in the .d file, so we need to add on
                # the size of the entry metadata.
                self._concurrencychecker(
                    ifh, self._indexfile, offset + curr * self.index.entry_size
                )
            else:
                # Entries in the .i are a consistent size.
                self._concurrencychecker(
                    ifh, self._indexfile, curr * self.index.entry_size
                )
                self._concurrencychecker(dfh, self._datafile, offset)

        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(
                revlog.size(self, cachedelta[0]), cachedelta[1]
            )
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            deltacomputer = deltautil.deltacomputer(self)

        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        # revlogs with a docket record the compression mode in the index
        # entry instead of relying solely on in-chunk headers
        compression_mode = COMP_MODE_INLINE
        if self._docket is not None:
            h, d = deltainfo.data
            if not h and not d:
                # not data to store at all... declare them uncompressed
                compression_mode = COMP_MODE_PLAIN
            elif not h:
                t = d[0:1]
                if t == b'\0':
                    compression_mode = COMP_MODE_PLAIN
                elif t == self._docket.default_compression_header:
                    compression_mode = COMP_MODE_DEFAULT
            elif h == b'u':
                # we have a more efficient way to declare uncompressed
                h = b''
                compression_mode = COMP_MODE_PLAIN
                deltainfo = deltautil.drop_u_compression(deltainfo)

        sidedata_compression_mode = COMP_MODE_INLINE
        if sidedata and self.hassidedata:
            sidedata_compression_mode = COMP_MODE_PLAIN
            serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
            sidedata_offset = offset + deltainfo.deltalen
            h, comp_sidedata = self.compress(serialized_sidedata)
            # only keep the compressed form when it actually saves space and
            # is unambiguous (no b'u' tag, no NUL-leading payload)
            if (
                h != b'u'
                and comp_sidedata[0:1] != b'\0'
                and len(comp_sidedata) < len(serialized_sidedata)
            ):
                assert not h
                if (
                    comp_sidedata[0:1]
                    == self._docket.default_compression_header
                ):
                    sidedata_compression_mode = COMP_MODE_DEFAULT
                    serialized_sidedata = comp_sidedata
                else:
                    sidedata_compression_mode = COMP_MODE_INLINE
                    serialized_sidedata = comp_sidedata
        else:
            serialized_sidedata = b""
            # Don't store the offset if the sidedata is empty, that way
            # we can easily detect empty sidedata and they will be no different
            # than ones we manually add.
            sidedata_offset = 0

        e = (
            offset_type(offset, flags),
            deltainfo.deltalen,
            textlen,
            deltainfo.base,
            link,
            p1r,
            p2r,
            node,
            sidedata_offset,
            len(serialized_sidedata),
            compression_mode,
            sidedata_compression_mode,
        )

        self.index.append(e)
        entry = self.index.entry_binary(curr)
        if curr == 0 and self._docket is None:
            # docket-less revlogs embed the format header in the first entry
            header = self._format_flags | self._format_version
            header = self.index.pack_header(header)
            entry = header + entry
        self._writeentry(
            transaction,
            entry,
            deltainfo.data,
            link,
            offset,
            serialized_sidedata,
        )

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes:  # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return curr
2608 2617
2609 2618 def _get_data_offset(self, prev):
2610 2619 """Returns the current offset in the (in-transaction) data file.
2611 2620 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2612 2621 file to store that information: since sidedata can be rewritten to the
2613 2622 end of the data file within a transaction, you can have cases where, for
2614 2623 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2615 2624 to `n - 1`'s sidedata being written after `n`'s data.
2616 2625
2617 2626 TODO cache this in a docket file before getting out of experimental."""
2618 2627 if self._docket is None:
2619 2628 return self.end(prev)
2620 2629 else:
2621 2630 return self._docket.data_end
2622 2631
    def _writeentry(self, transaction, entry, data, link, offset, sidedata):
        """Write one packed index entry and its chunk data to disk.

        Must run inside a ``_writing`` context.  ``entry`` is the packed
        index record, ``data`` a (header, payload) chunk pair, ``offset``
        the data-file offset registered with the transaction for undo.
        """
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)
        ifh, dfh = self._writinghandles
        if self._docket is None:
            ifh.seek(0, os.SEEK_END)
        else:
            # the docket tracks the authoritative end of valid index data
            ifh.seek(self._docket.index_end, os.SEEK_SET)
        if dfh:
            if self._docket is None:
                dfh.seek(0, os.SEEK_END)
            else:
                dfh.seek(self._docket.data_end, os.SEEK_SET)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self._datafile, offset)
            transaction.add(self._indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            if sidedata:
                dfh.write(sidedata)
            ifh.write(entry)
        else:
            # inline: entry and chunk data live in the index file
            offset += curr * self.index.entry_size
            transaction.add(self._indexfile, offset)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            if sidedata:
                ifh.write(sidedata)
            # may split the revlog out of inline mode once it grows too big
            self._enforceinlinesize(transaction)
        if self._docket is not None:
            self._docket.index_end = self._writinghandles[0].tell()
            self._docket.data_end = self._writinghandles[1].tell()

        nodemaputil.setup_persistent_nodemap(transaction, self)
2674 2683
    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        alwayscache=False,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.

        If ``duplicaterevisioncb`` is defined, it is called the same way for
        each incoming revision that already exists in this revlog.

        Returns True if at least one new revision was added, False otherwise.
        Cannot be nested; raises ProgrammingError on reentrant use.
        """

        if self._adding_group:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        self._adding_group = True
        empty = True
        try:
            with self._writing(transaction):
                deltacomputer = deltautil.deltacomputer(self)
                # loop through our set of deltas
                for data in deltas:
                    (
                        node,
                        p1,
                        p2,
                        linknode,
                        deltabase,
                        delta,
                        flags,
                        sidedata,
                    ) = data
                    link = linkmapper(linknode)
                    flags = flags or REVIDX_DEFAULT_FLAGS

                    rev = self.index.get_rev(node)
                    if rev is not None:
                        # this can happen if two branches make the same change
                        self._nodeduplicatecallback(transaction, rev)
                        if duplicaterevisioncb:
                            duplicaterevisioncb(self, rev)
                        empty = False
                        continue

                    for p in (p1, p2):
                        if not self.index.has_node(p):
                            raise error.LookupError(
                                p, self.radix, _(b'unknown parent')
                            )

                    if not self.index.has_node(deltabase):
                        raise error.LookupError(
                            deltabase, self.display_id, _(b'unknown delta base')
                        )

                    baserev = self.rev(deltabase)

                    if baserev != nullrev and self.iscensored(baserev):
                        # if base is censored, delta must be full replacement in a
                        # single patch operation
                        hlen = struct.calcsize(b">lll")
                        oldlen = self.rawsize(baserev)
                        newlen = len(delta) - hlen
                        if delta[:hlen] != mdiff.replacediffheader(
                            oldlen, newlen
                        ):
                            raise error.CensoredBaseError(
                                self.display_id, self.node(baserev)
                            )

                    if not flags and self._peek_iscensored(baserev, delta):
                        flags |= REVIDX_ISCENSORED

                    # We assume consumers of addrevisioncb will want to retrieve
                    # the added revision, which will require a call to
                    # revision(). revision() will fast path if there is a cache
                    # hit. So, we tell _addrevision() to always cache in this case.
                    # We're only using addgroup() in the context of changegroup
                    # generation so the revision data can always be handled as raw
                    # by the flagprocessor.
                    rev = self._addrevision(
                        node,
                        None,
                        transaction,
                        link,
                        p1,
                        p2,
                        flags,
                        (baserev, delta),
                        alwayscache=alwayscache,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

                    if addrevisioncb:
                        addrevisioncb(self, rev)
                    empty = False
        finally:
            self._adding_group = False
        return not empty
2783 2792
2784 2793 def iscensored(self, rev):
2785 2794 """Check if a file revision is censored."""
2786 2795 if not self._censorable:
2787 2796 return False
2788 2797
2789 2798 return self.flags(rev) & REVIDX_ISCENSORED
2790 2799
2791 2800 def _peek_iscensored(self, baserev, delta):
2792 2801 """Quickly check if a delta produces a censored revision."""
2793 2802 if not self._censorable:
2794 2803 return False
2795 2804
2796 2805 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2797 2806
2798 2807 def getstrippoint(self, minlink):
2799 2808 """find the minimum rev that must be stripped to strip the linkrev
2800 2809
2801 2810 Returns a tuple containing the minimum rev and a set of all revs that
2802 2811 have linkrevs that will be broken by this strip.
2803 2812 """
2804 2813 return storageutil.resolvestripinfo(
2805 2814 minlink,
2806 2815 len(self) - 1,
2807 2816 self.headrevs(),
2808 2817 self.linkrev,
2809 2818 self.parentrevs,
2810 2819 )
2811 2820
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            # nothing to strip: all linkrevs are below minlink
            return

        # first truncate the files on disk
        data_end = self.start(rev)
        if not self._inline:
            transaction.add(self._datafile, data_end)
            end = rev * self.index.entry_size
        else:
            # inline: data follows each index entry in the same file
            end = data_end + (rev * self.index.entry_size)

        transaction.add(self._indexfile, end)
        if self._docket is not None:
            # XXX we could, leverage the docket while stripping. However it is
            # not powerfull enough at the time of this comment
            self._docket.index_end = end
            self._docket.data_end = data_end
            self._docket.write(transaction, stripping=True)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = util.lrucachedict(500)
        self._chunkclear()

        del self.index[rev:-1]
2855 2864
    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).

        A missing file counts as zero extra bytes; other IOErrors propagate.
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # no data file (e.g. inline revlog): no data-file excess
            dd = 0

        try:
            f = self.opener(self._indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self.index.entry_size
            # di is whatever does not fit into whole index entries
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                # inline index interleaves revision data with the entries,
                # so recompute both excess figures accordingly
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
2899 2908
2900 2909 def files(self):
2901 2910 res = [self._indexfile]
2902 2911 if not self._inline:
2903 2912 res.append(self._datafile)
2904 2913 return res
2905 2914
2906 2915 def emitrevisions(
2907 2916 self,
2908 2917 nodes,
2909 2918 nodesorder=None,
2910 2919 revisiondata=False,
2911 2920 assumehaveparentrevisions=False,
2912 2921 deltamode=repository.CG_DELTAMODE_STD,
2913 2922 sidedata_helpers=None,
2914 2923 ):
2915 2924 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2916 2925 raise error.ProgrammingError(
2917 2926 b'unhandled value for nodesorder: %s' % nodesorder
2918 2927 )
2919 2928
2920 2929 if nodesorder is None and not self._generaldelta:
2921 2930 nodesorder = b'storage'
2922 2931
2923 2932 if (
2924 2933 not self._storedeltachains
2925 2934 and deltamode != repository.CG_DELTAMODE_PREV
2926 2935 ):
2927 2936 deltamode = repository.CG_DELTAMODE_FULL
2928 2937
2929 2938 return storageutil.emitrevisions(
2930 2939 self,
2931 2940 nodes,
2932 2941 nodesorder,
2933 2942 revlogrevisiondelta,
2934 2943 deltaparentfn=self.deltaparent,
2935 2944 candeltafn=self.candelta,
2936 2945 rawsizefn=self.rawsize,
2937 2946 revdifffn=self.revdiff,
2938 2947 flagsfn=self.flags,
2939 2948 deltamode=deltamode,
2940 2949 revisiondata=revisiondata,
2941 2950 assumehaveparentrevisions=assumehaveparentrevisions,
2942 2951 sidedata_helpers=sidedata_helpers,
2943 2952 )
2944 2953
    # Delta-reuse policies accepted by `clone()`; see its docstring for the
    # exact semantics of each value.
    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    # Set of all valid policy values, used for input validation in `clone()`.
    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2952 2961
    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedata_helpers=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument control how deltas from the existing revlog
        are preserved in the destination revlog. The argument can have the
        following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is the
          fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).
        DELTAREUSEFULLADD
          Revision will be re-added as if their were new content. This is
          slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
          eg: large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force compute deltas against both parents
        for merges. By default, the current default is used.

        See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
        `sidedata_helpers`.

        Raises ValueError on an invalid ``deltareuse`` value, a non-empty
        destination, or filtered revisions on either side.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase controls whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedata_helpers,
            )

        finally:
            # always restore the destination's delta configuration, even if
            # the copy failed part-way through
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd
3051 3060
    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedata_helpers,
    ):
        """perform the core duty of `revlog.clone` after parameter processing

        Iterates every revision in storage order and re-adds it to
        ``destrevlog``, reusing cached deltas when the policy allows.
        """
        deltacomputer = deltautil.deltacomputer(destrevlog)
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if deltareuse == self.DELTAREUSEFULLADD:
                # full re-add goes through the public API so extra layers
                # (e.g. flag processing) can kick in
                text, sidedata = self._revisiondata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                sidedata = None
                if not cachedelta:
                    rawtext, sidedata = self._revisiondata(rev)
                if sidedata is None:
                    sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                with destrevlog._writing(tr):
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

            if addrevisioncb:
                addrevisioncb(self, rev, node)
3134 3143
    def censorrevision(self, tr, censornode, tombstone=b''):
        """Replace the content of ``censornode`` with a ``tombstone``.

        The whole revlog is rewritten into a temporary copy where the
        censored revision stores the tombstone instead of its data, then the
        copy replaces the original on disk.  Raises RevlogError for REVLOGV0
        and Abort when the tombstone is too long or delta storage prevents
        censoring.
        """
        if self._format_version == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs')
                % self._format_version
            )

        censorrev = self.rev(censornode)
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        # the tombstone must fit in the space the censored data occupied
        if len(tombstone) > self.rawsize(censorrev):
            raise error.Abort(
                _(b'censor tombstone must be no longer than censored data')
            )

        # Rewriting the revlog in place is hard. Our strategy for censoring is
        # to create a new revlog, copy all revisions to it, then replace the
        # revlogs on transaction close.
        #
        # This is a bit dangerous. We could easily have a mismatch of state.
        newrl = revlog(
            self.opener,
            target=self.target,
            radix=self.radix,
            postfix=b'tmpcensored',
            censorable=True,
        )
        newrl._format_version = self._format_version
        newrl._format_flags = self._format_flags
        newrl._generaldelta = self._generaldelta
        newrl._parse_index = self._parse_index

        for rev in self.revs():
            node = self.node(rev)
            p1, p2 = self.parents(node)

            if rev == censorrev:
                newrl.addrawrevision(
                    tombstone,
                    tr,
                    self.linkrev(censorrev),
                    p1,
                    p2,
                    censornode,
                    REVIDX_ISCENSORED,
                )

                if newrl.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'censored revision stored as delta; '
                            b'cannot censor'
                        ),
                        hint=_(
                            b'censoring of revlogs is not '
                            b'fully implemented; please report '
                            b'this bug'
                        ),
                    )
                continue

            if self.iscensored(rev):
                if self.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'cannot censor due to censored '
                            b'revision having delta stored'
                        )
                    )
                rawtext = self._chunk(rev)
            else:
                rawtext = self.rawdata(rev)

            newrl.addrawrevision(
                rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
            )

        # register backups so the transaction can undo the swap below
        tr.addbackup(self._indexfile, location=b'store')
        if not self._inline:
            tr.addbackup(self._datafile, location=b'store')

        self.opener.rename(newrl._indexfile, self._indexfile)
        if not self._inline:
            self.opener.rename(newrl._datafile, self._datafile)

        # drop every in-memory cache and reload from the rewritten files
        self.clearcaches()
        self._loadindex()
3222 3231
    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.

        ``state`` is a dict shared with the caller: ``b'expectedversion'``
        and optionally ``b'skipflags'`` and ``b'erroroncensored'`` are read;
        ``b'skipread'`` and ``b'safe_renamed'`` are (re)initialized here and
        ``b'skipread'`` accumulates nodes whose content could not be read.
        """
        # checksize() reports how far the data and index files deviate from
        # the lengths recorded in the index.
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self._format_version

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.display_id, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta: file content starts with b'\1\n', the metadata
            #         header defined in filelog.py, but without a rename
            #   ext: content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  -------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common  | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1      | L1     | L1    | L1
            # size()       | L1      | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2      | L2     | L2    | L2
            # len(text)    | L2      | L2     | L2    | L3
            # len(read())  | L2      | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                # Only skip flags actually present on this revision.
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                # Length check 1 above: index rawsize vs stored rawtext.
                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                # Censored data is only a problem when the caller asked
                # for it to be treated as an error.
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)
3327 3336
3328 3337 def storageinfo(
3329 3338 self,
3330 3339 exclusivefiles=False,
3331 3340 sharedfiles=False,
3332 3341 revisionscount=False,
3333 3342 trackedsize=False,
3334 3343 storedsize=False,
3335 3344 ):
3336 3345 d = {}
3337 3346
3338 3347 if exclusivefiles:
3339 3348 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3340 3349 if not self._inline:
3341 3350 d[b'exclusivefiles'].append((self.opener, self._datafile))
3342 3351
3343 3352 if sharedfiles:
3344 3353 d[b'sharedfiles'] = []
3345 3354
3346 3355 if revisionscount:
3347 3356 d[b'revisionscount'] = len(self)
3348 3357
3349 3358 if trackedsize:
3350 3359 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3351 3360
3352 3361 if storedsize:
3353 3362 d[b'storedsize'] = sum(
3354 3363 self.opener.stat(path).st_size for path in self.files()
3355 3364 )
3356 3365
3357 3366 return d
3358 3367
    def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
        """Regenerate sidedata for revisions ``startrev`` through ``endrev``.

        ``helpers`` is the 3-item sidedata-helpers structure consumed by
        ``sidedatautil.run_sidedata_helpers``; when items 1 and 2 are both
        empty there is nothing to generate or remove and this is a no-op.
        New sidedata is appended at the end of the data file and the
        affected index entries are rewritten in place.  Rewriting a
        revision that already has sidedata recorded is not supported and
        raises ``Abort``.
        """
        if not self.hassidedata:
            return
        # revlog formats with sidedata support does not support inline
        assert not self._inline
        if not helpers[1] and not helpers[2]:
            # Nothing to generate or remove
            return

        new_entries = []
        # append the new sidedata
        with self._writing(transaction):
            ifh, dfh = self._writinghandles
            # Position the data file at its logical end: the docket tracks
            # it explicitly when present, otherwise it is the physical end.
            if self._docket is not None:
                dfh.seek(self._docket.data_end, os.SEEK_SET)
            else:
                dfh.seek(0, os.SEEK_END)

            current_offset = dfh.tell()
            for rev in range(startrev, endrev + 1):
                entry = self.index[rev]
                new_sidedata, flags = sidedatautil.run_sidedata_helpers(
                    store=self,
                    sidedata_helpers=helpers,
                    sidedata={},
                    rev=rev,
                )

                serialized_sidedata = sidedatautil.serialize_sidedata(
                    new_sidedata
                )

                # Try to compress the serialized sidedata; fall back to
                # storing it plain when compression does not help.
                sidedata_compression_mode = COMP_MODE_INLINE
                if serialized_sidedata and self.hassidedata:
                    sidedata_compression_mode = COMP_MODE_PLAIN
                    h, comp_sidedata = self.compress(serialized_sidedata)
                    if (
                        h != b'u'
                        and comp_sidedata[0] != b'\0'
                        and len(comp_sidedata) < len(serialized_sidedata)
                    ):
                        assert not h
                        if (
                            comp_sidedata[0]
                            == self._docket.default_compression_header
                        ):
                            sidedata_compression_mode = COMP_MODE_DEFAULT
                            serialized_sidedata = comp_sidedata
                        else:
                            sidedata_compression_mode = COMP_MODE_INLINE
                            serialized_sidedata = comp_sidedata
                # entry[8]/entry[9] are the existing sidedata offset/length.
                if entry[8] != 0 or entry[9] != 0:
                    # rewriting entries that already have sidedata is not
                    # supported yet, because it introduces garbage data in the
                    # revlog.
                    msg = b"rewriting existing sidedata is not supported yet"
                    raise error.Abort(msg)

                # Apply (potential) flags to add and to remove after running
                # the sidedata helpers
                new_offset_flags = entry[0] | flags[0] & ~flags[1]
                entry_update = (
                    current_offset,
                    len(serialized_sidedata),
                    new_offset_flags,
                    sidedata_compression_mode,
                )

                # the sidedata computation might have moved the file cursors
                # around, so restore our write position explicitly
                dfh.seek(current_offset, os.SEEK_SET)
                dfh.write(serialized_sidedata)
                new_entries.append(entry_update)
                current_offset += len(serialized_sidedata)
                if self._docket is not None:
                    self._docket.data_end = dfh.tell()

            # rewrite the new index entries
            ifh.seek(startrev * self.index.entry_size)
            for i, e in enumerate(new_entries):
                rev = startrev + i
                self.index.replace_sidedata_info(rev, *e)
                packed = self.index.entry_binary(rev)
                # Revision 0 of a docket-less revlog embeds the revlog
                # header in front of its index entry.
                if rev == 0 and self._docket is None:
                    header = self._format_flags | self._format_version
                    header = self.index.pack_header(header)
                    packed = header + packed
                ifh.write(packed)
@@ -1,190 +1,204
1 1 # revlogdeltas.py - constant used for revlog logic.
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2018 Octobus <contact@octobus.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8 """Helper class to compute deltas stored inside revlogs"""
9 9
10 10 from __future__ import absolute_import
11 11
12 12 import struct
13 13
14 14 from ..interfaces import repository
15 15
### Internal utility constants

# Identifiers for the kind of data a revlog stores.  Values are over 256 so
# they can never compare equal to a bytes value.
KIND_CHANGELOG = 1001  # over 256 to not be comparable with a bytes
KIND_MANIFESTLOG = 1002
KIND_FILELOG = 1003
KIND_OTHER = 1004

# Set of every valid revlog kind.
ALL_KINDS = {
    KIND_CHANGELOG,
    KIND_MANIFESTLOG,
    KIND_FILELOG,
    KIND_OTHER,
}

### main revlog header

# 4-byte big-endian header at the start of a revlog index.
INDEX_HEADER = struct.Struct(b">I")

## revlog version
REVLOGV0 = 0
REVLOGV1 = 1
# Dummy value until file format is finalized.
REVLOGV2 = 0xDEAD
# Dummy value until file format is finalized.
CHANGELOGV2 = 0xD34D

## global revlog header flags
# Shared across v1 and v2.
FLAG_INLINE_DATA = 1 << 16
# Only used by v1, implied by v2.
FLAG_GENERALDELTA = 1 << 17
REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
REVLOG_DEFAULT_FORMAT = REVLOGV1
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
# Per-version masks of the header flags each format accepts.
REVLOGV0_FLAGS = 0
REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
REVLOGV2_FLAGS = FLAG_INLINE_DATA
CHANGELOGV2_FLAGS = 0

### individual entry

## index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
INDEX_ENTRY_V0 = struct.Struct(b">4l20s20s20s")

## index v1
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
INDEX_ENTRY_V1 = struct.Struct(b">Qiiiiii20s12x")
assert INDEX_ENTRY_V1.size == 32 * 2

## index v2
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
#  8 bytes: sidedata offset
#  4 bytes: sidedata compressed length
#  1 bytes: compression mode (2 lower bit are data_compression_mode)
# 19 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
INDEX_ENTRY_V2 = struct.Struct(b">Qiiiiii20s12xQiB19x")
assert INDEX_ENTRY_V2.size == 32 * 3, INDEX_ENTRY_V2.size
95 95
## index for changelogv2 (no base/link rev: changelog never uses deltas and
## each revision is its own linkrev)
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
#  8 bytes: sidedata offset
#  4 bytes: sidedata compressed length
#  1 bytes: compression mode (2 lower bit are data_compression_mode)
# 27 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
INDEX_ENTRY_CL_V2 = struct.Struct(b">Qiiii20s12xQiB27x")
# Fix: the assertion message must report *this* struct's size; it previously
# echoed INDEX_ENTRY_V2.size (copy-paste error), which would mislead anyone
# debugging a size mismatch here.
assert INDEX_ENTRY_CL_V2.size == 32 * 3, INDEX_ENTRY_CL_V2.size
109
# revlog index flags

# For historical reasons, revlog's internal flags were exposed via the
# wire protocol and are even exposed in parts of the storage APIs.

# revision has censor metadata, must be verified
REVIDX_ISCENSORED = repository.REVISION_FLAG_CENSORED
# revision hash does not match data (narrowhg)
REVIDX_ELLIPSIS = repository.REVISION_FLAG_ELLIPSIS
# revision data is stored externally
REVIDX_EXTSTORED = repository.REVISION_FLAG_EXTSTORED
# revision changes files in a way that could affect copy tracing.
REVIDX_HASCOPIESINFO = repository.REVISION_FLAG_HASCOPIESINFO
REVIDX_DEFAULT_FLAGS = 0
# stable order in which flags need to be processed and their processors applied
REVIDX_FLAGS_ORDER = [
    REVIDX_ISCENSORED,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
    REVIDX_HASCOPIESINFO,
]

# bitmask of flags that could cause rawdata content change
REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED

## chunk compression mode constants:
# These constants are used in revlog version >=2 to denote the compression
# used for a chunk.

# Chunk uses no compression: the data stored on disk can be used directly as
# the chunk value, without any header information prefixed.
COMP_MODE_PLAIN = 0

# Chunk uses the "default compression" for the revlog (usually defined in the
# revlog docket). A header is still used.
#
# XXX: keeping a header is probably not useful and we should probably drop it.
#
# XXX: The value of allowing mixed types of compression in the revlog is
# unclear and we should consider making PLAIN/DEFAULT the only available
# modes for revlog v2, disallowing INLINE mode.
COMP_MODE_DEFAULT = 1

# Chunk uses a compression mode stored "inline" at the start of the chunk
# itself. This is the mode always used for revlog versions "0" and "1".
COMP_MODE_INLINE = 2

# Map of revlog version to the header flag bits that version accepts.
SUPPORTED_FLAGS = {
    REVLOGV0: REVLOGV0_FLAGS,
    REVLOGV1: REVLOGV1_FLAGS,
    REVLOGV2: REVLOGV2_FLAGS,
    CHANGELOGV2: CHANGELOGV2_FLAGS,
}
149 163
150 164 _no = lambda flags: False
151 165 _yes = lambda flags: True
152 166
153 167
154 168 def _from_flag(flag):
155 169 return lambda flags: bool(flags & flag)
156 170
157 171
# Map of revlog version to its feature table.
#
# b'inline' and b'generaldelta' map to callables that take the header flags
# and return whether the feature is active; b'sidedata' and b'docket' are
# plain booleans because they do not depend on the header flags.
FEATURES_BY_VERSION = {
    REVLOGV0: {
        b'inline': _no,
        b'generaldelta': _no,
        b'sidedata': False,
        b'docket': False,
    },
    REVLOGV1: {
        b'inline': _from_flag(FLAG_INLINE_DATA),
        b'generaldelta': _from_flag(FLAG_GENERALDELTA),
        b'sidedata': False,
        b'docket': False,
    },
    REVLOGV2: {
        # The point of inline-revlog is to reduce the number of files used in
        # the store. Using a docket defeats this purpose. So we need other
        # means to reduce the number of files for revlogv2.
        b'inline': _no,
        b'generaldelta': _yes,
        b'sidedata': True,
        b'docket': True,
    },
    CHANGELOGV2: {
        b'inline': _no,
        # General delta is useless for changelog since we don't do any delta
        b'generaldelta': _no,
        b'sidedata': True,
        b'docket': True,
    },
}


# NOTE(review): presumably the maximum delta-chain length honored when
# sparse-revlog is enabled — confirm against the consumer in revlog.py.
SPARSE_REVLOG_MAX_CHAIN_LENGTH = 1000
General Comments 0
You need to be logged in to leave comments. Login now