delta-find: add a way to control the number of bases tested at the same time...
marmoute
r50552:f5f113f1 default
@@ -1,2896 +1,2901 @@
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import functools
10 10 import re
11 11
12 12 from . import (
13 13 encoding,
14 14 error,
15 15 )
16 16
17 17
18 18 def loadconfigtable(ui, extname, configtable):
19 19 """update config item known to the ui with the extension ones"""
20 20 for section, items in sorted(configtable.items()):
21 21 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 22 knownkeys = set(knownitems)
23 23 newkeys = set(items)
24 24 for key in sorted(knownkeys & newkeys):
25 25 msg = b"extension '%s' overwrite config item '%s.%s'"
26 26 msg %= (extname, section, key)
27 27 ui.develwarn(msg, config=b'warn-config')
28 28
29 29 knownitems.update(items)
30 30
31 31
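# An illustrative sketch (hypothetical extension name and item) of how an
# extension declares the configtable that loadconfigtable() above folds into
# the ui's known configuration:
#
#     from mercurial import registrar
#
#     configtable = {}
#     configitem = registrar.configitem(configtable)
#
#     configitem(b'myext', b'enabled', default=False)
#
# At load time the extension machinery effectively runs
# loadconfigtable(ui, b'myext', configtable), emitting a devel warning when an
# item collides with one that is already registered.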
32 32 class configitem:
33 33 """represent a known config item
34 34
35 35 :section: the official config section where to find this item,
36 36 :name: the official name within the section,
37 37 :default: default value for this item,
38 38 :alias: optional list of tuples as alternatives,
39 39 :generic: this is a generic definition; the name is matched as a regular expression.
40 40 """
41 41
42 42 def __init__(
43 43 self,
44 44 section,
45 45 name,
46 46 default=None,
47 47 alias=(),
48 48 generic=False,
49 49 priority=0,
50 50 experimental=False,
51 51 ):
52 52 self.section = section
53 53 self.name = name
54 54 self.default = default
55 55 self.alias = list(alias)
56 56 self.generic = generic
57 57 self.priority = priority
58 58 self.experimental = experimental
59 59 self._re = None
60 60 if generic:
61 61 self._re = re.compile(self.name)
62 62
63 63
64 64 class itemregister(dict):
65 65 """A specialized dictionary that can handle wild-card selection"""
66 66
67 67 def __init__(self):
68 68 super(itemregister, self).__init__()
69 69 self._generics = set()
70 70
71 71 def update(self, other):
72 72 super(itemregister, self).update(other)
73 73 self._generics.update(other._generics)
74 74
75 75 def __setitem__(self, key, item):
76 76 super(itemregister, self).__setitem__(key, item)
77 77 if item.generic:
78 78 self._generics.add(item)
79 79
80 80 def get(self, key):
81 81 baseitem = super(itemregister, self).get(key)
82 82 if baseitem is not None and not baseitem.generic:
83 83 return baseitem
84 84
85 85 # search for a matching generic item
86 86 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
87 87 for item in generics:
88 88 # we use 'match' instead of 'search' to make the matching simpler
89 89 # for people unfamiliar with regular expressions. Having the match
90 90 # rooted at the start of the string produces a less surprising
91 91 # result for users writing simple regexes for sub-attributes.
92 92 #
93 93 # For example using "color\..*" with match produces an unsurprising
94 94 # result, while using search could suddenly match apparently
95 95 # unrelated configuration that happens to contain "color."
96 96 # anywhere. This is a tradeoff where we favor requiring ".*" on
97 97 # some matches to avoid the need to prefix most patterns with "^".
98 98 # The "^" seems more error prone.
99 99 if item._re.match(key):
100 100 return item
101 101
102 102 return None
103 103
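# An illustrative, standalone sketch of the match-vs-search tradeoff described
# above (hypothetical pattern and keys):
#
#     import re
#
#     pat = re.compile(b'color\\..*')
#     assert pat.match(b'color.mode')           # rooted at the start: matches
#     assert not pat.match(b'ui.color.mode')    # match() does not fire here
#     assert re.search(b'color\\.', b'ui.color.mode')   # search() would have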
104 104
105 105 coreitems = {}
106 106
107 107
108 108 def _register(configtable, *args, **kwargs):
109 109 item = configitem(*args, **kwargs)
110 110 section = configtable.setdefault(item.section, itemregister())
111 111 if item.name in section:
112 112 msg = b"duplicated config item registration for '%s.%s'"
113 113 raise error.ProgrammingError(msg % (item.section, item.name))
114 114 section[item.name] = item
115 115
116 116
117 117 # special value for cases where the default is derived from other values
118 118 dynamicdefault = object()
119 119
120 120 # Registering actual config items
121 121
122 122
123 123 def getitemregister(configtable):
124 124 f = functools.partial(_register, configtable)
125 125 # export pseudo enum as configitem.*
126 126 f.dynamicdefault = dynamicdefault
127 127 return f
128 128
129 129
130 130 coreconfigitem = getitemregister(coreitems)
131 131
132 132
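# Note on the helper below: _registerdiffopts(section=b'diff') registers
# b'diff.nodates', b'diff.showfunc', etc., while a call such as
# _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
# registers b'commands.commit.interactive.nodates' and friends.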
133 133 def _registerdiffopts(section, configprefix=b''):
134 134 coreconfigitem(
135 135 section,
136 136 configprefix + b'nodates',
137 137 default=False,
138 138 )
139 139 coreconfigitem(
140 140 section,
141 141 configprefix + b'showfunc',
142 142 default=False,
143 143 )
144 144 coreconfigitem(
145 145 section,
146 146 configprefix + b'unified',
147 147 default=None,
148 148 )
149 149 coreconfigitem(
150 150 section,
151 151 configprefix + b'git',
152 152 default=False,
153 153 )
154 154 coreconfigitem(
155 155 section,
156 156 configprefix + b'ignorews',
157 157 default=False,
158 158 )
159 159 coreconfigitem(
160 160 section,
161 161 configprefix + b'ignorewsamount',
162 162 default=False,
163 163 )
164 164 coreconfigitem(
165 165 section,
166 166 configprefix + b'ignoreblanklines',
167 167 default=False,
168 168 )
169 169 coreconfigitem(
170 170 section,
171 171 configprefix + b'ignorewseol',
172 172 default=False,
173 173 )
174 174 coreconfigitem(
175 175 section,
176 176 configprefix + b'nobinary',
177 177 default=False,
178 178 )
179 179 coreconfigitem(
180 180 section,
181 181 configprefix + b'noprefix',
182 182 default=False,
183 183 )
184 184 coreconfigitem(
185 185 section,
186 186 configprefix + b'word-diff',
187 187 default=False,
188 188 )
189 189
190 190
191 191 coreconfigitem(
192 192 b'alias',
193 193 b'.*',
194 194 default=dynamicdefault,
195 195 generic=True,
196 196 )
197 197 coreconfigitem(
198 198 b'auth',
199 199 b'cookiefile',
200 200 default=None,
201 201 )
202 202 _registerdiffopts(section=b'annotate')
203 203 # bookmarks.pushing: internal hack for discovery
204 204 coreconfigitem(
205 205 b'bookmarks',
206 206 b'pushing',
207 207 default=list,
208 208 )
209 209 # bundle.mainreporoot: internal hack for bundlerepo
210 210 coreconfigitem(
211 211 b'bundle',
212 212 b'mainreporoot',
213 213 default=b'',
214 214 )
215 215 coreconfigitem(
216 216 b'censor',
217 217 b'policy',
218 218 default=b'abort',
219 219 experimental=True,
220 220 )
221 221 coreconfigitem(
222 222 b'chgserver',
223 223 b'idletimeout',
224 224 default=3600,
225 225 )
226 226 coreconfigitem(
227 227 b'chgserver',
228 228 b'skiphash',
229 229 default=False,
230 230 )
231 231 coreconfigitem(
232 232 b'cmdserver',
233 233 b'log',
234 234 default=None,
235 235 )
236 236 coreconfigitem(
237 237 b'cmdserver',
238 238 b'max-log-files',
239 239 default=7,
240 240 )
241 241 coreconfigitem(
242 242 b'cmdserver',
243 243 b'max-log-size',
244 244 default=b'1 MB',
245 245 )
246 246 coreconfigitem(
247 247 b'cmdserver',
248 248 b'max-repo-cache',
249 249 default=0,
250 250 experimental=True,
251 251 )
252 252 coreconfigitem(
253 253 b'cmdserver',
254 254 b'message-encodings',
255 255 default=list,
256 256 )
257 257 coreconfigitem(
258 258 b'cmdserver',
259 259 b'track-log',
260 260 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
261 261 )
262 262 coreconfigitem(
263 263 b'cmdserver',
264 264 b'shutdown-on-interrupt',
265 265 default=True,
266 266 )
267 267 coreconfigitem(
268 268 b'color',
269 269 b'.*',
270 270 default=None,
271 271 generic=True,
272 272 )
273 273 coreconfigitem(
274 274 b'color',
275 275 b'mode',
276 276 default=b'auto',
277 277 )
278 278 coreconfigitem(
279 279 b'color',
280 280 b'pagermode',
281 281 default=dynamicdefault,
282 282 )
283 283 coreconfigitem(
284 284 b'command-templates',
285 285 b'graphnode',
286 286 default=None,
287 287 alias=[(b'ui', b'graphnodetemplate')],
288 288 )
289 289 coreconfigitem(
290 290 b'command-templates',
291 291 b'log',
292 292 default=None,
293 293 alias=[(b'ui', b'logtemplate')],
294 294 )
295 295 coreconfigitem(
296 296 b'command-templates',
297 297 b'mergemarker',
298 298 default=(
299 299 b'{node|short} '
300 300 b'{ifeq(tags, "tip", "", '
301 301 b'ifeq(tags, "", "", "{tags} "))}'
302 302 b'{if(bookmarks, "{bookmarks} ")}'
303 303 b'{ifeq(branch, "default", "", "{branch} ")}'
304 304 b'- {author|user}: {desc|firstline}'
305 305 ),
306 306 alias=[(b'ui', b'mergemarkertemplate')],
307 307 )
308 308 coreconfigitem(
309 309 b'command-templates',
310 310 b'pre-merge-tool-output',
311 311 default=None,
312 312 alias=[(b'ui', b'pre-merge-tool-output-template')],
313 313 )
314 314 coreconfigitem(
315 315 b'command-templates',
316 316 b'oneline-summary',
317 317 default=None,
318 318 )
319 319 coreconfigitem(
320 320 b'command-templates',
321 321 b'oneline-summary.*',
322 322 default=dynamicdefault,
323 323 generic=True,
324 324 )
325 325 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
326 326 coreconfigitem(
327 327 b'commands',
328 328 b'commit.post-status',
329 329 default=False,
330 330 )
331 331 coreconfigitem(
332 332 b'commands',
333 333 b'grep.all-files',
334 334 default=False,
335 335 experimental=True,
336 336 )
337 337 coreconfigitem(
338 338 b'commands',
339 339 b'merge.require-rev',
340 340 default=False,
341 341 )
342 342 coreconfigitem(
343 343 b'commands',
344 344 b'push.require-revs',
345 345 default=False,
346 346 )
347 347 coreconfigitem(
348 348 b'commands',
349 349 b'resolve.confirm',
350 350 default=False,
351 351 )
352 352 coreconfigitem(
353 353 b'commands',
354 354 b'resolve.explicit-re-merge',
355 355 default=False,
356 356 )
357 357 coreconfigitem(
358 358 b'commands',
359 359 b'resolve.mark-check',
360 360 default=b'none',
361 361 )
362 362 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
363 363 coreconfigitem(
364 364 b'commands',
365 365 b'show.aliasprefix',
366 366 default=list,
367 367 )
368 368 coreconfigitem(
369 369 b'commands',
370 370 b'status.relative',
371 371 default=False,
372 372 )
373 373 coreconfigitem(
374 374 b'commands',
375 375 b'status.skipstates',
376 376 default=[],
377 377 experimental=True,
378 378 )
379 379 coreconfigitem(
380 380 b'commands',
381 381 b'status.terse',
382 382 default=b'',
383 383 )
384 384 coreconfigitem(
385 385 b'commands',
386 386 b'status.verbose',
387 387 default=False,
388 388 )
389 389 coreconfigitem(
390 390 b'commands',
391 391 b'update.check',
392 392 default=None,
393 393 )
394 394 coreconfigitem(
395 395 b'commands',
396 396 b'update.requiredest',
397 397 default=False,
398 398 )
399 399 coreconfigitem(
400 400 b'committemplate',
401 401 b'.*',
402 402 default=None,
403 403 generic=True,
404 404 )
405 405 coreconfigitem(
406 406 b'convert',
407 407 b'bzr.saverev',
408 408 default=True,
409 409 )
410 410 coreconfigitem(
411 411 b'convert',
412 412 b'cvsps.cache',
413 413 default=True,
414 414 )
415 415 coreconfigitem(
416 416 b'convert',
417 417 b'cvsps.fuzz',
418 418 default=60,
419 419 )
420 420 coreconfigitem(
421 421 b'convert',
422 422 b'cvsps.logencoding',
423 423 default=None,
424 424 )
425 425 coreconfigitem(
426 426 b'convert',
427 427 b'cvsps.mergefrom',
428 428 default=None,
429 429 )
430 430 coreconfigitem(
431 431 b'convert',
432 432 b'cvsps.mergeto',
433 433 default=None,
434 434 )
435 435 coreconfigitem(
436 436 b'convert',
437 437 b'git.committeractions',
438 438 default=lambda: [b'messagedifferent'],
439 439 )
440 440 coreconfigitem(
441 441 b'convert',
442 442 b'git.extrakeys',
443 443 default=list,
444 444 )
445 445 coreconfigitem(
446 446 b'convert',
447 447 b'git.findcopiesharder',
448 448 default=False,
449 449 )
450 450 coreconfigitem(
451 451 b'convert',
452 452 b'git.remoteprefix',
453 453 default=b'remote',
454 454 )
455 455 coreconfigitem(
456 456 b'convert',
457 457 b'git.renamelimit',
458 458 default=400,
459 459 )
460 460 coreconfigitem(
461 461 b'convert',
462 462 b'git.saverev',
463 463 default=True,
464 464 )
465 465 coreconfigitem(
466 466 b'convert',
467 467 b'git.similarity',
468 468 default=50,
469 469 )
470 470 coreconfigitem(
471 471 b'convert',
472 472 b'git.skipsubmodules',
473 473 default=False,
474 474 )
475 475 coreconfigitem(
476 476 b'convert',
477 477 b'hg.clonebranches',
478 478 default=False,
479 479 )
480 480 coreconfigitem(
481 481 b'convert',
482 482 b'hg.ignoreerrors',
483 483 default=False,
484 484 )
485 485 coreconfigitem(
486 486 b'convert',
487 487 b'hg.preserve-hash',
488 488 default=False,
489 489 )
490 490 coreconfigitem(
491 491 b'convert',
492 492 b'hg.revs',
493 493 default=None,
494 494 )
495 495 coreconfigitem(
496 496 b'convert',
497 497 b'hg.saverev',
498 498 default=False,
499 499 )
500 500 coreconfigitem(
501 501 b'convert',
502 502 b'hg.sourcename',
503 503 default=None,
504 504 )
505 505 coreconfigitem(
506 506 b'convert',
507 507 b'hg.startrev',
508 508 default=None,
509 509 )
510 510 coreconfigitem(
511 511 b'convert',
512 512 b'hg.tagsbranch',
513 513 default=b'default',
514 514 )
515 515 coreconfigitem(
516 516 b'convert',
517 517 b'hg.usebranchnames',
518 518 default=True,
519 519 )
520 520 coreconfigitem(
521 521 b'convert',
522 522 b'ignoreancestorcheck',
523 523 default=False,
524 524 experimental=True,
525 525 )
526 526 coreconfigitem(
527 527 b'convert',
528 528 b'localtimezone',
529 529 default=False,
530 530 )
531 531 coreconfigitem(
532 532 b'convert',
533 533 b'p4.encoding',
534 534 default=dynamicdefault,
535 535 )
536 536 coreconfigitem(
537 537 b'convert',
538 538 b'p4.startrev',
539 539 default=0,
540 540 )
541 541 coreconfigitem(
542 542 b'convert',
543 543 b'skiptags',
544 544 default=False,
545 545 )
546 546 coreconfigitem(
547 547 b'convert',
548 548 b'svn.debugsvnlog',
549 549 default=True,
550 550 )
551 551 coreconfigitem(
552 552 b'convert',
553 553 b'svn.trunk',
554 554 default=None,
555 555 )
556 556 coreconfigitem(
557 557 b'convert',
558 558 b'svn.tags',
559 559 default=None,
560 560 )
561 561 coreconfigitem(
562 562 b'convert',
563 563 b'svn.branches',
564 564 default=None,
565 565 )
566 566 coreconfigitem(
567 567 b'convert',
568 568 b'svn.startrev',
569 569 default=0,
570 570 )
571 571 coreconfigitem(
572 572 b'convert',
573 573 b'svn.dangerous-set-commit-dates',
574 574 default=False,
575 575 )
576 576 coreconfigitem(
577 577 b'debug',
578 578 b'dirstate.delaywrite',
579 579 default=0,
580 580 )
581 581 coreconfigitem(
582 582 b'debug',
583 583 b'revlog.verifyposition.changelog',
584 584 default=b'',
585 585 )
586 586 coreconfigitem(
587 587 b'debug',
588 588 b'revlog.debug-delta',
589 589 default=False,
590 590 )
591 591 # display extra information about the bundling process
592 592 coreconfigitem(
593 593 b'debug',
594 594 b'bundling-stats',
595 595 default=False,
596 596 )
597 597 # display extra information about the unbundling process
598 598 coreconfigitem(
599 599 b'debug',
600 600 b'unbundling-stats',
601 601 default=False,
602 602 )
603 603 coreconfigitem(
604 604 b'defaults',
605 605 b'.*',
606 606 default=None,
607 607 generic=True,
608 608 )
609 609 coreconfigitem(
610 610 b'devel',
611 611 b'all-warnings',
612 612 default=False,
613 613 )
614 614 coreconfigitem(
615 615 b'devel',
616 616 b'bundle2.debug',
617 617 default=False,
618 618 )
619 619 coreconfigitem(
620 620 b'devel',
621 621 b'bundle.delta',
622 622 default=b'',
623 623 )
624 624 coreconfigitem(
625 625 b'devel',
626 626 b'cache-vfs',
627 627 default=None,
628 628 )
629 629 coreconfigitem(
630 630 b'devel',
631 631 b'check-locks',
632 632 default=False,
633 633 )
634 634 coreconfigitem(
635 635 b'devel',
636 636 b'check-relroot',
637 637 default=False,
638 638 )
639 639 # Track copy information for all files, not just "added" ones (very slow)
640 640 coreconfigitem(
641 641 b'devel',
642 642 b'copy-tracing.trace-all-files',
643 643 default=False,
644 644 )
645 645 coreconfigitem(
646 646 b'devel',
647 647 b'default-date',
648 648 default=None,
649 649 )
650 650 coreconfigitem(
651 651 b'devel',
652 652 b'deprec-warn',
653 653 default=False,
654 654 )
655 655 coreconfigitem(
656 656 b'devel',
657 657 b'disableloaddefaultcerts',
658 658 default=False,
659 659 )
660 660 coreconfigitem(
661 661 b'devel',
662 662 b'warn-empty-changegroup',
663 663 default=False,
664 664 )
665 665 coreconfigitem(
666 666 b'devel',
667 667 b'legacy.exchange',
668 668 default=list,
669 669 )
670 670 # When True, revlogs use a special reference version of the nodemap that is not
671 671 # performant but is "known" to behave properly.
672 672 coreconfigitem(
673 673 b'devel',
674 674 b'persistent-nodemap',
675 675 default=False,
676 676 )
677 677 coreconfigitem(
678 678 b'devel',
679 679 b'servercafile',
680 680 default=b'',
681 681 )
682 682 coreconfigitem(
683 683 b'devel',
684 684 b'serverexactprotocol',
685 685 default=b'',
686 686 )
687 687 coreconfigitem(
688 688 b'devel',
689 689 b'serverrequirecert',
690 690 default=False,
691 691 )
692 692 coreconfigitem(
693 693 b'devel',
694 694 b'strip-obsmarkers',
695 695 default=True,
696 696 )
697 697 coreconfigitem(
698 698 b'devel',
699 699 b'warn-config',
700 700 default=None,
701 701 )
702 702 coreconfigitem(
703 703 b'devel',
704 704 b'warn-config-default',
705 705 default=None,
706 706 )
707 707 coreconfigitem(
708 708 b'devel',
709 709 b'user.obsmarker',
710 710 default=None,
711 711 )
712 712 coreconfigitem(
713 713 b'devel',
714 714 b'warn-config-unknown',
715 715 default=None,
716 716 )
717 717 coreconfigitem(
718 718 b'devel',
719 719 b'debug.copies',
720 720 default=False,
721 721 )
722 722 coreconfigitem(
723 723 b'devel',
724 724 b'copy-tracing.multi-thread',
725 725 default=True,
726 726 )
727 727 coreconfigitem(
728 728 b'devel',
729 729 b'debug.extensions',
730 730 default=False,
731 731 )
732 732 coreconfigitem(
733 733 b'devel',
734 734 b'debug.repo-filters',
735 735 default=False,
736 736 )
737 737 coreconfigitem(
738 738 b'devel',
739 739 b'debug.peer-request',
740 740 default=False,
741 741 )
742 742 # If discovery.exchange-heads is False, the discovery will not start with
743 743 # remote head fetching and local head querying.
744 744 coreconfigitem(
745 745 b'devel',
746 746 b'discovery.exchange-heads',
747 747 default=True,
748 748 )
749 749 # If discovery.grow-sample is False, the sample size used in set discovery will
750 750 # not be increased during the process.
751 751 coreconfigitem(
752 752 b'devel',
753 753 b'discovery.grow-sample',
754 754 default=True,
755 755 )
756 756 # When discovery.grow-sample.dynamic is True (the default), the sample size is
757 757 # adapted to the shape of the undecided set: it is set to the max of
758 758 # <target-size>, len(roots(undecided)) and len(heads(undecided)).
759 759 coreconfigitem(
760 760 b'devel',
761 761 b'discovery.grow-sample.dynamic',
762 762 default=True,
763 763 )
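# Roughly, when grow-sample.dynamic is enabled the sample size becomes (an
# illustrative restatement of the comment above; the names are not real APIs):
#
#     def dynamic_sample_size(target_size, undecided_roots, undecided_heads):
#         return max(target_size, len(undecided_roots), len(undecided_heads))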
764 764 # discovery.grow-sample.rate controls the rate at which the sample grows
765 765 coreconfigitem(
766 766 b'devel',
767 767 b'discovery.grow-sample.rate',
768 768 default=1.05,
769 769 )
770 770 # If discovery.randomize is False, random sampling during discovery is
771 771 # deterministic. It is meant for integration tests.
772 772 coreconfigitem(
773 773 b'devel',
774 774 b'discovery.randomize',
775 775 default=True,
776 776 )
777 777 # Control the initial size of the discovery sample
778 778 coreconfigitem(
779 779 b'devel',
780 780 b'discovery.sample-size',
781 781 default=200,
782 782 )
783 783 # Control the size of the discovery sample used for the initial round
784 784 coreconfigitem(
785 785 b'devel',
786 786 b'discovery.sample-size.initial',
787 787 default=100,
788 788 )
789 789 _registerdiffopts(section=b'diff')
790 790 coreconfigitem(
791 791 b'diff',
792 792 b'merge',
793 793 default=False,
794 794 experimental=True,
795 795 )
796 796 coreconfigitem(
797 797 b'email',
798 798 b'bcc',
799 799 default=None,
800 800 )
801 801 coreconfigitem(
802 802 b'email',
803 803 b'cc',
804 804 default=None,
805 805 )
806 806 coreconfigitem(
807 807 b'email',
808 808 b'charsets',
809 809 default=list,
810 810 )
811 811 coreconfigitem(
812 812 b'email',
813 813 b'from',
814 814 default=None,
815 815 )
816 816 coreconfigitem(
817 817 b'email',
818 818 b'method',
819 819 default=b'smtp',
820 820 )
821 821 coreconfigitem(
822 822 b'email',
823 823 b'reply-to',
824 824 default=None,
825 825 )
826 826 coreconfigitem(
827 827 b'email',
828 828 b'to',
829 829 default=None,
830 830 )
831 831 coreconfigitem(
832 832 b'experimental',
833 833 b'archivemetatemplate',
834 834 default=dynamicdefault,
835 835 )
836 836 coreconfigitem(
837 837 b'experimental',
838 838 b'auto-publish',
839 839 default=b'publish',
840 840 )
841 841 coreconfigitem(
842 842 b'experimental',
843 843 b'bundle-phases',
844 844 default=False,
845 845 )
846 846 coreconfigitem(
847 847 b'experimental',
848 848 b'bundle2-advertise',
849 849 default=True,
850 850 )
851 851 coreconfigitem(
852 852 b'experimental',
853 853 b'bundle2-output-capture',
854 854 default=False,
855 855 )
856 856 coreconfigitem(
857 857 b'experimental',
858 858 b'bundle2.pushback',
859 859 default=False,
860 860 )
861 861 coreconfigitem(
862 862 b'experimental',
863 863 b'bundle2lazylocking',
864 864 default=False,
865 865 )
866 866 coreconfigitem(
867 867 b'experimental',
868 868 b'bundlecomplevel',
869 869 default=None,
870 870 )
871 871 coreconfigitem(
872 872 b'experimental',
873 873 b'bundlecomplevel.bzip2',
874 874 default=None,
875 875 )
876 876 coreconfigitem(
877 877 b'experimental',
878 878 b'bundlecomplevel.gzip',
879 879 default=None,
880 880 )
881 881 coreconfigitem(
882 882 b'experimental',
883 883 b'bundlecomplevel.none',
884 884 default=None,
885 885 )
886 886 coreconfigitem(
887 887 b'experimental',
888 888 b'bundlecomplevel.zstd',
889 889 default=None,
890 890 )
891 891 coreconfigitem(
892 892 b'experimental',
893 893 b'bundlecompthreads',
894 894 default=None,
895 895 )
896 896 coreconfigitem(
897 897 b'experimental',
898 898 b'bundlecompthreads.bzip2',
899 899 default=None,
900 900 )
901 901 coreconfigitem(
902 902 b'experimental',
903 903 b'bundlecompthreads.gzip',
904 904 default=None,
905 905 )
906 906 coreconfigitem(
907 907 b'experimental',
908 908 b'bundlecompthreads.none',
909 909 default=None,
910 910 )
911 911 coreconfigitem(
912 912 b'experimental',
913 913 b'bundlecompthreads.zstd',
914 914 default=None,
915 915 )
916 916 coreconfigitem(
917 917 b'experimental',
918 918 b'changegroup3',
919 919 default=False,
920 920 )
921 921 coreconfigitem(
922 922 b'experimental',
923 923 b'changegroup4',
924 924 default=False,
925 925 )
926 926 coreconfigitem(
927 927 b'experimental',
928 928 b'cleanup-as-archived',
929 929 default=False,
930 930 )
931 931 coreconfigitem(
932 932 b'experimental',
933 933 b'clientcompressionengines',
934 934 default=list,
935 935 )
936 936 coreconfigitem(
937 937 b'experimental',
938 938 b'copytrace',
939 939 default=b'on',
940 940 )
941 941 coreconfigitem(
942 942 b'experimental',
943 943 b'copytrace.movecandidateslimit',
944 944 default=100,
945 945 )
946 946 coreconfigitem(
947 947 b'experimental',
948 948 b'copytrace.sourcecommitlimit',
949 949 default=100,
950 950 )
951 951 coreconfigitem(
952 952 b'experimental',
953 953 b'copies.read-from',
954 954 default=b"filelog-only",
955 955 )
956 956 coreconfigitem(
957 957 b'experimental',
958 958 b'copies.write-to',
959 959 default=b'filelog-only',
960 960 )
961 961 coreconfigitem(
962 962 b'experimental',
963 963 b'crecordtest',
964 964 default=None,
965 965 )
966 966 coreconfigitem(
967 967 b'experimental',
968 968 b'directaccess',
969 969 default=False,
970 970 )
971 971 coreconfigitem(
972 972 b'experimental',
973 973 b'directaccess.revnums',
974 974 default=False,
975 975 )
976 976 coreconfigitem(
977 977 b'experimental',
978 978 b'editortmpinhg',
979 979 default=False,
980 980 )
981 981 coreconfigitem(
982 982 b'experimental',
983 983 b'evolution',
984 984 default=list,
985 985 )
986 986 coreconfigitem(
987 987 b'experimental',
988 988 b'evolution.allowdivergence',
989 989 default=False,
990 990 alias=[(b'experimental', b'allowdivergence')],
991 991 )
992 992 coreconfigitem(
993 993 b'experimental',
994 994 b'evolution.allowunstable',
995 995 default=None,
996 996 )
997 997 coreconfigitem(
998 998 b'experimental',
999 999 b'evolution.createmarkers',
1000 1000 default=None,
1001 1001 )
1002 1002 coreconfigitem(
1003 1003 b'experimental',
1004 1004 b'evolution.effect-flags',
1005 1005 default=True,
1006 1006 alias=[(b'experimental', b'effect-flags')],
1007 1007 )
1008 1008 coreconfigitem(
1009 1009 b'experimental',
1010 1010 b'evolution.exchange',
1011 1011 default=None,
1012 1012 )
1013 1013 coreconfigitem(
1014 1014 b'experimental',
1015 1015 b'evolution.bundle-obsmarker',
1016 1016 default=False,
1017 1017 )
1018 1018 coreconfigitem(
1019 1019 b'experimental',
1020 1020 b'evolution.bundle-obsmarker:mandatory',
1021 1021 default=True,
1022 1022 )
1023 1023 coreconfigitem(
1024 1024 b'experimental',
1025 1025 b'log.topo',
1026 1026 default=False,
1027 1027 )
1028 1028 coreconfigitem(
1029 1029 b'experimental',
1030 1030 b'evolution.report-instabilities',
1031 1031 default=True,
1032 1032 )
1033 1033 coreconfigitem(
1034 1034 b'experimental',
1035 1035 b'evolution.track-operation',
1036 1036 default=True,
1037 1037 )
1038 1038 # repo-level config to exclude a revset from visibility
1039 1039 #
1040 1040 # The target use case is to use `share` to expose different subsets of the same
1041 1041 # repository, especially server side. See also `server.view`.
1042 1042 coreconfigitem(
1043 1043 b'experimental',
1044 1044 b'extra-filter-revs',
1045 1045 default=None,
1046 1046 )
1047 1047 coreconfigitem(
1048 1048 b'experimental',
1049 1049 b'maxdeltachainspan',
1050 1050 default=-1,
1051 1051 )
1052 1052 # tracks files which were undeleted (merge might delete them but we explicitly
1053 1053 # kept/undeleted them) and creates new filenodes for them
1054 1054 coreconfigitem(
1055 1055 b'experimental',
1056 1056 b'merge-track-salvaged',
1057 1057 default=False,
1058 1058 )
1059 1059 coreconfigitem(
1060 1060 b'experimental',
1061 1061 b'mmapindexthreshold',
1062 1062 default=None,
1063 1063 )
1064 1064 coreconfigitem(
1065 1065 b'experimental',
1066 1066 b'narrow',
1067 1067 default=False,
1068 1068 )
1069 1069 coreconfigitem(
1070 1070 b'experimental',
1071 1071 b'nonnormalparanoidcheck',
1072 1072 default=False,
1073 1073 )
1074 1074 coreconfigitem(
1075 1075 b'experimental',
1076 1076 b'exportableenviron',
1077 1077 default=list,
1078 1078 )
1079 1079 coreconfigitem(
1080 1080 b'experimental',
1081 1081 b'extendedheader.index',
1082 1082 default=None,
1083 1083 )
1084 1084 coreconfigitem(
1085 1085 b'experimental',
1086 1086 b'extendedheader.similarity',
1087 1087 default=False,
1088 1088 )
1089 1089 coreconfigitem(
1090 1090 b'experimental',
1091 1091 b'graphshorten',
1092 1092 default=False,
1093 1093 )
1094 1094 coreconfigitem(
1095 1095 b'experimental',
1096 1096 b'graphstyle.parent',
1097 1097 default=dynamicdefault,
1098 1098 )
1099 1099 coreconfigitem(
1100 1100 b'experimental',
1101 1101 b'graphstyle.missing',
1102 1102 default=dynamicdefault,
1103 1103 )
1104 1104 coreconfigitem(
1105 1105 b'experimental',
1106 1106 b'graphstyle.grandparent',
1107 1107 default=dynamicdefault,
1108 1108 )
1109 1109 coreconfigitem(
1110 1110 b'experimental',
1111 1111 b'hook-track-tags',
1112 1112 default=False,
1113 1113 )
1114 1114 coreconfigitem(
1115 1115 b'experimental',
1116 1116 b'httppostargs',
1117 1117 default=False,
1118 1118 )
1119 1119 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1120 1120 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1121 1121
1122 1122 coreconfigitem(
1123 1123 b'experimental',
1124 1124 b'obsmarkers-exchange-debug',
1125 1125 default=False,
1126 1126 )
1127 1127 coreconfigitem(
1128 1128 b'experimental',
1129 1129 b'remotenames',
1130 1130 default=False,
1131 1131 )
1132 1132 coreconfigitem(
1133 1133 b'experimental',
1134 1134 b'removeemptydirs',
1135 1135 default=True,
1136 1136 )
1137 1137 coreconfigitem(
1138 1138 b'experimental',
1139 1139 b'revert.interactive.select-to-keep',
1140 1140 default=False,
1141 1141 )
1142 1142 coreconfigitem(
1143 1143 b'experimental',
1144 1144 b'revisions.prefixhexnode',
1145 1145 default=False,
1146 1146 )
1147 1147 # "out of experimental" todo list.
1148 1148 #
1149 1149 # * include management of a persistent nodemap in the main docket
1150 1150 # * enforce a "no-truncate" policy for mmap safety
1151 1151 # - for censoring operation
1152 1152 # - for stripping operation
1153 1153 # - for rollback operation
1154 1154 # * proper streaming (race free) of the docket file
1155 1155 # * track garbage data to eventually allow rewriting -existing- sidedata.
1156 1156 # * Exchange-wise, we will also need to do something more efficient than
1157 1157 # keeping references to the affected revlogs, especially memory-wise when
1158 1158 # rewriting sidedata.
1159 1159 # * introduce a proper solution to reduce the number of filelog related files.
1160 1160 # * use caching for reading sidedata (similar to what we do for data).
1161 1161 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1162 1162 # * Improvements to consider
1163 1163 # - avoid compression header in chunk using the default compression?
1164 1164 # - forbid "inline" compression mode entirely?
1165 1165 # - split the data offset and flag field (the 2 bytes saved are mostly trouble)
1166 1166 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1167 1167 # - keep track of chain base or size (probably not that useful anymore)
1168 1168 coreconfigitem(
1169 1169 b'experimental',
1170 1170 b'revlogv2',
1171 1171 default=None,
1172 1172 )
1173 1173 coreconfigitem(
1174 1174 b'experimental',
1175 1175 b'revisions.disambiguatewithin',
1176 1176 default=None,
1177 1177 )
1178 1178 coreconfigitem(
1179 1179 b'experimental',
1180 1180 b'rust.index',
1181 1181 default=False,
1182 1182 )
1183 1183 coreconfigitem(
1184 1184 b'experimental',
1185 1185 b'server.filesdata.recommended-batch-size',
1186 1186 default=50000,
1187 1187 )
1188 1188 coreconfigitem(
1189 1189 b'experimental',
1190 1190 b'server.manifestdata.recommended-batch-size',
1191 1191 default=100000,
1192 1192 )
1193 1193 coreconfigitem(
1194 1194 b'experimental',
1195 1195 b'server.stream-narrow-clones',
1196 1196 default=False,
1197 1197 )
1198 1198 coreconfigitem(
1199 1199 b'experimental',
1200 1200 b'single-head-per-branch',
1201 1201 default=False,
1202 1202 )
1203 1203 coreconfigitem(
1204 1204 b'experimental',
1205 1205 b'single-head-per-branch:account-closed-heads',
1206 1206 default=False,
1207 1207 )
1208 1208 coreconfigitem(
1209 1209 b'experimental',
1210 1210 b'single-head-per-branch:public-changes-only',
1211 1211 default=False,
1212 1212 )
1213 1213 coreconfigitem(
1214 1214 b'experimental',
1215 1215 b'sparse-read',
1216 1216 default=False,
1217 1217 )
1218 1218 coreconfigitem(
1219 1219 b'experimental',
1220 1220 b'sparse-read.density-threshold',
1221 1221 default=0.50,
1222 1222 )
1223 1223 coreconfigitem(
1224 1224 b'experimental',
1225 1225 b'sparse-read.min-gap-size',
1226 1226 default=b'65K',
1227 1227 )
1228 1228 coreconfigitem(
1229 1229 b'experimental',
1230 1230 b'treemanifest',
1231 1231 default=False,
1232 1232 )
1233 1233 coreconfigitem(
1234 1234 b'experimental',
1235 1235 b'update.atomic-file',
1236 1236 default=False,
1237 1237 )
1238 1238 coreconfigitem(
1239 1239 b'experimental',
1240 1240 b'web.full-garbage-collection-rate',
1241 1241 default=1, # still forcing a full collection on each request
1242 1242 )
1243 1243 coreconfigitem(
1244 1244 b'experimental',
1245 1245 b'worker.wdir-get-thread-safe',
1246 1246 default=False,
1247 1247 )
1248 1248 coreconfigitem(
1249 1249 b'experimental',
1250 1250 b'worker.repository-upgrade',
1251 1251 default=False,
1252 1252 )
1253 1253 coreconfigitem(
1254 1254 b'experimental',
1255 1255 b'xdiff',
1256 1256 default=False,
1257 1257 )
1258 1258 coreconfigitem(
1259 1259 b'extensions',
1260 1260 b'[^:]*',
1261 1261 default=None,
1262 1262 generic=True,
1263 1263 )
1264 1264 coreconfigitem(
1265 1265 b'extensions',
1266 1266 b'[^:]*:required',
1267 1267 default=False,
1268 1268 generic=True,
1269 1269 )
1270 1270 coreconfigitem(
1271 1271 b'extdata',
1272 1272 b'.*',
1273 1273 default=None,
1274 1274 generic=True,
1275 1275 )
1276 1276 coreconfigitem(
1277 1277 b'format',
1278 1278 b'bookmarks-in-store',
1279 1279 default=False,
1280 1280 )
1281 1281 coreconfigitem(
1282 1282 b'format',
1283 1283 b'chunkcachesize',
1284 1284 default=None,
1285 1285 experimental=True,
1286 1286 )
1287 1287 coreconfigitem(
1288 1288 # Enable this dirstate format *when creating a new repository*.
1289 1289 # Which format to use for existing repos is controlled by .hg/requires
1290 1290 b'format',
1291 1291 b'use-dirstate-v2',
1292 1292 default=False,
1293 1293 experimental=True,
1294 1294 alias=[(b'format', b'exp-rc-dirstate-v2')],
1295 1295 )
1296 1296 coreconfigitem(
1297 1297 b'format',
1298 1298 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
1299 1299 default=False,
1300 1300 experimental=True,
1301 1301 )
1302 1302 coreconfigitem(
1303 1303 b'format',
1304 1304 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet',
1305 1305 default=False,
1306 1306 experimental=True,
1307 1307 )
1308 1308 coreconfigitem(
1309 1309 b'format',
1310 1310 b'use-dirstate-tracked-hint',
1311 1311 default=False,
1312 1312 experimental=True,
1313 1313 )
1314 1314 coreconfigitem(
1315 1315 b'format',
1316 1316 b'use-dirstate-tracked-hint.version',
1317 1317 default=1,
1318 1318 experimental=True,
1319 1319 )
1320 1320 coreconfigitem(
1321 1321 b'format',
1322 1322 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
1323 1323 default=False,
1324 1324 experimental=True,
1325 1325 )
1326 1326 coreconfigitem(
1327 1327 b'format',
1328 1328 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet',
1329 1329 default=False,
1330 1330 experimental=True,
1331 1331 )
1332 1332 coreconfigitem(
1333 1333 b'format',
1334 1334 b'dotencode',
1335 1335 default=True,
1336 1336 )
1337 1337 coreconfigitem(
1338 1338 b'format',
1339 1339 b'generaldelta',
1340 1340 default=False,
1341 1341 experimental=True,
1342 1342 )
1343 1343 coreconfigitem(
1344 1344 b'format',
1345 1345 b'manifestcachesize',
1346 1346 default=None,
1347 1347 experimental=True,
1348 1348 )
1349 1349 coreconfigitem(
1350 1350 b'format',
1351 1351 b'maxchainlen',
1352 1352 default=dynamicdefault,
1353 1353 experimental=True,
1354 1354 )
1355 1355 coreconfigitem(
1356 1356 b'format',
1357 1357 b'obsstore-version',
1358 1358 default=None,
1359 1359 )
1360 1360 coreconfigitem(
1361 1361 b'format',
1362 1362 b'sparse-revlog',
1363 1363 default=True,
1364 1364 )
1365 1365 coreconfigitem(
1366 1366 b'format',
1367 1367 b'revlog-compression',
1368 1368 default=lambda: [b'zstd', b'zlib'],
1369 1369 alias=[(b'experimental', b'format.compression')],
1370 1370 )
1371 1371 # Experimental TODOs:
1372 1372 #
1373 1373 # * Same as for revlogv2 (but for the reduction of the number of files)
1374 1374 # * Actually computing the rank of changesets
1375 1375 # * Improvements to investigate
1376 1376 # - storing the .hgtags fnode
1377 1377 # - storing branch-related identifiers
1378 1378
1379 1379 coreconfigitem(
1380 1380 b'format',
1381 1381 b'exp-use-changelog-v2',
1382 1382 default=None,
1383 1383 experimental=True,
1384 1384 )
1385 1385 coreconfigitem(
1386 1386 b'format',
1387 1387 b'usefncache',
1388 1388 default=True,
1389 1389 )
1390 1390 coreconfigitem(
1391 1391 b'format',
1392 1392 b'usegeneraldelta',
1393 1393 default=True,
1394 1394 )
1395 1395 coreconfigitem(
1396 1396 b'format',
1397 1397 b'usestore',
1398 1398 default=True,
1399 1399 )
1400 1400
1401 1401
1402 1402 def _persistent_nodemap_default():
1403 1403 """compute `use-persistent-nodemap` default value
1404 1404
1405 1405 The feature is disabled unless a fast implementation is available.
1406 1406 """
1407 1407 from . import policy
1408 1408
1409 1409 return policy.importrust('revlog') is not None
1410 1410
1411 1411
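# An illustrative, standalone check of the condition driving the default
# above: whether the fast Rust revlog implementation is importable.
#
#     from mercurial import policy
#
#     print(policy.importrust('revlog') is not None)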
1412 1412 coreconfigitem(
1413 1413 b'format',
1414 1414 b'use-persistent-nodemap',
1415 1415 default=_persistent_nodemap_default,
1416 1416 )
1417 1417 coreconfigitem(
1418 1418 b'format',
1419 1419 b'exp-use-copies-side-data-changeset',
1420 1420 default=False,
1421 1421 experimental=True,
1422 1422 )
1423 1423 coreconfigitem(
1424 1424 b'format',
1425 1425 b'use-share-safe',
1426 1426 default=True,
1427 1427 )
1428 1428 coreconfigitem(
1429 1429 b'format',
1430 1430 b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
1431 1431 default=False,
1432 1432 experimental=True,
1433 1433 )
1434 1434 coreconfigitem(
1435 1435 b'format',
1436 1436 b'use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet',
1437 1437 default=False,
1438 1438 experimental=True,
1439 1439 )
1440 1440
1441 1441 # Turning this on by default means we are confident about the scaling of phases.
1442 1442 # This is not guaranteed to be the case at the time this message is written.
1443 1443 coreconfigitem(
1444 1444 b'format',
1445 1445 b'use-internal-phase',
1446 1446 default=False,
1447 1447 experimental=True,
1448 1448 )
1449 1449 # The interaction between the archived phase and obsolescence markers needs to
1450 1450 # be sorted out before wider usage of this is considered.
1451 1451 #
1452 1452 # At the time this message is written, behavior when archiving obsolete
1453 1453 # changesets differs significantly from stripping. As part of stripping, we
1454 1454 # also remove the obsolescence markers associated with the stripped
1455 1455 # changesets, revealing the predecessor changesets when applicable. When
1456 1456 # archiving, we don't touch the obsolescence markers, keeping everything
1457 1457 # hidden. This can result in quite confusing situations for people combining
1458 1458 # the exchange of drafts with the archived phase, as some markers needed by
1459 1459 # others may be skipped during exchange.
1460 1460 coreconfigitem(
1461 1461 b'format',
1462 1462 b'exp-archived-phase',
1463 1463 default=False,
1464 1464 experimental=True,
1465 1465 )
1466 1466 coreconfigitem(
1467 1467 b'shelve',
1468 1468 b'store',
1469 1469 default=b'internal',
1470 1470 experimental=True,
1471 1471 )
1472 1472 coreconfigitem(
1473 1473 b'fsmonitor',
1474 1474 b'warn_when_unused',
1475 1475 default=True,
1476 1476 )
1477 1477 coreconfigitem(
1478 1478 b'fsmonitor',
1479 1479 b'warn_update_file_count',
1480 1480 default=50000,
1481 1481 )
1482 1482 coreconfigitem(
1483 1483 b'fsmonitor',
1484 1484 b'warn_update_file_count_rust',
1485 1485 default=400000,
1486 1486 )
1487 1487 coreconfigitem(
1488 1488 b'help',
1489 1489 br'hidden-command\..*',
1490 1490 default=False,
1491 1491 generic=True,
1492 1492 )
1493 1493 coreconfigitem(
1494 1494 b'help',
1495 1495 br'hidden-topic\..*',
1496 1496 default=False,
1497 1497 generic=True,
1498 1498 )
1499 1499 coreconfigitem(
1500 1500 b'hooks',
1501 1501 b'[^:]*',
1502 1502 default=dynamicdefault,
1503 1503 generic=True,
1504 1504 )
1505 1505 coreconfigitem(
1506 1506 b'hooks',
1507 1507 b'.*:run-with-plain',
1508 1508 default=True,
1509 1509 generic=True,
1510 1510 )
1511 1511 coreconfigitem(
1512 1512 b'hgweb-paths',
1513 1513 b'.*',
1514 1514 default=list,
1515 1515 generic=True,
1516 1516 )
1517 1517 coreconfigitem(
1518 1518 b'hostfingerprints',
1519 1519 b'.*',
1520 1520 default=list,
1521 1521 generic=True,
1522 1522 )
1523 1523 coreconfigitem(
1524 1524 b'hostsecurity',
1525 1525 b'ciphers',
1526 1526 default=None,
1527 1527 )
1528 1528 coreconfigitem(
1529 1529 b'hostsecurity',
1530 1530 b'minimumprotocol',
1531 1531 default=dynamicdefault,
1532 1532 )
1533 1533 coreconfigitem(
1534 1534 b'hostsecurity',
1535 1535 b'.*:minimumprotocol$',
1536 1536 default=dynamicdefault,
1537 1537 generic=True,
1538 1538 )
1539 1539 coreconfigitem(
1540 1540 b'hostsecurity',
1541 1541 b'.*:ciphers$',
1542 1542 default=dynamicdefault,
1543 1543 generic=True,
1544 1544 )
1545 1545 coreconfigitem(
1546 1546 b'hostsecurity',
1547 1547 b'.*:fingerprints$',
1548 1548 default=list,
1549 1549 generic=True,
1550 1550 )
1551 1551 coreconfigitem(
1552 1552 b'hostsecurity',
1553 1553 b'.*:verifycertsfile$',
1554 1554 default=None,
1555 1555 generic=True,
1556 1556 )
1557 1557
1558 1558 coreconfigitem(
1559 1559 b'http_proxy',
1560 1560 b'always',
1561 1561 default=False,
1562 1562 )
1563 1563 coreconfigitem(
1564 1564 b'http_proxy',
1565 1565 b'host',
1566 1566 default=None,
1567 1567 )
1568 1568 coreconfigitem(
1569 1569 b'http_proxy',
1570 1570 b'no',
1571 1571 default=list,
1572 1572 )
1573 1573 coreconfigitem(
1574 1574 b'http_proxy',
1575 1575 b'passwd',
1576 1576 default=None,
1577 1577 )
1578 1578 coreconfigitem(
1579 1579 b'http_proxy',
1580 1580 b'user',
1581 1581 default=None,
1582 1582 )
1583 1583
1584 1584 coreconfigitem(
1585 1585 b'http',
1586 1586 b'timeout',
1587 1587 default=None,
1588 1588 )
1589 1589
1590 1590 coreconfigitem(
1591 1591 b'logtoprocess',
1592 1592 b'commandexception',
1593 1593 default=None,
1594 1594 )
1595 1595 coreconfigitem(
1596 1596 b'logtoprocess',
1597 1597 b'commandfinish',
1598 1598 default=None,
1599 1599 )
1600 1600 coreconfigitem(
1601 1601 b'logtoprocess',
1602 1602 b'command',
1603 1603 default=None,
1604 1604 )
1605 1605 coreconfigitem(
1606 1606 b'logtoprocess',
1607 1607 b'develwarn',
1608 1608 default=None,
1609 1609 )
1610 1610 coreconfigitem(
1611 1611 b'logtoprocess',
1612 1612 b'uiblocked',
1613 1613 default=None,
1614 1614 )
1615 1615 coreconfigitem(
1616 1616 b'merge',
1617 1617 b'checkunknown',
1618 1618 default=b'abort',
1619 1619 )
1620 1620 coreconfigitem(
1621 1621 b'merge',
1622 1622 b'checkignored',
1623 1623 default=b'abort',
1624 1624 )
1625 1625 coreconfigitem(
1626 1626 b'experimental',
1627 1627 b'merge.checkpathconflicts',
1628 1628 default=False,
1629 1629 )
1630 1630 coreconfigitem(
1631 1631 b'merge',
1632 1632 b'followcopies',
1633 1633 default=True,
1634 1634 )
1635 1635 coreconfigitem(
1636 1636 b'merge',
1637 1637 b'on-failure',
1638 1638 default=b'continue',
1639 1639 )
1640 1640 coreconfigitem(
1641 1641 b'merge',
1642 1642 b'preferancestor',
1643 1643 default=lambda: [b'*'],
1644 1644 experimental=True,
1645 1645 )
1646 1646 coreconfigitem(
1647 1647 b'merge',
1648 1648 b'strict-capability-check',
1649 1649 default=False,
1650 1650 )
1651 1651 coreconfigitem(
1652 1652 b'merge',
1653 1653 b'disable-partial-tools',
1654 1654 default=False,
1655 1655 experimental=True,
1656 1656 )
1657 1657 coreconfigitem(
1658 1658 b'partial-merge-tools',
1659 1659 b'.*',
1660 1660 default=None,
1661 1661 generic=True,
1662 1662 experimental=True,
1663 1663 )
1664 1664 coreconfigitem(
1665 1665 b'partial-merge-tools',
1666 1666 br'.*\.patterns',
1667 1667 default=dynamicdefault,
1668 1668 generic=True,
1669 1669 priority=-1,
1670 1670 experimental=True,
1671 1671 )
1672 1672 coreconfigitem(
1673 1673 b'partial-merge-tools',
1674 1674 br'.*\.executable$',
1675 1675 default=dynamicdefault,
1676 1676 generic=True,
1677 1677 priority=-1,
1678 1678 experimental=True,
1679 1679 )
1680 1680 coreconfigitem(
1681 1681 b'partial-merge-tools',
1682 1682 br'.*\.order',
1683 1683 default=0,
1684 1684 generic=True,
1685 1685 priority=-1,
1686 1686 experimental=True,
1687 1687 )
1688 1688 coreconfigitem(
1689 1689 b'partial-merge-tools',
1690 1690 br'.*\.args',
1691 1691 default=b"$local $base $other",
1692 1692 generic=True,
1693 1693 priority=-1,
1694 1694 experimental=True,
1695 1695 )
1696 1696 coreconfigitem(
1697 1697 b'partial-merge-tools',
1698 1698 br'.*\.disable',
1699 1699 default=False,
1700 1700 generic=True,
1701 1701 priority=-1,
1702 1702 experimental=True,
1703 1703 )
1704 1704 coreconfigitem(
1705 1705 b'merge-tools',
1706 1706 b'.*',
1707 1707 default=None,
1708 1708 generic=True,
1709 1709 )
1710 1710 coreconfigitem(
1711 1711 b'merge-tools',
1712 1712 br'.*\.args$',
1713 1713 default=b"$local $base $other",
1714 1714 generic=True,
1715 1715 priority=-1,
1716 1716 )
1717 1717 coreconfigitem(
1718 1718 b'merge-tools',
1719 1719 br'.*\.binary$',
1720 1720 default=False,
1721 1721 generic=True,
1722 1722 priority=-1,
1723 1723 )
1724 1724 coreconfigitem(
1725 1725 b'merge-tools',
1726 1726 br'.*\.check$',
1727 1727 default=list,
1728 1728 generic=True,
1729 1729 priority=-1,
1730 1730 )
1731 1731 coreconfigitem(
1732 1732 b'merge-tools',
1733 1733 br'.*\.checkchanged$',
1734 1734 default=False,
1735 1735 generic=True,
1736 1736 priority=-1,
1737 1737 )
1738 1738 coreconfigitem(
1739 1739 b'merge-tools',
1740 1740 br'.*\.executable$',
1741 1741 default=dynamicdefault,
1742 1742 generic=True,
1743 1743 priority=-1,
1744 1744 )
1745 1745 coreconfigitem(
1746 1746 b'merge-tools',
1747 1747 br'.*\.fixeol$',
1748 1748 default=False,
1749 1749 generic=True,
1750 1750 priority=-1,
1751 1751 )
1752 1752 coreconfigitem(
1753 1753 b'merge-tools',
1754 1754 br'.*\.gui$',
1755 1755 default=False,
1756 1756 generic=True,
1757 1757 priority=-1,
1758 1758 )
1759 1759 coreconfigitem(
1760 1760 b'merge-tools',
1761 1761 br'.*\.mergemarkers$',
1762 1762 default=b'basic',
1763 1763 generic=True,
1764 1764 priority=-1,
1765 1765 )
1766 1766 coreconfigitem(
1767 1767 b'merge-tools',
1768 1768 br'.*\.mergemarkertemplate$',
1769 1769 default=dynamicdefault, # take from command-templates.mergemarker
1770 1770 generic=True,
1771 1771 priority=-1,
1772 1772 )
1773 1773 coreconfigitem(
1774 1774 b'merge-tools',
1775 1775 br'.*\.priority$',
1776 1776 default=0,
1777 1777 generic=True,
1778 1778 priority=-1,
1779 1779 )
1780 1780 coreconfigitem(
1781 1781 b'merge-tools',
1782 1782 br'.*\.premerge$',
1783 1783 default=dynamicdefault,
1784 1784 generic=True,
1785 1785 priority=-1,
1786 1786 )
1787 1787 coreconfigitem(
1788 1788 b'merge-tools',
1789 1789 br'.*\.regappend$',
1790 1790 default=b"",
1791 1791 generic=True,
1792 1792 priority=-1,
1793 1793 )
1794 1794 coreconfigitem(
1795 1795 b'merge-tools',
1796 1796 br'.*\.symlink$',
1797 1797 default=False,
1798 1798 generic=True,
1799 1799 priority=-1,
1800 1800 )
1801 1801 coreconfigitem(
1802 1802 b'pager',
1803 1803 b'attend-.*',
1804 1804 default=dynamicdefault,
1805 1805 generic=True,
1806 1806 )
1807 1807 coreconfigitem(
1808 1808 b'pager',
1809 1809 b'ignore',
1810 1810 default=list,
1811 1811 )
1812 1812 coreconfigitem(
1813 1813 b'pager',
1814 1814 b'pager',
1815 1815 default=dynamicdefault,
1816 1816 )
1817 1817 coreconfigitem(
1818 1818 b'patch',
1819 1819 b'eol',
1820 1820 default=b'strict',
1821 1821 )
1822 1822 coreconfigitem(
1823 1823 b'patch',
1824 1824 b'fuzz',
1825 1825 default=2,
1826 1826 )
1827 1827 coreconfigitem(
1828 1828 b'paths',
1829 1829 b'default',
1830 1830 default=None,
1831 1831 )
1832 1832 coreconfigitem(
1833 1833 b'paths',
1834 1834 b'default-push',
1835 1835 default=None,
1836 1836 )
1837 1837 coreconfigitem(
1838 1838 b'paths',
1839 1839 b'.*',
1840 1840 default=None,
1841 1841 generic=True,
1842 1842 )
1843 1843 coreconfigitem(
1844 1844 b'paths',
1845 1845 b'.*:bookmarks.mode',
1846 1846 default='default',
1847 1847 generic=True,
1848 1848 )
1849 1849 coreconfigitem(
1850 1850 b'paths',
1851 1851 b'.*:multi-urls',
1852 1852 default=False,
1853 1853 generic=True,
1854 1854 )
1855 1855 coreconfigitem(
1856 1856 b'paths',
1857 1857 b'.*:pushrev',
1858 1858 default=None,
1859 1859 generic=True,
1860 1860 )
1861 1861 coreconfigitem(
1862 1862 b'paths',
1863 1863 b'.*:pushurl',
1864 1864 default=None,
1865 1865 generic=True,
1866 1866 )
1867 1867 coreconfigitem(
1868 1868 b'phases',
1869 1869 b'checksubrepos',
1870 1870 default=b'follow',
1871 1871 )
1872 1872 coreconfigitem(
1873 1873 b'phases',
1874 1874 b'new-commit',
1875 1875 default=b'draft',
1876 1876 )
1877 1877 coreconfigitem(
1878 1878 b'phases',
1879 1879 b'publish',
1880 1880 default=True,
1881 1881 )
1882 1882 coreconfigitem(
1883 1883 b'profiling',
1884 1884 b'enabled',
1885 1885 default=False,
1886 1886 )
1887 1887 coreconfigitem(
1888 1888 b'profiling',
1889 1889 b'format',
1890 1890 default=b'text',
1891 1891 )
1892 1892 coreconfigitem(
1893 1893 b'profiling',
1894 1894 b'freq',
1895 1895 default=1000,
1896 1896 )
1897 1897 coreconfigitem(
1898 1898 b'profiling',
1899 1899 b'limit',
1900 1900 default=30,
1901 1901 )
1902 1902 coreconfigitem(
1903 1903 b'profiling',
1904 1904 b'nested',
1905 1905 default=0,
1906 1906 )
1907 1907 coreconfigitem(
1908 1908 b'profiling',
1909 1909 b'output',
1910 1910 default=None,
1911 1911 )
1912 1912 coreconfigitem(
1913 1913 b'profiling',
1914 1914 b'showmax',
1915 1915 default=0.999,
1916 1916 )
1917 1917 coreconfigitem(
1918 1918 b'profiling',
1919 1919 b'showmin',
1920 1920 default=dynamicdefault,
1921 1921 )
1922 1922 coreconfigitem(
1923 1923 b'profiling',
1924 1924 b'showtime',
1925 1925 default=True,
1926 1926 )
1927 1927 coreconfigitem(
1928 1928 b'profiling',
1929 1929 b'sort',
1930 1930 default=b'inlinetime',
1931 1931 )
1932 1932 coreconfigitem(
1933 1933 b'profiling',
1934 1934 b'statformat',
1935 1935 default=b'hotpath',
1936 1936 )
1937 1937 coreconfigitem(
1938 1938 b'profiling',
1939 1939 b'time-track',
1940 1940 default=dynamicdefault,
1941 1941 )
1942 1942 coreconfigitem(
1943 1943 b'profiling',
1944 1944 b'type',
1945 1945 default=b'stat',
1946 1946 )
1947 1947 coreconfigitem(
1948 1948 b'progress',
1949 1949 b'assume-tty',
1950 1950 default=False,
1951 1951 )
1952 1952 coreconfigitem(
1953 1953 b'progress',
1954 1954 b'changedelay',
1955 1955 default=1,
1956 1956 )
1957 1957 coreconfigitem(
1958 1958 b'progress',
1959 1959 b'clear-complete',
1960 1960 default=True,
1961 1961 )
1962 1962 coreconfigitem(
1963 1963 b'progress',
1964 1964 b'debug',
1965 1965 default=False,
1966 1966 )
1967 1967 coreconfigitem(
1968 1968 b'progress',
1969 1969 b'delay',
1970 1970 default=3,
1971 1971 )
1972 1972 coreconfigitem(
1973 1973 b'progress',
1974 1974 b'disable',
1975 1975 default=False,
1976 1976 )
1977 1977 coreconfigitem(
1978 1978 b'progress',
1979 1979 b'estimateinterval',
1980 1980 default=60.0,
1981 1981 )
1982 1982 coreconfigitem(
1983 1983 b'progress',
1984 1984 b'format',
1985 1985 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1986 1986 )
1987 1987 coreconfigitem(
1988 1988 b'progress',
1989 1989 b'refresh',
1990 1990 default=0.1,
1991 1991 )
1992 1992 coreconfigitem(
1993 1993 b'progress',
1994 1994 b'width',
1995 1995 default=dynamicdefault,
1996 1996 )
1997 1997 coreconfigitem(
1998 1998 b'pull',
1999 1999 b'confirm',
2000 2000 default=False,
2001 2001 )
2002 2002 coreconfigitem(
2003 2003 b'push',
2004 2004 b'pushvars.server',
2005 2005 default=False,
2006 2006 )
2007 2007 coreconfigitem(
2008 2008 b'rewrite',
2009 2009 b'backup-bundle',
2010 2010 default=True,
2011 2011 alias=[(b'ui', b'history-editing-backup')],
2012 2012 )
2013 2013 coreconfigitem(
2014 2014 b'rewrite',
2015 2015 b'update-timestamp',
2016 2016 default=False,
2017 2017 )
2018 2018 coreconfigitem(
2019 2019 b'rewrite',
2020 2020 b'empty-successor',
2021 2021 default=b'skip',
2022 2022 experimental=True,
2023 2023 )
2024 2024 # experimental as long as format.use-dirstate-v2 is.
2025 2025 coreconfigitem(
2026 2026 b'storage',
2027 2027 b'dirstate-v2.slow-path',
2028 2028 default=b"abort",
2029 2029 experimental=True,
2030 2030 )
2031 2031 coreconfigitem(
2032 2032 b'storage',
2033 2033 b'new-repo-backend',
2034 2034 default=b'revlogv1',
2035 2035 experimental=True,
2036 2036 )
2037 2037 coreconfigitem(
2038 2038 b'storage',
2039 2039 b'revlog.optimize-delta-parent-choice',
2040 2040 default=True,
2041 2041 alias=[(b'format', b'aggressivemergedeltas')],
2042 2042 )
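# Per the commit message, the new option below controls how many delta base
# candidates are tested at the same time; 0 keeps the previous, unchunked
# behavior. An illustrative hgrc snippet (the value 10 is hypothetical):
#
#     [storage]
#     revlog.delta-parent-search.candidate-group-chunk-size = 10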
2043 2043 coreconfigitem(
2044 2044 b'storage',
2045 b'revlog.delta-parent-search.candidate-group-chunk-size',
2046 default=0,
2047 )
2048 coreconfigitem(
2049 b'storage',
2045 2050 b'revlog.issue6528.fix-incoming',
2046 2051 default=True,
2047 2052 )
2048 2053 # experimental as long as rust is experimental (or a C version is implemented)
2049 2054 coreconfigitem(
2050 2055 b'storage',
2051 2056 b'revlog.persistent-nodemap.mmap',
2052 2057 default=True,
2053 2058 )
2054 2059 # experimental as long as format.use-persistent-nodemap is.
2055 2060 coreconfigitem(
2056 2061 b'storage',
2057 2062 b'revlog.persistent-nodemap.slow-path',
2058 2063 default=b"abort",
2059 2064 )
2060 2065
2061 2066 coreconfigitem(
2062 2067 b'storage',
2063 2068 b'revlog.reuse-external-delta',
2064 2069 default=True,
2065 2070 )
2066 2071 coreconfigitem(
2067 2072 b'storage',
2068 2073 b'revlog.reuse-external-delta-parent',
2069 2074 default=None,
2070 2075 )
2071 2076 coreconfigitem(
2072 2077 b'storage',
2073 2078 b'revlog.zlib.level',
2074 2079 default=None,
2075 2080 )
2076 2081 coreconfigitem(
2077 2082 b'storage',
2078 2083 b'revlog.zstd.level',
2079 2084 default=None,
2080 2085 )
2081 2086 coreconfigitem(
2082 2087 b'server',
2083 2088 b'bookmarks-pushkey-compat',
2084 2089 default=True,
2085 2090 )
2086 2091 coreconfigitem(
2087 2092 b'server',
2088 2093 b'bundle1',
2089 2094 default=True,
2090 2095 )
2091 2096 coreconfigitem(
2092 2097 b'server',
2093 2098 b'bundle1gd',
2094 2099 default=None,
2095 2100 )
2096 2101 coreconfigitem(
2097 2102 b'server',
2098 2103 b'bundle1.pull',
2099 2104 default=None,
2100 2105 )
2101 2106 coreconfigitem(
2102 2107 b'server',
2103 2108 b'bundle1gd.pull',
2104 2109 default=None,
2105 2110 )
2106 2111 coreconfigitem(
2107 2112 b'server',
2108 2113 b'bundle1.push',
2109 2114 default=None,
2110 2115 )
2111 2116 coreconfigitem(
2112 2117 b'server',
2113 2118 b'bundle1gd.push',
2114 2119 default=None,
2115 2120 )
2116 2121 coreconfigitem(
2117 2122 b'server',
2118 2123 b'bundle2.stream',
2119 2124 default=True,
2120 2125 alias=[(b'experimental', b'bundle2.stream')],
2121 2126 )
2122 2127 coreconfigitem(
2123 2128 b'server',
2124 2129 b'compressionengines',
2125 2130 default=list,
2126 2131 )
2127 2132 coreconfigitem(
2128 2133 b'server',
2129 2134 b'concurrent-push-mode',
2130 2135 default=b'check-related',
2131 2136 )
2132 2137 coreconfigitem(
2133 2138 b'server',
2134 2139 b'disablefullbundle',
2135 2140 default=False,
2136 2141 )
2137 2142 coreconfigitem(
2138 2143 b'server',
2139 2144 b'maxhttpheaderlen',
2140 2145 default=1024,
2141 2146 )
2142 2147 coreconfigitem(
2143 2148 b'server',
2144 2149 b'pullbundle',
2145 2150 default=True,
2146 2151 )
2147 2152 coreconfigitem(
2148 2153 b'server',
2149 2154 b'preferuncompressed',
2150 2155 default=False,
2151 2156 )
2152 2157 coreconfigitem(
2153 2158 b'server',
2154 2159 b'streamunbundle',
2155 2160 default=False,
2156 2161 )
2157 2162 coreconfigitem(
2158 2163 b'server',
2159 2164 b'uncompressed',
2160 2165 default=True,
2161 2166 )
2162 2167 coreconfigitem(
2163 2168 b'server',
2164 2169 b'uncompressedallowsecret',
2165 2170 default=False,
2166 2171 )
2167 2172 coreconfigitem(
2168 2173 b'server',
2169 2174 b'view',
2170 2175 default=b'served',
2171 2176 )
2172 2177 coreconfigitem(
2173 2178 b'server',
2174 2179 b'validate',
2175 2180 default=False,
2176 2181 )
2177 2182 coreconfigitem(
2178 2183 b'server',
2179 2184 b'zliblevel',
2180 2185 default=-1,
2181 2186 )
2182 2187 coreconfigitem(
2183 2188 b'server',
2184 2189 b'zstdlevel',
2185 2190 default=3,
2186 2191 )
2187 2192 coreconfigitem(
2188 2193 b'share',
2189 2194 b'pool',
2190 2195 default=None,
2191 2196 )
2192 2197 coreconfigitem(
2193 2198 b'share',
2194 2199 b'poolnaming',
2195 2200 default=b'identity',
2196 2201 )
2197 2202 coreconfigitem(
2198 2203 b'share',
2199 2204 b'safe-mismatch.source-not-safe',
2200 2205 default=b'abort',
2201 2206 )
2202 2207 coreconfigitem(
2203 2208 b'share',
2204 2209 b'safe-mismatch.source-safe',
2205 2210 default=b'abort',
2206 2211 )
2207 2212 coreconfigitem(
2208 2213 b'share',
2209 2214 b'safe-mismatch.source-not-safe.warn',
2210 2215 default=True,
2211 2216 )
2212 2217 coreconfigitem(
2213 2218 b'share',
2214 2219 b'safe-mismatch.source-safe.warn',
2215 2220 default=True,
2216 2221 )
2217 2222 coreconfigitem(
2218 2223 b'share',
2219 2224 b'safe-mismatch.source-not-safe:verbose-upgrade',
2220 2225 default=True,
2221 2226 )
2222 2227 coreconfigitem(
2223 2228 b'share',
2224 2229 b'safe-mismatch.source-safe:verbose-upgrade',
2225 2230 default=True,
2226 2231 )
2227 2232 coreconfigitem(
2228 2233 b'shelve',
2229 2234 b'maxbackups',
2230 2235 default=10,
2231 2236 )
2232 2237 coreconfigitem(
2233 2238 b'smtp',
2234 2239 b'host',
2235 2240 default=None,
2236 2241 )
2237 2242 coreconfigitem(
2238 2243 b'smtp',
2239 2244 b'local_hostname',
2240 2245 default=None,
2241 2246 )
2242 2247 coreconfigitem(
2243 2248 b'smtp',
2244 2249 b'password',
2245 2250 default=None,
2246 2251 )
2247 2252 coreconfigitem(
2248 2253 b'smtp',
2249 2254 b'port',
2250 2255 default=dynamicdefault,
2251 2256 )
2252 2257 coreconfigitem(
2253 2258 b'smtp',
2254 2259 b'tls',
2255 2260 default=b'none',
2256 2261 )
2257 2262 coreconfigitem(
2258 2263 b'smtp',
2259 2264 b'username',
2260 2265 default=None,
2261 2266 )
2262 2267 coreconfigitem(
2263 2268 b'sparse',
2264 2269 b'missingwarning',
2265 2270 default=True,
2266 2271 experimental=True,
2267 2272 )
2268 2273 coreconfigitem(
2269 2274 b'subrepos',
2270 2275 b'allowed',
2271 2276 default=dynamicdefault, # to make backporting simpler
2272 2277 )
2273 2278 coreconfigitem(
2274 2279 b'subrepos',
2275 2280 b'hg:allowed',
2276 2281 default=dynamicdefault,
2277 2282 )
2278 2283 coreconfigitem(
2279 2284 b'subrepos',
2280 2285 b'git:allowed',
2281 2286 default=dynamicdefault,
2282 2287 )
2283 2288 coreconfigitem(
2284 2289 b'subrepos',
2285 2290 b'svn:allowed',
2286 2291 default=dynamicdefault,
2287 2292 )
2288 2293 coreconfigitem(
2289 2294 b'templates',
2290 2295 b'.*',
2291 2296 default=None,
2292 2297 generic=True,
2293 2298 )
2294 2299 coreconfigitem(
2295 2300 b'templateconfig',
2296 2301 b'.*',
2297 2302 default=dynamicdefault,
2298 2303 generic=True,
2299 2304 )
2300 2305 coreconfigitem(
2301 2306 b'trusted',
2302 2307 b'groups',
2303 2308 default=list,
2304 2309 )
2305 2310 coreconfigitem(
2306 2311 b'trusted',
2307 2312 b'users',
2308 2313 default=list,
2309 2314 )
2310 2315 coreconfigitem(
2311 2316 b'ui',
2312 2317 b'_usedassubrepo',
2313 2318 default=False,
2314 2319 )
2315 2320 coreconfigitem(
2316 2321 b'ui',
2317 2322 b'allowemptycommit',
2318 2323 default=False,
2319 2324 )
2320 2325 coreconfigitem(
2321 2326 b'ui',
2322 2327 b'archivemeta',
2323 2328 default=True,
2324 2329 )
2325 2330 coreconfigitem(
2326 2331 b'ui',
2327 2332 b'askusername',
2328 2333 default=False,
2329 2334 )
2330 2335 coreconfigitem(
2331 2336 b'ui',
2332 2337 b'available-memory',
2333 2338 default=None,
2334 2339 )
2335 2340
2336 2341 coreconfigitem(
2337 2342 b'ui',
2338 2343 b'clonebundlefallback',
2339 2344 default=False,
2340 2345 )
2341 2346 coreconfigitem(
2342 2347 b'ui',
2343 2348 b'clonebundleprefers',
2344 2349 default=list,
2345 2350 )
2346 2351 coreconfigitem(
2347 2352 b'ui',
2348 2353 b'clonebundles',
2349 2354 default=True,
2350 2355 )
2351 2356 coreconfigitem(
2352 2357 b'ui',
2353 2358 b'color',
2354 2359 default=b'auto',
2355 2360 )
2356 2361 coreconfigitem(
2357 2362 b'ui',
2358 2363 b'commitsubrepos',
2359 2364 default=False,
2360 2365 )
2361 2366 coreconfigitem(
2362 2367 b'ui',
2363 2368 b'debug',
2364 2369 default=False,
2365 2370 )
2366 2371 coreconfigitem(
2367 2372 b'ui',
2368 2373 b'debugger',
2369 2374 default=None,
2370 2375 )
2371 2376 coreconfigitem(
2372 2377 b'ui',
2373 2378 b'editor',
2374 2379 default=dynamicdefault,
2375 2380 )
2376 2381 coreconfigitem(
2377 2382 b'ui',
2378 2383 b'detailed-exit-code',
2379 2384 default=False,
2380 2385 experimental=True,
2381 2386 )
2382 2387 coreconfigitem(
2383 2388 b'ui',
2384 2389 b'fallbackencoding',
2385 2390 default=None,
2386 2391 )
2387 2392 coreconfigitem(
2388 2393 b'ui',
2389 2394 b'forcecwd',
2390 2395 default=None,
2391 2396 )
2392 2397 coreconfigitem(
2393 2398 b'ui',
2394 2399 b'forcemerge',
2395 2400 default=None,
2396 2401 )
2397 2402 coreconfigitem(
2398 2403 b'ui',
2399 2404 b'formatdebug',
2400 2405 default=False,
2401 2406 )
2402 2407 coreconfigitem(
2403 2408 b'ui',
2404 2409 b'formatjson',
2405 2410 default=False,
2406 2411 )
2407 2412 coreconfigitem(
2408 2413 b'ui',
2409 2414 b'formatted',
2410 2415 default=None,
2411 2416 )
2412 2417 coreconfigitem(
2413 2418 b'ui',
2414 2419 b'interactive',
2415 2420 default=None,
2416 2421 )
2417 2422 coreconfigitem(
2418 2423 b'ui',
2419 2424 b'interface',
2420 2425 default=None,
2421 2426 )
2422 2427 coreconfigitem(
2423 2428 b'ui',
2424 2429 b'interface.chunkselector',
2425 2430 default=None,
2426 2431 )
2427 2432 coreconfigitem(
2428 2433 b'ui',
2429 2434 b'large-file-limit',
2430 2435 default=10 * (2 ** 20),
2431 2436 )
2432 2437 coreconfigitem(
2433 2438 b'ui',
2434 2439 b'logblockedtimes',
2435 2440 default=False,
2436 2441 )
2437 2442 coreconfigitem(
2438 2443 b'ui',
2439 2444 b'merge',
2440 2445 default=None,
2441 2446 )
2442 2447 coreconfigitem(
2443 2448 b'ui',
2444 2449 b'mergemarkers',
2445 2450 default=b'basic',
2446 2451 )
2447 2452 coreconfigitem(
2448 2453 b'ui',
2449 2454 b'message-output',
2450 2455 default=b'stdio',
2451 2456 )
2452 2457 coreconfigitem(
2453 2458 b'ui',
2454 2459 b'nontty',
2455 2460 default=False,
2456 2461 )
2457 2462 coreconfigitem(
2458 2463 b'ui',
2459 2464 b'origbackuppath',
2460 2465 default=None,
2461 2466 )
2462 2467 coreconfigitem(
2463 2468 b'ui',
2464 2469 b'paginate',
2465 2470 default=True,
2466 2471 )
2467 2472 coreconfigitem(
2468 2473 b'ui',
2469 2474 b'patch',
2470 2475 default=None,
2471 2476 )
2472 2477 coreconfigitem(
2473 2478 b'ui',
2474 2479 b'portablefilenames',
2475 2480 default=b'warn',
2476 2481 )
2477 2482 coreconfigitem(
2478 2483 b'ui',
2479 2484 b'promptecho',
2480 2485 default=False,
2481 2486 )
2482 2487 coreconfigitem(
2483 2488 b'ui',
2484 2489 b'quiet',
2485 2490 default=False,
2486 2491 )
2487 2492 coreconfigitem(
2488 2493 b'ui',
2489 2494 b'quietbookmarkmove',
2490 2495 default=False,
2491 2496 )
2492 2497 coreconfigitem(
2493 2498 b'ui',
2494 2499 b'relative-paths',
2495 2500 default=b'legacy',
2496 2501 )
2497 2502 coreconfigitem(
2498 2503 b'ui',
2499 2504 b'remotecmd',
2500 2505 default=b'hg',
2501 2506 )
2502 2507 coreconfigitem(
2503 2508 b'ui',
2504 2509 b'report_untrusted',
2505 2510 default=True,
2506 2511 )
2507 2512 coreconfigitem(
2508 2513 b'ui',
2509 2514 b'rollback',
2510 2515 default=True,
2511 2516 )
2512 2517 coreconfigitem(
2513 2518 b'ui',
2514 2519 b'signal-safe-lock',
2515 2520 default=True,
2516 2521 )
2517 2522 coreconfigitem(
2518 2523 b'ui',
2519 2524 b'slash',
2520 2525 default=False,
2521 2526 )
2522 2527 coreconfigitem(
2523 2528 b'ui',
2524 2529 b'ssh',
2525 2530 default=b'ssh',
2526 2531 )
2527 2532 coreconfigitem(
2528 2533 b'ui',
2529 2534 b'ssherrorhint',
2530 2535 default=None,
2531 2536 )
2532 2537 coreconfigitem(
2533 2538 b'ui',
2534 2539 b'statuscopies',
2535 2540 default=False,
2536 2541 )
2537 2542 coreconfigitem(
2538 2543 b'ui',
2539 2544 b'strict',
2540 2545 default=False,
2541 2546 )
2542 2547 coreconfigitem(
2543 2548 b'ui',
2544 2549 b'style',
2545 2550 default=b'',
2546 2551 )
2547 2552 coreconfigitem(
2548 2553 b'ui',
2549 2554 b'supportcontact',
2550 2555 default=None,
2551 2556 )
2552 2557 coreconfigitem(
2553 2558 b'ui',
2554 2559 b'textwidth',
2555 2560 default=78,
2556 2561 )
2557 2562 coreconfigitem(
2558 2563 b'ui',
2559 2564 b'timeout',
2560 2565 default=b'600',
2561 2566 )
2562 2567 coreconfigitem(
2563 2568 b'ui',
2564 2569 b'timeout.warn',
2565 2570 default=0,
2566 2571 )
2567 2572 coreconfigitem(
2568 2573 b'ui',
2569 2574 b'timestamp-output',
2570 2575 default=False,
2571 2576 )
2572 2577 coreconfigitem(
2573 2578 b'ui',
2574 2579 b'traceback',
2575 2580 default=False,
2576 2581 )
2577 2582 coreconfigitem(
2578 2583 b'ui',
2579 2584 b'tweakdefaults',
2580 2585 default=False,
2581 2586 )
2582 2587 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2583 2588 coreconfigitem(
2584 2589 b'ui',
2585 2590 b'verbose',
2586 2591 default=False,
2587 2592 )
2588 2593 coreconfigitem(
2589 2594 b'verify',
2590 2595 b'skipflags',
2591 2596 default=0,
2592 2597 )
2593 2598 coreconfigitem(
2594 2599 b'web',
2595 2600 b'allowbz2',
2596 2601 default=False,
2597 2602 )
2598 2603 coreconfigitem(
2599 2604 b'web',
2600 2605 b'allowgz',
2601 2606 default=False,
2602 2607 )
2603 2608 coreconfigitem(
2604 2609 b'web',
2605 2610 b'allow-pull',
2606 2611 alias=[(b'web', b'allowpull')],
2607 2612 default=True,
2608 2613 )
2609 2614 coreconfigitem(
2610 2615 b'web',
2611 2616 b'allow-push',
2612 2617 alias=[(b'web', b'allow_push')],
2613 2618 default=list,
2614 2619 )
2615 2620 coreconfigitem(
2616 2621 b'web',
2617 2622 b'allowzip',
2618 2623 default=False,
2619 2624 )
2620 2625 coreconfigitem(
2621 2626 b'web',
2622 2627 b'archivesubrepos',
2623 2628 default=False,
2624 2629 )
2625 2630 coreconfigitem(
2626 2631 b'web',
2627 2632 b'cache',
2628 2633 default=True,
2629 2634 )
2630 2635 coreconfigitem(
2631 2636 b'web',
2632 2637 b'comparisoncontext',
2633 2638 default=5,
2634 2639 )
2635 2640 coreconfigitem(
2636 2641 b'web',
2637 2642 b'contact',
2638 2643 default=None,
2639 2644 )
2640 2645 coreconfigitem(
2641 2646 b'web',
2642 2647 b'deny_push',
2643 2648 default=list,
2644 2649 )
2645 2650 coreconfigitem(
2646 2651 b'web',
2647 2652 b'guessmime',
2648 2653 default=False,
2649 2654 )
2650 2655 coreconfigitem(
2651 2656 b'web',
2652 2657 b'hidden',
2653 2658 default=False,
2654 2659 )
2655 2660 coreconfigitem(
2656 2661 b'web',
2657 2662 b'labels',
2658 2663 default=list,
2659 2664 )
2660 2665 coreconfigitem(
2661 2666 b'web',
2662 2667 b'logoimg',
2663 2668 default=b'hglogo.png',
2664 2669 )
2665 2670 coreconfigitem(
2666 2671 b'web',
2667 2672 b'logourl',
2668 2673 default=b'https://mercurial-scm.org/',
2669 2674 )
2670 2675 coreconfigitem(
2671 2676 b'web',
2672 2677 b'accesslog',
2673 2678 default=b'-',
2674 2679 )
2675 2680 coreconfigitem(
2676 2681 b'web',
2677 2682 b'address',
2678 2683 default=b'',
2679 2684 )
2680 2685 coreconfigitem(
2681 2686 b'web',
2682 2687 b'allow-archive',
2683 2688 alias=[(b'web', b'allow_archive')],
2684 2689 default=list,
2685 2690 )
2686 2691 coreconfigitem(
2687 2692 b'web',
2688 2693 b'allow_read',
2689 2694 default=list,
2690 2695 )
2691 2696 coreconfigitem(
2692 2697 b'web',
2693 2698 b'baseurl',
2694 2699 default=None,
2695 2700 )
2696 2701 coreconfigitem(
2697 2702 b'web',
2698 2703 b'cacerts',
2699 2704 default=None,
2700 2705 )
2701 2706 coreconfigitem(
2702 2707 b'web',
2703 2708 b'certificate',
2704 2709 default=None,
2705 2710 )
2706 2711 coreconfigitem(
2707 2712 b'web',
2708 2713 b'collapse',
2709 2714 default=False,
2710 2715 )
2711 2716 coreconfigitem(
2712 2717 b'web',
2713 2718 b'csp',
2714 2719 default=None,
2715 2720 )
2716 2721 coreconfigitem(
2717 2722 b'web',
2718 2723 b'deny_read',
2719 2724 default=list,
2720 2725 )
2721 2726 coreconfigitem(
2722 2727 b'web',
2723 2728 b'descend',
2724 2729 default=True,
2725 2730 )
2726 2731 coreconfigitem(
2727 2732 b'web',
2728 2733 b'description',
2729 2734 default=b"",
2730 2735 )
2731 2736 coreconfigitem(
2732 2737 b'web',
2733 2738 b'encoding',
2734 2739 default=lambda: encoding.encoding,
2735 2740 )
2736 2741 coreconfigitem(
2737 2742 b'web',
2738 2743 b'errorlog',
2739 2744 default=b'-',
2740 2745 )
2741 2746 coreconfigitem(
2742 2747 b'web',
2743 2748 b'ipv6',
2744 2749 default=False,
2745 2750 )
2746 2751 coreconfigitem(
2747 2752 b'web',
2748 2753 b'maxchanges',
2749 2754 default=10,
2750 2755 )
2751 2756 coreconfigitem(
2752 2757 b'web',
2753 2758 b'maxfiles',
2754 2759 default=10,
2755 2760 )
2756 2761 coreconfigitem(
2757 2762 b'web',
2758 2763 b'maxshortchanges',
2759 2764 default=60,
2760 2765 )
2761 2766 coreconfigitem(
2762 2767 b'web',
2763 2768 b'motd',
2764 2769 default=b'',
2765 2770 )
2766 2771 coreconfigitem(
2767 2772 b'web',
2768 2773 b'name',
2769 2774 default=dynamicdefault,
2770 2775 )
2771 2776 coreconfigitem(
2772 2777 b'web',
2773 2778 b'port',
2774 2779 default=8000,
2775 2780 )
2776 2781 coreconfigitem(
2777 2782 b'web',
2778 2783 b'prefix',
2779 2784 default=b'',
2780 2785 )
2781 2786 coreconfigitem(
2782 2787 b'web',
2783 2788 b'push_ssl',
2784 2789 default=True,
2785 2790 )
2786 2791 coreconfigitem(
2787 2792 b'web',
2788 2793 b'refreshinterval',
2789 2794 default=20,
2790 2795 )
2791 2796 coreconfigitem(
2792 2797 b'web',
2793 2798 b'server-header',
2794 2799 default=None,
2795 2800 )
2796 2801 coreconfigitem(
2797 2802 b'web',
2798 2803 b'static',
2799 2804 default=None,
2800 2805 )
2801 2806 coreconfigitem(
2802 2807 b'web',
2803 2808 b'staticurl',
2804 2809 default=None,
2805 2810 )
2806 2811 coreconfigitem(
2807 2812 b'web',
2808 2813 b'stripes',
2809 2814 default=1,
2810 2815 )
2811 2816 coreconfigitem(
2812 2817 b'web',
2813 2818 b'style',
2814 2819 default=b'paper',
2815 2820 )
2816 2821 coreconfigitem(
2817 2822 b'web',
2818 2823 b'templates',
2819 2824 default=None,
2820 2825 )
2821 2826 coreconfigitem(
2822 2827 b'web',
2823 2828 b'view',
2824 2829 default=b'served',
2825 2830 experimental=True,
2826 2831 )
2827 2832 coreconfigitem(
2828 2833 b'worker',
2829 2834 b'backgroundclose',
2830 2835 default=dynamicdefault,
2831 2836 )
2832 2837 # Windows defaults to a limit of 512 open files. A buffer of 128
2833 2838 # should give us enough headway.
2834 2839 coreconfigitem(
2835 2840 b'worker',
2836 2841 b'backgroundclosemaxqueue',
2837 2842 default=384,
2838 2843 )
2839 2844 coreconfigitem(
2840 2845 b'worker',
2841 2846 b'backgroundcloseminfilecount',
2842 2847 default=2048,
2843 2848 )
2844 2849 coreconfigitem(
2845 2850 b'worker',
2846 2851 b'backgroundclosethreadcount',
2847 2852 default=4,
2848 2853 )
2849 2854 coreconfigitem(
2850 2855 b'worker',
2851 2856 b'enabled',
2852 2857 default=True,
2853 2858 )
2854 2859 coreconfigitem(
2855 2860 b'worker',
2856 2861 b'numcpus',
2857 2862 default=None,
2858 2863 )
2859 2864
2860 2865 # Rebase-related configuration moved to core because other extensions are doing
2861 2866 # strange things. For example, shelve imports the extension to reuse some bits
2862 2867 # without formally loading it.
2863 2868 coreconfigitem(
2864 2869 b'commands',
2865 2870 b'rebase.requiredest',
2866 2871 default=False,
2867 2872 )
2868 2873 coreconfigitem(
2869 2874 b'experimental',
2870 2875 b'rebaseskipobsolete',
2871 2876 default=True,
2872 2877 )
2873 2878 coreconfigitem(
2874 2879 b'rebase',
2875 2880 b'singletransaction',
2876 2881 default=False,
2877 2882 )
2878 2883 coreconfigitem(
2879 2884 b'rebase',
2880 2885 b'experimental.inmemory',
2881 2886 default=False,
2882 2887 )
2883 2888
2884 2889 # This setting controls creation of a rebase_source extra field
2885 2890 # during rebase. When False, no such field is created. This is
2886 2891 # useful, e.g., for incrementally converting changesets and then
2887 2892 # rebasing them onto an existing repo.
2888 2893 # WARNING: this is an advanced setting reserved for people who know
2889 2894 # exactly what they are doing. Misuse of this setting can easily
2890 2895 # result in obsmarker cycles and a vivid headache.
2891 2896 coreconfigitem(
2892 2897 b'rebase',
2893 2898 b'store-source',
2894 2899 default=True,
2895 2900 experimental=True,
2896 2901 )
@@ -1,3332 +1,3347 b''
1 1 The Mercurial system uses a set of configuration files to control
2 2 aspects of its behavior.
3 3
4 4 Troubleshooting
5 5 ===============
6 6
7 7 If you're having problems with your configuration,
8 8 :hg:`config --source` can help you understand what is introducing
9 9 a setting into your environment.
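
For example, to see where a specific setting comes from::

    $ hg config --source ui.username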
10 10
11 11 See :hg:`help config.syntax` and :hg:`help config.files`
12 12 for information about how and where to override things.
13 13
14 14 Structure
15 15 =========
16 16
17 17 The configuration files use a simple ini-file format. A configuration
18 18 file consists of sections, led by a ``[section]`` header and followed
19 19 by ``name = value`` entries::
20 20
21 21 [ui]
22 22 username = Firstname Lastname <firstname.lastname@example.net>
23 23 verbose = True
24 24
25 25 The above entries will be referred to as ``ui.username`` and
26 26 ``ui.verbose``, respectively. See :hg:`help config.syntax`.
27 27
28 28 Files
29 29 =====
30 30
31 31 Mercurial reads configuration data from several files, if they exist.
32 32 These files do not exist by default and you will have to create the
33 33 appropriate configuration files yourself:
34 34
35 35 Local configuration is put into the per-repository ``<repo>/.hg/hgrc`` file.
36 36
37 37 Global configuration like the username setting is typically put into:
38 38
39 39 .. container:: windows
40 40
41 41 - ``%USERPROFILE%\mercurial.ini`` (on Windows)
42 42
43 43 .. container:: unix.plan9
44 44
45 45 - ``$HOME/.hgrc`` (on Unix, Plan9)
46 46
47 47 The names of these files depend on the system on which Mercurial is
48 48 installed. ``*.rc`` files from a single directory are read in
49 49 alphabetical order, later ones overriding earlier ones. Where multiple
50 50 paths are given below, settings from earlier paths override later
51 51 ones.
52 52
53 53 .. container:: verbose.unix
54 54
55 55 On Unix, the following files are consulted:
56 56
57 57 - ``<repo>/.hg/hgrc-not-shared`` (per-repository)
58 58 - ``<repo>/.hg/hgrc`` (per-repository)
59 59 - ``$HOME/.hgrc`` (per-user)
60 60 - ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
61 61 - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
62 62 - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
63 63 - ``/etc/mercurial/hgrc`` (per-system)
64 64 - ``/etc/mercurial/hgrc.d/*.rc`` (per-system)
65 65 - ``<internal>/*.rc`` (defaults)
66 66
67 67 .. container:: verbose.windows
68 68
69 69 On Windows, the following files are consulted:
70 70
71 71 - ``<repo>/.hg/hgrc-not-shared`` (per-repository)
72 72 - ``<repo>/.hg/hgrc`` (per-repository)
73 73 - ``%USERPROFILE%\.hgrc`` (per-user)
74 74 - ``%USERPROFILE%\Mercurial.ini`` (per-user)
75 75 - ``%HOME%\.hgrc`` (per-user)
76 76 - ``%HOME%\Mercurial.ini`` (per-user)
77 77 - ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` (per-system)
78 78 - ``<install-dir>\hgrc.d\*.rc`` (per-installation)
79 79 - ``<install-dir>\Mercurial.ini`` (per-installation)
80 80 - ``%PROGRAMDATA%\Mercurial\hgrc`` (per-system)
81 81 - ``%PROGRAMDATA%\Mercurial\Mercurial.ini`` (per-system)
82 82 - ``%PROGRAMDATA%\Mercurial\hgrc.d\*.rc`` (per-system)
83 83 - ``<internal>/*.rc`` (defaults)
84 84
85 85 .. note::
86 86
87 87 The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
88 88 is used when running 32-bit Python on 64-bit Windows.
89 89
90 90 .. container:: verbose.plan9
91 91
92 92 On Plan9, the following files are consulted:
93 93
94 94 - ``<repo>/.hg/hgrc-not-shared`` (per-repository)
95 95 - ``<repo>/.hg/hgrc`` (per-repository)
96 96 - ``$home/lib/hgrc`` (per-user)
97 97 - ``<install-root>/lib/mercurial/hgrc`` (per-installation)
98 98 - ``<install-root>/lib/mercurial/hgrc.d/*.rc`` (per-installation)
99 99 - ``/lib/mercurial/hgrc`` (per-system)
100 100 - ``/lib/mercurial/hgrc.d/*.rc`` (per-system)
101 101 - ``<internal>/*.rc`` (defaults)
102 102
103 103 Per-repository configuration options only apply in a
104 104 particular repository. This file is not version-controlled, and
105 105 will not get transferred during a "clone" operation. Options in
106 106 this file override options in all other configuration files.
107 107
108 108 .. container:: unix.plan9
109 109
110 110 On Plan 9 and Unix, most of this file will be ignored if it doesn't
111 111 belong to a trusted user or to a trusted group. See
112 112 :hg:`help config.trusted` for more details.
113 113
114 114 Per-user configuration file(s) are for the user running Mercurial. Options
115 115 in these files apply to all Mercurial commands executed by this user in any
116 116 directory. Options in these files override per-system and per-installation
117 117 options.
118 118
119 119 Per-installation configuration files are searched for in the
120 120 directory where Mercurial is installed. ``<install-root>`` is the
121 121 parent directory of the **hg** executable (or symlink) being run.
122 122
123 123 .. container:: unix.plan9
124 124
125 125 For example, if installed in ``/shared/tools/bin/hg``, Mercurial
126 126 will look in ``/shared/tools/etc/mercurial/hgrc``. Options in these
127 127 files apply to all Mercurial commands executed by any user in any
128 128 directory.
129 129
130 130 Per-installation configuration files are for the system on
131 131 which Mercurial is running. Options in these files apply to all
132 132 Mercurial commands executed by any user in any directory. Registry
133 133 keys contain PATH-like strings, every part of which must reference
134 134 a ``Mercurial.ini`` file or be a directory where ``*.rc`` files will
135 135 be read. Mercurial checks each of these locations in the specified
136 136 order until one or more configuration files are detected.
137 137
138 138 Per-system configuration files are for the system on which Mercurial
139 139 is running. Options in these files apply to all Mercurial commands
140 140 executed by any user in any directory. Options in these files
141 141 override per-installation options.
142 142
143 143 Mercurial comes with some default configuration. The default configuration
144 144 files are installed with Mercurial and will be overwritten on upgrades. Default
145 145 configuration files should never be edited by users or administrators but can
146 146 be overridden in other configuration files. So far the directory only contains
147 147 merge tool configuration but packagers can also put other default configuration
148 148 there.
149 149
150 150 On versions 5.7 and later, if share-safe functionality is enabled,
151 151 shares will read the config file of the share source too.
152 152 `<share-source/.hg/hgrc>` is read before reading `<repo/.hg/hgrc>`.
153 153
154 154 For configs which should not be shared, `<repo/.hg/hgrc-not-shared>`
155 155 should be used.
156 156
157 157 Syntax
158 158 ======
159 159
160 160 A configuration file consists of sections, led by a ``[section]`` header
161 161 and followed by ``name = value`` entries (sometimes called
162 162 ``configuration keys``)::
163 163
164 164 [spam]
165 165 eggs=ham
166 166 green=
167 167 eggs
168 168
169 169 Each line contains one entry. If the lines that follow are indented,
170 170 they are treated as continuations of that entry. Leading whitespace is
171 171 removed from values. Empty lines are skipped. Lines beginning with
172 172 ``#`` or ``;`` are ignored and may be used to provide comments.
173 173
174 174 Configuration keys can be set multiple times, in which case Mercurial
175 175 will use the value that was configured last. As an example::
176 176
177 177 [spam]
178 178 eggs=large
179 179 ham=serrano
180 180 eggs=small
181 181
182 182 This would set the configuration key named ``eggs`` to ``small``.
183 183
184 184 It is also possible to define a section multiple times. A section can
185 185 be redefined in the same and/or in different configuration files. For
186 186 example::
187 187
188 188 [foo]
189 189 eggs=large
190 190 ham=serrano
191 191 eggs=small
192 192
193 193 [bar]
194 194 eggs=ham
195 195 green=
196 196 eggs
197 197
198 198 [foo]
199 199 ham=prosciutto
200 200 eggs=medium
201 201 bread=toasted
202 202
203 203 This would set the ``eggs``, ``ham``, and ``bread`` configuration keys
204 204 of the ``foo`` section to ``medium``, ``prosciutto``, and ``toasted``,
205 205 respectively. As you can see, the only thing that matters is the last
206 206 value that was set for each of the configuration keys.
207 207
208 208 If a configuration key is set multiple times in different
209 209 configuration files, the final value will depend on the order in which
210 210 the different configuration files are read, with settings from earlier
211 211 paths overriding later ones, as described in the ``Files`` section
212 212 above.
213 213
214 214 A line of the form ``%include file`` will include ``file`` into the
215 215 current configuration file. The inclusion is recursive, which means
216 216 that included files can include other files. Filenames are relative to
217 217 the configuration file in which the ``%include`` directive is found.
218 218 Environment variables and ``~user`` constructs are expanded in
219 219 ``file``. This lets you do something like::
220 220
221 221 %include ~/.hgrc.d/$HOST.rc
222 222
223 223 to include a different configuration file on each computer you use.
224 224
225 225 A line with ``%unset name`` will remove ``name`` from the current
226 226 section, if it has been set previously.
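
For example, a later configuration file can clear a value set earlier::

    [ui]
    %unset verbose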
227 227
228 228 The values are either free-form text strings, lists of text strings,
229 229 or Boolean values. Boolean values can be set to true using any of "1",
230 230 "yes", "true", or "on" and to false using "0", "no", "false", or "off"
231 231 (all case insensitive).
232 232
233 233 List values are separated by whitespace or comma, except when values are
234 234 placed in double quotation marks::
235 235
236 236 allow_read = "John Doe, PhD", brian, betty
237 237
238 238 Quotation marks can be escaped by prefixing them with a backslash. Only
239 239 quotation marks at the beginning of a word are counted as a quotation
240 240 (e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
241 241
242 242 Sections
243 243 ========
244 244
245 245 This section describes the different sections that may appear in a
246 246 Mercurial configuration file, the purpose of each section, its possible
247 247 keys, and their possible values.
248 248
249 249 ``alias``
250 250 ---------
251 251
252 252 Defines command aliases.
253 253
254 254 Aliases allow you to define your own commands in terms of other
255 255 commands (or aliases), optionally including arguments. Positional
256 256 arguments in the form of ``$1``, ``$2``, etc. in the alias definition
257 257 are expanded by Mercurial before execution. Positional arguments not
258 258 already used by ``$N`` in the definition are put at the end of the
259 259 command to be executed.
260 260
261 261 Alias definitions consist of lines of the form::
262 262
263 263 <alias> = <command> [<argument>]...
264 264
265 265 For example, this definition::
266 266
267 267 latest = log --limit 5
268 268
269 269 creates a new command ``latest`` that shows only the five most recent
270 270 changesets. You can define subsequent aliases using earlier ones::
271 271
272 272 stable5 = latest -b stable
273 273
274 274 .. note::
275 275
276 276 It is possible to create aliases with the same names as
277 277 existing commands, which will then override the original
278 278 definitions. This is almost always a bad idea!
279 279
280 280 An alias can start with an exclamation point (``!``) to make it a
281 281 shell alias. A shell alias is executed with the shell and will let you
282 282 run arbitrary commands. As an example, ::
283 283
284 284 echo = !echo $@
285 285
286 286 will let you do ``hg echo foo`` to have ``foo`` printed in your
287 287 terminal. A better example might be::
288 288
289 289 purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
290 290
291 291 which will make ``hg purge`` delete all unknown files in the
292 292 repository in the same manner as the purge extension.
293 293
294 294 Positional arguments like ``$1``, ``$2``, etc. in the alias definition
295 295 expand to the command arguments. Unmatched arguments are
296 296 removed. ``$0`` expands to the alias name and ``$@`` expands to all
297 297 arguments separated by a space. ``"$@"`` (with quotes) expands to all
298 298 arguments quoted individually and separated by a space. These expansions
299 299 happen before the command is passed to the shell.
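
For example, a simple alias taking one positional argument (the alias
name here is made up)::

    [alias]
    # show the changes introduced by a single revision
    showrev = diff -c $1

With this definition, ``hg showrev 42`` expands to ``hg diff -c 42``.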
300 300
301 301 Shell aliases are executed in an environment where ``$HG`` expands to
302 302 the path of the Mercurial that was used to execute the alias. This is
303 303 useful when you want to call further Mercurial commands in a shell
304 304 alias, as was done above for the purge alias. In addition,
305 305 ``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg
306 306 echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.
307 307
308 308 .. note::
309 309
310 310 Some global configuration options such as ``-R`` are
311 311 processed before shell aliases and will thus not be passed to
312 312 aliases.
313 313
314 314
315 315 ``annotate``
316 316 ------------
317 317
318 318 Settings used when displaying file annotations. All values are
319 319 Booleans and default to False. See :hg:`help config.diff` for
320 320 related options for the diff command.
321 321
322 322 ``ignorews``
323 323 Ignore white space when comparing lines.
324 324
325 325 ``ignorewseol``
326 326 Ignore white space at the end of a line when comparing lines.
327 327
328 328 ``ignorewsamount``
329 329 Ignore changes in the amount of white space.
330 330
331 331 ``ignoreblanklines``
332 332 Ignore changes whose lines are all blank.
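
For example, to ignore whitespace-only changes when annotating
(illustrative values)::

    [annotate]
    ignorews = True
    ignoreblanklines = True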
333 333
334 334
335 335 ``auth``
336 336 --------
337 337
338 338 Authentication credentials and other authentication-like configuration
339 339 for HTTP connections. This section allows you to store usernames and
340 340 passwords for use when logging *into* HTTP servers. See
341 341 :hg:`help config.web` if you want to configure *who* can login to
342 342 your HTTP server.
343 343
344 344 The following options apply to all hosts.
345 345
346 346 ``cookiefile``
347 347 Path to a file containing HTTP cookie lines. Cookies matching a
348 348 host will be sent automatically.
349 349
350 350 The file format uses the Mozilla cookies.txt format, which defines cookies
351 351 on their own lines. Each line contains 7 fields delimited by the tab
352 352 character (domain, is_domain_cookie, path, is_secure, expires, name,
353 353 value). For more info, do an Internet search for "Netscape cookies.txt
354 354 format."
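
An illustrative line, with ``<TAB>`` standing in for a literal tab
character (all values here are made up)::

    .example.com<TAB>TRUE<TAB>/<TAB>FALSE<TAB>0<TAB>hgsession<TAB>abc123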
355 355
356 356 Note: the cookies parser does not handle port numbers on domains. You
357 357 will need to remove ports from the domain for the cookie to be recognized.
358 358 This could result in a cookie being disclosed to an unwanted server.
359 359
360 360 The cookies file is read-only.
361 361
362 362 Other options in this section are grouped by name and have the following
363 363 format::
364 364
365 365 <name>.<argument> = <value>
366 366
367 367 where ``<name>`` is used to group arguments into authentication
368 368 entries. Example::
369 369
370 370 foo.prefix = hg.intevation.de/mercurial
371 371 foo.username = foo
372 372 foo.password = bar
373 373 foo.schemes = http https
374 374
375 375 bar.prefix = secure.example.org
376 376 bar.key = path/to/file.key
377 377 bar.cert = path/to/file.cert
378 378 bar.schemes = https
379 379
380 380 Supported arguments:
381 381
382 382 ``prefix``
383 383 Either ``*`` or a URI prefix with or without the scheme part.
384 384 The authentication entry with the longest matching prefix is used
385 385 (where ``*`` matches everything and counts as a match of length
386 386 1). If the prefix doesn't include a scheme, the match is performed
387 387 against the URI with its scheme stripped as well, and the schemes
388 388 argument, q.v., is then consulted.
389 389
390 390 ``username``
391 391 Optional. Username to authenticate with. If not given, and the
392 392 remote site requires basic or digest authentication, the user will
393 393 be prompted for it. Environment variables are expanded in the
394 394 username letting you do ``foo.username = $USER``. If the URI
395 395 includes a username, only ``[auth]`` entries with a matching
396 396 username or without a username will be considered.
397 397
398 398 ``password``
399 399 Optional. Password to authenticate with. If not given, and the
400 400 remote site requires basic or digest authentication, the user
401 401 will be prompted for it.
402 402
403 403 ``key``
404 404 Optional. PEM encoded client certificate key file. Environment
405 405 variables are expanded in the filename.
406 406
407 407 ``cert``
408 408 Optional. PEM encoded client certificate chain file. Environment
409 409 variables are expanded in the filename.
410 410
411 411 ``schemes``
412 412 Optional. Space separated list of URI schemes to use this
413 413 authentication entry with. Only used if the prefix doesn't include
414 414 a scheme. Supported schemes are http and https. They will match
415 415 static-http and static-https respectively, as well.
416 416 (default: https)
417 417
418 418 If no suitable authentication entry is found, the user is prompted
419 419 for credentials as usual if required by the remote.
420 420
421 421 ``cmdserver``
422 422 -------------
423 423
424 424 Controls command server settings. (ADVANCED)
425 425
426 426 ``message-encodings``
427 427 List of encodings for the ``m`` (message) channel. The first encoding
428 428 supported by the server will be selected and advertised in the hello
429 429 message. This is useful only when ``ui.message-output`` is set to
430 430 ``channel``. Supported encodings are ``cbor``.
431 431
432 432 ``shutdown-on-interrupt``
433 433 If set to false, the server's main loop will continue running after
434 434 SIGINT is received. ``runcommand`` requests can still be interrupted by
435 435 SIGINT. Close the write end of the pipe to shut down the server
436 436 process gracefully.
437 437 (default: True)
438 438
439 439 ``color``
440 440 ---------
441 441
442 442 Configure the Mercurial color mode. For details about how to define your custom
443 443 effect and style see :hg:`help color`.
444 444
445 445 ``mode``
446 446 String: control the method used to output color. One of ``auto``, ``ansi``,
447 447 ``win32``, ``terminfo`` or ``debug``. In auto mode, Mercurial will
448 448 use ANSI mode by default (or win32 mode prior to Windows 10) if it detects a
449 449 terminal. Any invalid value will disable color.
450 450
451 451 ``pagermode``
452 452 String: optional override of ``color.mode`` used with pager.
453 453
454 454 On some systems, terminfo mode may cause problems when using
455 455 color with ``less -R`` as a pager program. less with the -R option
456 456 will only display ECMA-48 color codes, and terminfo mode may sometimes
457 457 emit codes that less doesn't understand. You can work around this by
458 458 either using ansi mode (or auto mode), or by using less -r (which will
459 459 pass through all terminal control codes, not just color control
460 460 codes).
461 461
462 462 On some systems (such as MSYS in Windows), the terminal may support
463 463 a different color mode than the pager program.
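
For example, to keep automatic detection for the terminal while forcing
plain ANSI codes when paging (illustrative values)::

    [color]
    mode = auto
    pagermode = ansi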
464 464
465 465 ``commands``
466 466 ------------
467 467
468 468 ``commit.post-status``
469 469 Show status of files in the working directory after successful commit.
470 470 (default: False)
471 471
472 472 ``merge.require-rev``
473 473 Require that the revision to merge the current commit with be specified on
474 474 the command line. If this is enabled and a revision is not specified, the
475 475 command aborts.
476 476 (default: False)
477 477
478 478 ``push.require-revs``
479 479 Require revisions to push be specified using one or more mechanisms such as
480 480 specifying them positionally on the command line, using ``-r``, ``-b``,
481 481 and/or ``-B`` on the command line, or using ``paths.<path>:pushrev`` in the
482 482 configuration. If this is enabled and revisions are not specified, the
483 483 command aborts.
484 484 (default: False)
485 485
486 486 ``resolve.confirm``
487 487 Confirm before performing action if no filename is passed.
488 488 (default: False)
489 489
490 490 ``resolve.explicit-re-merge``
491 491 Require uses of ``hg resolve`` to specify which action it should perform,
492 492 instead of re-merging files by default.
493 493 (default: False)
494 494
495 495 ``resolve.mark-check``
496 496 Determines what level of checking :hg:`resolve --mark` will perform before
497 497 marking files as resolved. Valid values are ``none``, ``warn``, and
498 498 ``abort``. ``warn`` will output a warning listing the file(s) that still
499 499 have conflict markers in them, but will still mark everything resolved.
500 500 ``abort`` will output the same warning but will not mark things as resolved.
501 501 If --all is passed and this is set to ``abort``, only a warning will be
502 502 shown (an error will not be raised).
503 503 (default: ``none``)
504 504
505 505 ``status.relative``
506 506 Make paths in :hg:`status` output relative to the current directory.
507 507 (default: False)
508 508
509 509 ``status.terse``
510 510 Default value for the --terse flag, which condenses status output.
511 511 (default: empty)
512 512
513 513 ``update.check``
514 514 Determines what level of checking :hg:`update` will perform before moving
515 515 to a destination revision. Valid values are ``abort``, ``none``,
516 516 ``linear``, and ``noconflict``.
517 517
518 518 - ``abort`` always fails if the working directory has uncommitted changes.
519 519
520 520 - ``none`` performs no checking, and may result in a merge with uncommitted changes.
521 521
522 522 - ``linear`` allows any update as long as it follows a straight line in the
523 523 revision history, and may trigger a merge with uncommitted changes.
524 524
525 525 - ``noconflict`` will allow any update which would not trigger a merge with
526 526 uncommitted changes, if any are present.
527 527
528 528 (default: ``linear``)
529 529
530 530 ``update.requiredest``
531 531 Require that the user pass a destination when running :hg:`update`.
532 532 For example, :hg:`update .::` will be allowed, but a plain :hg:`update`
533 533 will be disallowed.
534 534 (default: False)
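
A combined example of these safeguards (illustrative values only)::

    [commands]
    resolve.mark-check = abort
    update.check = noconflict
    update.requiredest = True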
535 535
536 536 ``committemplate``
537 537 ------------------
538 538
539 539 ``changeset``
540 540 String: configuration in this section is used as the template to
541 541 customize the text shown in the editor when committing.
542 542
543 543 In addition to pre-defined template keywords, the commit-log-specific one
544 544 below can be used for customization:
545 545
546 546 ``extramsg``
547 547 String: Extra message (typically 'Leave message empty to abort
548 548 commit.'). This may be changed by some commands or extensions.
549 549
550 550 For example, the template configuration below shows the same text as
551 551 the one shown by default::
552 552
553 553 [committemplate]
554 554 changeset = {desc}\n\n
555 555 HG: Enter commit message. Lines beginning with 'HG:' are removed.
556 556 HG: {extramsg}
557 557 HG: --
558 558 HG: user: {author}\n{ifeq(p2rev, "-1", "",
559 559 "HG: branch merge\n")
560 560 }HG: branch '{branch}'\n{if(activebookmark,
561 561 "HG: bookmark '{activebookmark}'\n") }{subrepos %
562 562 "HG: subrepo {subrepo}\n" }{file_adds %
563 563 "HG: added {file}\n" }{file_mods %
564 564 "HG: changed {file}\n" }{file_dels %
565 565 "HG: removed {file}\n" }{if(files, "",
566 566 "HG: no files changed\n")}
567 567
568 568 ``diff()``
569 569 String: show the diff (see :hg:`help templates` for detail)
570 570
571 571 Sometimes it is helpful to show the diff of the changeset in the editor without
572 572 having to prefix 'HG: ' to each line so that highlighting works correctly. For
573 573 this, Mercurial provides a special string which will ignore everything below
574 574 it::
575 575
576 576 HG: ------------------------ >8 ------------------------
577 577
578 578 For example, the template configuration below will show the diff below the
579 579 extra message::
580 580
581 581 [committemplate]
582 582 changeset = {desc}\n\n
583 583 HG: Enter commit message. Lines beginning with 'HG:' are removed.
584 584 HG: {extramsg}
585 585 HG: ------------------------ >8 ------------------------
586 586 HG: Do not touch the line above.
587 587 HG: Everything below will be removed.
588 588 {diff()}
589 589
590 590 .. note::
591 591
592 592 For some problematic encodings (see :hg:`help win32mbcs` for
593 593 detail), this customization should be configured carefully, to
594 594 avoid showing broken characters.
595 595
596 596 For example, if a multibyte character ending with backslash (0x5c) is
597 597 followed by the ASCII character 'n' in the customized template,
598 598 the sequence of backslash and 'n' is treated as line-feed unexpectedly
599 599 (and the multibyte character is broken, too).
600 600
601 601 Customized template is used for commands below (``--edit`` may be
602 602 required):
603 603
604 604 - :hg:`backout`
605 605 - :hg:`commit`
606 606 - :hg:`fetch` (for merge commit only)
607 607 - :hg:`graft`
608 608 - :hg:`histedit`
609 609 - :hg:`import`
610 610 - :hg:`qfold`, :hg:`qnew` and :hg:`qrefresh`
611 611 - :hg:`rebase`
612 612 - :hg:`shelve`
613 613 - :hg:`sign`
614 614 - :hg:`tag`
615 615 - :hg:`transplant`
616 616
617 617 Configuring items below instead of ``changeset`` allows showing
618 618 customized message only for specific actions, or showing different
619 619 messages for each action.
620 620
621 621 - ``changeset.backout`` for :hg:`backout`
622 622 - ``changeset.commit.amend.merge`` for :hg:`commit --amend` on merges
623 623 - ``changeset.commit.amend.normal`` for :hg:`commit --amend` on other
624 624 - ``changeset.commit.normal.merge`` for :hg:`commit` on merges
625 625 - ``changeset.commit.normal.normal`` for :hg:`commit` on other
626 626 - ``changeset.fetch`` for :hg:`fetch` (implying merge commit)
627 627 - ``changeset.gpg.sign`` for :hg:`sign`
628 628 - ``changeset.graft`` for :hg:`graft`
629 629 - ``changeset.histedit.edit`` for ``edit`` of :hg:`histedit`
630 630 - ``changeset.histedit.fold`` for ``fold`` of :hg:`histedit`
631 631 - ``changeset.histedit.mess`` for ``mess`` of :hg:`histedit`
632 632 - ``changeset.histedit.pick`` for ``pick`` of :hg:`histedit`
633 633 - ``changeset.import.bypass`` for :hg:`import --bypass`
634 634 - ``changeset.import.normal.merge`` for :hg:`import` on merges
635 635 - ``changeset.import.normal.normal`` for :hg:`import` on other
636 636 - ``changeset.mq.qnew`` for :hg:`qnew`
637 637 - ``changeset.mq.qfold`` for :hg:`qfold`
638 638 - ``changeset.mq.qrefresh`` for :hg:`qrefresh`
639 639 - ``changeset.rebase.collapse`` for :hg:`rebase --collapse`
640 640 - ``changeset.rebase.merge`` for :hg:`rebase` on merges
641 641 - ``changeset.rebase.normal`` for :hg:`rebase` on other
642 642 - ``changeset.shelve.shelve`` for :hg:`shelve`
643 643 - ``changeset.tag.add`` for :hg:`tag` without ``--remove``
644 644 - ``changeset.tag.remove`` for :hg:`tag --remove`
645 645 - ``changeset.transplant.merge`` for :hg:`transplant` on merges
646 646 - ``changeset.transplant.normal`` for :hg:`transplant` on other
647 647
648 648 These dot-separated lists of names are treated as hierarchical ones.
649 649 For example, ``changeset.tag.remove`` customizes the commit message
650 650 only for :hg:`tag --remove`, but ``changeset.tag`` customizes the
651 651 commit message for :hg:`tag` regardless of ``--remove`` option.
652 652
653 653 When the external editor is invoked for a commit, the corresponding
654 654 dot-separated list of names without the ``changeset.`` prefix
655 655 (e.g. ``commit.normal.normal``) is in the ``HGEDITFORM`` environment
656 656 variable.
657 657
658 658 In this section, items other than ``changeset`` can be referenced from
659 659 others. For example, the configuration below, which lists committed files,
660 660 can be referenced as ``{listupfiles}``::
661 661
662 662 [committemplate]
663 663 listupfiles = {file_adds %
664 664 "HG: added {file}\n" }{file_mods %
665 665 "HG: changed {file}\n" }{file_dels %
666 666 "HG: removed {file}\n" }{if(files, "",
667 667 "HG: no files changed\n")}
668 668
669 669 ``decode/encode``
670 670 -----------------
671 671
672 672 Filters for transforming files on checkout/checkin. This would
673 673 typically be used for newline processing or other
674 674 localization/canonicalization of files.
675 675
676 676 Filters consist of a filter pattern followed by a filter command.
677 677 Filter patterns are globs by default, rooted at the repository root.
678 678 For example, to match any file ending in ``.txt`` in the root
679 679 directory only, use the pattern ``*.txt``. To match any file ending
680 680 in ``.c`` anywhere in the repository, use the pattern ``**.c``.
681 681 For each file only the first matching filter applies.
682 682
683 683 The filter command can start with a specifier, either ``pipe:`` or
684 684 ``tempfile:``. If no specifier is given, ``pipe:`` is used by default.
685 685
686 686 A ``pipe:`` command must accept data on stdin and return the transformed
687 687 data on stdout.
688 688
689 689 Pipe example::
690 690
691 691 [encode]
692 692 # uncompress gzip files on checkin to improve delta compression
693 693 # note: not necessarily a good idea, just an example
694 694 *.gz = pipe: gunzip
695 695
696 696 [decode]
697 697 # recompress gzip files when writing them to the working dir (we
698 698 # can safely omit "pipe:", because it's the default)
699 699 *.gz = gzip
700 700
701 701 A ``tempfile:`` command is a template. The string ``INFILE`` is replaced
702 702 with the name of a temporary file that contains the data to be
703 703 filtered by the command. The string ``OUTFILE`` is replaced with the name
704 704 of an empty temporary file, where the filtered data must be written by
705 705 the command.
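
A tempfile example, assuming a ``unix2dos`` command is available on the
system::

    [decode]
    # convert text files to the Windows line ending convention on checkout
    **.txt = tempfile: unix2dos -n INFILE OUTFILE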
706 706
707 707 .. container:: windows
708 708
709 709 .. note::
710 710
711 711 The tempfile mechanism is recommended for Windows systems,
712 712 where the standard shell I/O redirection operators often have
713 713 strange effects and may corrupt the contents of your files.
714 714
715 715 This filter mechanism is used internally by the ``eol`` extension to
716 716 translate line ending characters between Windows (CRLF) and Unix (LF)
717 717 format. We suggest you use the ``eol`` extension for convenience.
718 718
719 719
720 720 ``defaults``
721 721 ------------
722 722
723 723 (defaults are deprecated. Don't use them. Use aliases instead.)
724 724
725 725 Use the ``[defaults]`` section to define command defaults, i.e. the
726 726 default options/arguments to pass to the specified commands.
727 727
728 728 The following example makes :hg:`log` run in verbose mode, and
729 729 :hg:`status` show only the modified files, by default::
730 730
731 731 [defaults]
732 732 log = -v
733 733 status = -m
734 734
735 735 The actual commands, instead of their aliases, must be used when
736 736 defining command defaults. The command defaults will also be applied
737 737 to the aliases of the commands defined.
738 738
739 739
740 740 ``diff``
741 741 --------
742 742
743 743 Settings used when displaying diffs. Everything except for ``unified``
744 744 is a Boolean and defaults to False. See :hg:`help config.annotate`
745 745 for related options for the annotate command.
746 746
747 747 ``git``
748 748 Use git extended diff format.
749 749
750 750 ``nobinary``
751 751 Omit git binary patches.
752 752
753 753 ``nodates``
754 754 Don't include dates in diff headers.
755 755
756 756 ``noprefix``
757 757 Omit 'a/' and 'b/' prefixes from filenames. Ignored in plain mode.
758 758
759 759 ``showfunc``
760 760 Show which function each change is in.
761 761
762 762 ``ignorews``
763 763 Ignore white space when comparing lines.
764 764
765 765 ``ignorewsamount``
766 766 Ignore changes in the amount of white space.
767 767
768 768 ``ignoreblanklines``
769 769 Ignore changes whose lines are all blank.
770 770
771 771 ``unified``
772 772 Number of lines of context to show.
773 773
774 774 ``word-diff``
775 775 Highlight changed words.
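
For example, a typical diff configuration (illustrative values)::

    [diff]
    git = True
    showfunc = True
    unified = 8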
776 776
777 777 ``email``
778 778 ---------
779 779
780 780 Settings for extensions that send email messages.
781 781
782 782 ``from``
783 783 Optional. Email address to use in "From" header and SMTP envelope
784 784 of outgoing messages.
785 785
786 786 ``to``
787 787 Optional. Comma-separated list of recipients' email addresses.
788 788
789 789 ``cc``
790 790 Optional. Comma-separated list of carbon copy recipients'
791 791 email addresses.
792 792
793 793 ``bcc``
794 794 Optional. Comma-separated list of blind carbon copy recipients'
795 795 email addresses.
796 796
797 797 ``method``
798 798 Optional. Method to use to send email messages. If value is ``smtp``
799 799 (default), use SMTP (see the ``[smtp]`` section for configuration).
800 800 Otherwise, use as name of program to run that acts like sendmail
801 801 (takes ``-f`` option for sender, list of recipients on command line,
802 802 message on stdin). Normally, setting this to ``sendmail`` or
803 803 ``/usr/sbin/sendmail`` is enough to use sendmail to send messages.
804 804
805 805 ``charsets``
806 806 Optional. Comma-separated list of character sets considered
807 807 convenient for recipients. Addresses, headers, and parts not
808 808 containing patches of outgoing messages will be encoded in the
809 809 first character set to which conversion from local encoding
810 810 (``$HGENCODING``, ``ui.fallbackencoding``) succeeds. If correct
811 811 conversion fails, the text in question is sent as is.
812 812 (default: '')
813 813
814 814 Order of outgoing email character sets:
815 815
816 816 1. ``us-ascii``: always first, regardless of settings
817 817 2. ``email.charsets``: in order given by user
818 818 3. ``ui.fallbackencoding``: if not in email.charsets
819 819 4. ``$HGENCODING``: if not in email.charsets
820 820 5. ``utf-8``: always last, regardless of settings
821 821
822 822 Email example::
823 823
824 824 [email]
825 825 from = Joseph User <joe.user@example.com>
826 826 method = /usr/sbin/sendmail
827 827 # charsets for western Europeans
828 828 # us-ascii, utf-8 omitted, as they are tried first and last
829 829 charsets = iso-8859-1, iso-8859-15, windows-1252
830 830
831 831
832 832 ``extensions``
833 833 --------------
834 834
835 835 Mercurial has an extension mechanism for adding new features. To
836 836 enable an extension, create an entry for it in this section.
837 837
838 838 If you know that the extension is already in Python's search path,
839 839 you can give the name of the module, followed by ``=``, with nothing
840 840 after the ``=``.
841 841
842 842 Otherwise, give a name that you choose, followed by ``=``, followed by
843 843 the path to the ``.py`` file (including the file name extension) that
844 844 defines the extension.
845 845
846 846 To explicitly disable an extension that is enabled in an hgrc of
847 847 broader scope, prepend its path with ``!``, as in ``foo = !/ext/path``
848 848 or ``foo = !`` when path is not supplied.
849 849
850 850 Example for ``~/.hgrc``::
851 851
852 852 [extensions]
853 853 # (the churn extension will get loaded from Mercurial's path)
854 854 churn =
855 855 # (this extension will get loaded from the file specified)
856 856 myfeature = ~/.hgext/myfeature.py
857 857
858 858 If an extension fails to load, a warning will be issued, and Mercurial will
859 859 proceed. To enforce that an extension must be loaded, one can set the `required`
860 860 suboption in the config::
861 861
862 862 [extensions]
863 863 myfeature = ~/.hgext/myfeature.py
864 864 myfeature:required = yes
865 865
866 866 To debug an extension loading issue, one can add `--traceback` to the Mercurial
867 867 invocation.
868 868
869 869 A default setting can be set using the special `*` extension key::
870 870
871 871 [extensions]
872 872 *:required = yes
873 873 myfeature = ~/.hgext/myfeature.py
874 874 rebase=
875 875
876 876
877 877 ``format``
878 878 ----------
879 879
880 880 Configuration that controls the repository format. Newer format options are more
881 881 powerful, but incompatible with some older versions of Mercurial. Format options
882 882 are considered at repository initialization only. You need to make a new clone
883 883 for config changes to be taken into account.
884 884
885 885 For more details about repository format and version compatibility, see
886 886 https://www.mercurial-scm.org/wiki/MissingRequirement
887 887
888 888 ``usegeneraldelta``
889 889 Enable or disable the "generaldelta" repository format which improves
890 890 repository compression by allowing "revlog" to store deltas against
891 891 arbitrary revisions instead of the previously stored one. This provides
892 892 significant improvement for repositories with branches.
893 893
894 894 Repositories with this on-disk format require Mercurial version 1.9.
895 895
896 896 Enabled by default.
897 897
898 898 ``dotencode``
899 899 Enable or disable the "dotencode" repository format which enhances
900 900 the "fncache" repository format (which has to be enabled to use
901 901 dotencode) to avoid issues with filenames starting with "._" on
902 902 Mac OS X and spaces on Windows.
903 903
904 904 Repositories with this on-disk format require Mercurial version 1.7.
905 905
906 906 Enabled by default.
907 907
908 908 ``usefncache``
909 909 Enable or disable the "fncache" repository format which enhances
910 910 the "store" repository format (which has to be enabled to use
911 911 fncache) to allow longer filenames and avoids using Windows
912 912 reserved names, e.g. "nul".
913 913
914 914 Repositories with this on-disk format require Mercurial version 1.1.
915 915
916 916 Enabled by default.
917 917
918 918 ``use-dirstate-v2``
919 919 Enable or disable the experimental "dirstate-v2" feature. The dirstate
920 920 functionality is shared by all commands interacting with the working copy.
921 921 The new version is more robust and faster, and stores more information.
922 922
923 923 The performance-improving version of this feature is currently only
924 924 implemented in Rust (see :hg:`help rust`), so people not using a version of
925 925 Mercurial compiled with the Rust parts might actually suffer some slowdown.
926 926 For this reason, such versions will by default refuse to access repositories
927 927 with "dirstate-v2" enabled.
928 928
929 929 This behavior can be adjusted via configuration: check
930 930 :hg:`help config.storage.dirstate-v2.slow-path` for details.
931 931
932 932 Repositories with this on-disk format require Mercurial 6.0 or above.
933 933
934 934 By default this format variant is disabled if the fast implementation is not
935 935 available, and enabled if the fast implementation is available.
936 936
937 937 To accommodate installations of Mercurial without the fast implementation,
938 938 you can downgrade your repository. To do so, run the following command::
939 939
940 940 $ hg debugupgraderepo \
941 941 --run \
942 942 --config format.use-dirstate-v2=False \
943 943 --config storage.dirstate-v2.slow-path=allow
944 944
945 945 For a more comprehensive guide, see :hg:`help internals.dirstate-v2`.
946 946
947 947 ``use-dirstate-v2.automatic-upgrade-of-mismatching-repositories``
948 948 When enabled, an automatic upgrade will be triggered when a repository format
949 949 does not match its `use-dirstate-v2` config.
950 950
951 951 This is an advanced behavior that most users will not need. We recommend you
952 952 don't use this unless you are a seasoned administrator of a Mercurial install
953 953 base.
954 954
955 955 Automatic upgrade means that any process accessing the repository will
956 956 upgrade the repository format to use `dirstate-v2`. This only triggers if a
957 957 change is needed. This also applies to operations that would have been
958 958 read-only (like hg status).
959 959
960 960 If the repository cannot be locked, the automatic-upgrade operation will be
961 961 skipped. The next operation will attempt it again.
962 962
963 963 This configuration will apply for moves in any direction, either adding the
964 964 `dirstate-v2` format if `format.use-dirstate-v2=yes` or removing the
965 965 `dirstate-v2` requirement if `format.use-dirstate-v2=no`. So we recommend
966 966 setting both this value and `format.use-dirstate-v2` at the same time.
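
For example (illustrative; both settings are changed together, as
recommended above)::

    [format]
    use-dirstate-v2 = yes
    use-dirstate-v2.automatic-upgrade-of-mismatching-repositories = yes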
967 967
968 968 ``use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet``
969 969 Hide message when performing such automatic upgrade.
970 970
971 971 ``use-dirstate-tracked-hint``
972 972 Enable or disable the writing of the "tracked key" file alongside the dirstate.
973 973 (defaults to disabled)
974 974
975 975 That "tracked-hint" can help external automations to detect changes to the
976 976 set of tracked files (i.e. the result of `hg files` or `hg status -macd`).
977 977
978 978 The tracked-hint is written to a new `.hg/dirstate-tracked-hint` file. That file
979 979 contains two lines:
980 980 - the first line is the file version (currently: 1),
981 981 - the second line contains the "tracked-hint".
982 982 That file is written right after the dirstate is written.
983 983
984 984 The tracked-hint changes whenever the set of files tracked in the dirstate
985 985 changes. The general idea is:
986 986 - if the hint is identical, the set of tracked files SHOULD be identical,
987 987 - if the hint is different, the set of tracked files MIGHT be different.
988 988
989 989 The "hint is identical" case uses `SHOULD` as the dirstate and the hint file
990 990 are two distinct files and therefore that cannot be read or written to in an
991 991 atomic way. If the key is identical, nothing garantees that the dirstate is
992 992 not updated right after the hint file. This is considered a negligible
993 993 limitation for the intended usecase. It is actually possible to prevent this
994 994 race by taking the repository lock during read operations.
995 995
996 996 There are two ways to use this feature:
997 997
998 998 1) monitoring changes to the `.hg/dirstate-tracked-hint`, if the file
999 999 changes, the tracked set might have changed.
1000 1000
1001 1001 2) storing the value and comparing it to a later value.
1002 1002
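For example, a minimal shell sketch of the second approach, assuming a
POSIX shell and a hypothetical `saved-hint` file maintained by the
automation::

    $ cp .hg/dirstate-tracked-hint saved-hint      # remember the current hint
    $ # ... some time later ...
    $ cmp -s .hg/dirstate-tracked-hint saved-hint \
        || echo "tracked set might have changed"   # hints differ: rescan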
1003 1003
1004 1004 ``use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories``
1005 1005 When enabled, an automatic upgrade will be triggered when a repository format
1006 1006 does not match its `use-dirstate-tracked-hint` config.
1007 1007
1008 1008 This is an advanced behavior that most users will not need. We recommend you
1009 1009 don't use this unless you are a seasoned administrator of a Mercurial install
1010 1010 base.
1011 1011
1012 1012 Automatic upgrade means that any process accessing the repository will
1013 1013 upgrade the repository format to use `dirstate-tracked-hint`. This only
1014 1014 triggers if a change is needed. This also applies to operations that would
1015 1015 have been read-only (like hg status).
1016 1016
1017 1017 If the repository cannot be locked, the automatic-upgrade operation will be
1018 1018 skipped. The next operation will attempt it again.
1019 1019
1020 1020 This configuration will apply for moves in any direction, either adding the
1021 1021 `dirstate-tracked-hint` format if `format.use-dirstate-tracked-hint=yes` or
1022 1022 removing the `dirstate-tracked-hint` requirement if
1023 1023 `format.use-dirstate-tracked-hint=no`. So we recommend setting both this
1024 1024 value and `format.use-dirstate-tracked-hint` at the same time.
1025 1025
1026 1026
1027 1027 ``use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet``
1028 1028 Hide message when performing such automatic upgrade.
1029 1029
1030 1030
1031 1031 ``use-persistent-nodemap``
1032 1032 Enable or disable the "persistent-nodemap" feature which improves
1033 1033 performance if the Rust extensions are available.
1034 1034
1035 1035 The "persistent-nodemap" feature persists the "node -> rev" mapping on
1036 1036 disk, removing the need to dynamically build that mapping for each
1037 1037 Mercurial invocation. This significantly reduces the startup cost of
1038 1038 various local and server-side operations for larger repositories.
1039 1039
1040 1040 The performance-improving version of this feature is currently only
1041 1041 implemented in Rust (see :hg:`help rust`), so people not using a version of
1042 1042 Mercurial compiled with the Rust parts might actually suffer some slowdown.
1043 1043 For this reason, such versions will by default refuse to access repositories
1044 1044 with "persistent-nodemap".
1045 1045
1046 1046 This behavior can be adjusted via configuration: check
1047 1047 :hg:`help config.storage.revlog.persistent-nodemap.slow-path` for details.
1048 1048
1049 1049 Repositories with this on-disk format require Mercurial 5.4 or above.
1050 1050
1051 1051 By default, this format variant is enabled if the fast implementation is
1052 1052 available, and disabled otherwise.
1053 1053
1054 1054 To accommodate installations of Mercurial without the fast implementation,
1055 1055 you can downgrade your repository. To do so, run the following command:
1056 1056
1057 1057 $ hg debugupgraderepo \
1058 1058 --run \
1059 1059 --config format.use-persistent-nodemap=False \
1060 1060 --config storage.revlog.persistent-nodemap.slow-path=allow
1061 1061
1062 1062 ``use-share-safe``
1063 1063 Enforce "safe" behaviors for all "shares" that access this repository.
1064 1064
1065 1065 With this feature, "shares" using this repository as a source will:
1066 1066
1067 1067 * read the source repository's configuration (`<source>/.hg/hgrc`).
1068 1068 * read and use the source repository's "requirements"
1069 1069 (except the working copy specific one).
1070 1070
1071 1071 Without this feature, "shares" using this repository as a source will:
1072 1072
1073 1073 * keep tracking the repository "requirements" in the share only, ignoring
1074 1074 the source "requirements", possibly diverging from them.
1075 1075 * ignore source repository config. This can create problems, like silently
1076 1076 ignoring important hooks.
1077 1077
1078 1078 Beware that existing shares will not be upgraded/downgraded, and by
1079 1079 default, Mercurial will refuse to interact with them until the mismatch
1080 1080 is resolved. See :hg:`help config.share.safe-mismatch.source-safe` and
1081 1081 :hg:`help config.share.safe-mismatch.source-not-safe` for details.
1082 1082
1083 1083 Introduced in Mercurial 5.7.
1084 1084
1085 1085 Enabled by default in Mercurial 6.1.
1086 1086
1087 1087 ``use-share-safe.automatic-upgrade-of-mismatching-repositories``
1088 1088 When enabled, an automatic upgrade will be triggered when a repository format
1089 1089 does not match its `use-share-safe` config.
1090 1090
1091 1091 This is an advanced behavior that most users will not need. We recommend you
1092 1092 don't use this unless you are a seasoned administrator of a Mercurial install
1093 1093 base.
1094 1094
1095 1095 Automatic upgrade means that any process accessing the repository will
1096 1096 upgrade the repository format to use `share-safe`. This only triggers if a
1097 1097 change is needed. This also applies to operations that would have been
1098 1098 read-only (like hg status).
1099 1099
1100 1100 If the repository cannot be locked, the automatic-upgrade operation will be
1101 1101 skipped. The next operation will attempt it again.
1102 1102
1103 1103 This configuration will apply for moves in any direction, either adding the
1104 1104 `share-safe` format if `format.use-share-safe=yes` or removing the
1105 1105 `share-safe` requirement if `format.use-share-safe=no`. So we recommend
1106 1106 setting both this value and `format.use-share-safe` at the same time.
1107 1107
1108 1108 ``use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet``
1109 1109 Hide message when performing such automatic upgrade.
1110 1110
1111 1111 ``usestore``
1112 1112 Enable or disable the "store" repository format which improves
1113 1113 compatibility with systems that fold case or otherwise mangle
1114 1114 filenames. Disabling this option will allow you to store longer filenames
1115 1115 in some situations at the expense of compatibility.
1116 1116
1117 1117 Repositories with this on-disk format require Mercurial version 0.9.4.
1118 1118
1119 1119 Enabled by default.
1120 1120
1121 1121 ``sparse-revlog``
1122 1122 Enable or disable the ``sparse-revlog`` delta strategy. This format improves
1123 1123 delta re-use inside revlog. For very branchy repositories, it results in a
1124 1124 smaller store. For repositories with many revisions, it also helps
1125 1125 performance (by using shortened delta chains).
1126 1126
1127 1127 Repositories with this on-disk format require Mercurial version 4.7.
1128 1128
1129 1129 Enabled by default.
1130 1130
1131 1131 ``revlog-compression``
1132 1132 Compression algorithm used by revlog. Supported values are `zlib` and
1133 1133 `zstd`. The `zlib` engine is the historical default of Mercurial. `zstd` is
1134 1134 a newer format that is usually a net win over `zlib`, operating faster at
1135 1135 better compression rates. Use `zstd` to reduce CPU usage. Multiple values
1136 1136 can be specified; the first available one will be used.
1137 1137
1138 1138 On some systems, the Mercurial installation may lack `zstd` support.
1139 1139
1140 1140 Default is `zstd` if available, `zlib` otherwise.
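
For example, an illustrative sketch that prefers `zstd` while keeping a
fallback for installations without it::

    [format]
    revlog-compression = zstd, zlib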
1141 1141
1142 1142 ``bookmarks-in-store``
1143 1143 Store bookmarks in .hg/store/. This means that bookmarks are shared when
1144 1144 using `hg share` regardless of the `-B` option.
1145 1145
1146 1146 Repositories with this on-disk format require Mercurial version 5.1.
1147 1147
1148 1148 Disabled by default.
1149 1149
1150 1150
1151 1151 ``graph``
1152 1152 ---------
1153 1153
1154 1154 Web graph view configuration. This section lets you change the display
1155 1155 properties of graph elements per branch, for instance to make the
1156 1156 ``default`` branch stand out.
1157 1157
1158 1158 Each line has the following format::
1159 1159
1160 1160 <branch>.<argument> = <value>
1161 1161
1162 1162 where ``<branch>`` is the name of the branch being
1163 1163 customized. Example::
1164 1164
1165 1165 [graph]
1166 1166 # 2px width
1167 1167 default.width = 2
1168 1168 # red color
1169 1169 default.color = FF0000
1170 1170
1171 1171 Supported arguments:
1172 1172
1173 1173 ``width``
1174 1174 Set branch edges width in pixels.
1175 1175
1176 1176 ``color``
1177 1177 Set branch edges color in hexadecimal RGB notation.
1178 1178
1179 1179 ``hooks``
1180 1180 ---------
1181 1181
1182 1182 Commands or Python functions that get automatically executed by
1183 1183 various actions such as starting or finishing a commit. Multiple
1184 1184 hooks can be run for the same action by appending a suffix to the
1185 1185 action. Overriding a site-wide hook can be done by changing its
1186 1186 value or setting it to an empty string. Hooks can be prioritized
1187 1187 by adding a prefix of ``priority.`` to the hook name on a new line
1188 1188 and setting the priority. The default priority is 0.
1189 1189
1190 1190 Example ``.hg/hgrc``::
1191 1191
1192 1192 [hooks]
1193 1193 # update working directory after adding changesets
1194 1194 changegroup.update = hg update
1195 1195 # do not use the site-wide hook
1196 1196 incoming =
1197 1197 incoming.email = /my/email/hook
1198 1198 incoming.autobuild = /my/build/hook
1199 1199 # force autobuild hook to run before other incoming hooks
1200 1200 priority.incoming.autobuild = 1
1201 1201 ### control HGPLAIN setting when running autobuild hook
1202 1202 # HGPLAIN always set (default from Mercurial 5.7)
1203 1203 incoming.autobuild:run-with-plain = yes
1204 1204 # HGPLAIN never set
1205 1205 incoming.autobuild:run-with-plain = no
1206 1206 # HGPLAIN inherited from environment (default before Mercurial 5.7)
1207 1207 incoming.autobuild:run-with-plain = auto
1208 1208
1209 1209 Most hooks are run with environment variables set that give useful
1210 1210 additional information. For each hook below, the environment variables
1211 1211 it is passed are listed with names in the form ``$HG_foo``. The
1212 1212 ``$HG_HOOKTYPE`` and ``$HG_HOOKNAME`` variables are set for all hooks.
1213 1213 They contain the type of hook which triggered the run and the full name
1214 1214 of the hook in the config, respectively. In the example above, this will
1215 1215 be ``$HG_HOOKTYPE=incoming`` and ``$HG_HOOKNAME=incoming.email``.
1216 1216
1217 1217 .. container:: windows
1218 1218
1219 1219 Some basic Unix syntax can be enabled for portability, including ``$VAR``
1220 1220 and ``${VAR}`` style variables. A ``~`` followed by ``\`` or ``/`` will
1221 1221 be expanded to ``%USERPROFILE%`` to simulate a subset of tilde expansion
1222 1222 on Unix. To use a literal ``$`` or ``~``, it must be escaped with a
1223 1223 backslash or placed inside strong quotes. Strong quotes will be replaced by
1224 1224 double quotes after processing.
1225 1225
1226 1226 This feature is enabled by adding a prefix of ``tonative.`` to the hook
1227 1227 name on a new line, and setting it to ``True``. For example::
1228 1228
1229 1229 [hooks]
1230 1230 incoming.autobuild = /my/build/hook
1231 1231 # enable translation to cmd.exe syntax for autobuild hook
1232 1232 tonative.incoming.autobuild = True
1233 1233
1234 1234 ``changegroup``
1235 1235 Run after a changegroup has been added via push, pull or unbundle. The ID of
1236 1236 the first new changeset is in ``$HG_NODE`` and last is in ``$HG_NODE_LAST``.
1237 1237 The URL from which changes came is in ``$HG_URL``.
1238 1238
1239 1239 ``commit``
1240 1240 Run after a changeset has been created in the local repository. The ID
1241 1241 of the newly created changeset is in ``$HG_NODE``. Parent changeset
1242 1242 IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1243 1243
1244 1244 ``incoming``
1245 1245 Run after a changeset has been pulled, pushed, or unbundled into
1246 1246 the local repository. The ID of the newly arrived changeset is in
1247 1247 ``$HG_NODE``. The URL that was source of the changes is in ``$HG_URL``.
1248 1248
1249 1249 ``outgoing``
1250 1250 Run after sending changes from the local repository to another. The ID of
1251 1251 first changeset sent is in ``$HG_NODE``. The source of operation is in
1252 1252 ``$HG_SOURCE``. Also see :hg:`help config.hooks.preoutgoing`.
1253 1253
1254 1254 ``post-<command>``
1255 1255 Run after successful invocations of the associated command. The
1256 1256 contents of the command line are passed as ``$HG_ARGS`` and the result
1257 1257 code in ``$HG_RESULT``. Parsed command line arguments are passed as
1258 1258 ``$HG_PATS`` and ``$HG_OPTS``. These contain string representations of
1259 1259 the python data internally passed to <command>. ``$HG_OPTS`` is a
1260 1260 dictionary of options (with unspecified options set to their defaults).
1261 1261 ``$HG_PATS`` is a list of arguments. Hook failure is ignored.
1262 1262
1263 1263 ``fail-<command>``
1264 1264 Run after a failed invocation of an associated command. The contents
1265 1265 of the command line are passed as ``$HG_ARGS``. Parsed command line
1266 1266 arguments are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain
1267 1267 string representations of the python data internally passed to
1268 1268 <command>. ``$HG_OPTS`` is a dictionary of options (with unspecified
1269 1269 options set to their defaults). ``$HG_PATS`` is a list of arguments.
1270 1270 Hook failure is ignored.
1271 1271
1272 1272 ``pre-<command>``
1273 1273 Run before executing the associated command. The contents of the
1274 1274 command line are passed as ``$HG_ARGS``. Parsed command line arguments
1275 1275 are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain string
1276 1276 representations of the data internally passed to <command>. ``$HG_OPTS``
1277 1277 is a dictionary of options (with unspecified options set to their
1278 1278 defaults). ``$HG_PATS`` is a list of arguments. If the hook returns
1279 1279 failure, the command doesn't execute and Mercurial returns the failure
1280 1280 code.
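
For example, an illustrative guard hook; the script path is an assumption,
not a shipped tool::

    [hooks]
    # refuse `hg push` unless the (hypothetical) check script succeeds
    pre-push = /usr/local/bin/check-push-allowed.sh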
1281 1281
1282 1282 ``prechangegroup``
1283 1283 Run before a changegroup is added via push, pull or unbundle. Exit
1284 1284 status 0 allows the changegroup to proceed. A non-zero status will
1285 1285 cause the push, pull or unbundle to fail. The URL from which changes
1286 1286 will come is in ``$HG_URL``.
1287 1287
1288 1288 ``precommit``
1289 1289 Run before starting a local commit. Exit status 0 allows the
1290 1290 commit to proceed. A non-zero status will cause the commit to fail.
1291 1291 Parent changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1292 1292
1293 1293 ``prelistkeys``
1294 1294 Run before listing pushkeys (like bookmarks) in the
1295 1295 repository. A non-zero status will cause failure. The key namespace is
1296 1296 in ``$HG_NAMESPACE``.
1297 1297
1298 1298 ``preoutgoing``
1299 1299 Run before collecting changes to send from the local repository to
1300 1300 another. A non-zero status will cause failure. This lets you prevent
1301 1301 pull over HTTP or SSH. It can also prevent propagating commits (via
1302 1302 local pull, push (outbound) or bundle commands), but not completely,
1303 1303 since you can just copy files instead. The source of operation is in
1304 1304 ``$HG_SOURCE``. If "serve", the operation is happening on behalf of a remote
1305 1305 SSH or HTTP repository. If "push", "pull" or "bundle", the operation
1306 1306 is happening on behalf of a repository on the same system.
1307 1307
1308 1308 ``prepushkey``
1309 1309 Run before a pushkey (like a bookmark) is added to the
1310 1310 repository. A non-zero status will cause the key to be rejected. The
1311 1311 key namespace is in ``$HG_NAMESPACE``, the key is in ``$HG_KEY``,
1312 1312 the old value (if any) is in ``$HG_OLD``, and the new value is in
1313 1313 ``$HG_NEW``.
1314 1314
1315 1315 ``pretag``
1316 1316 Run before creating a tag. Exit status 0 allows the tag to be
1317 1317 created. A non-zero status will cause the tag to fail. The ID of the
1318 1318 changeset to tag is in ``$HG_NODE``. The name of tag is in ``$HG_TAG``. The
1319 1319 tag is local if ``$HG_LOCAL=1``, or in the repository if ``$HG_LOCAL=0``.
1320 1320
1321 1321 ``pretxnopen``
1322 1322 Run before any new repository transaction is opened. The reason for the
1323 1323 transaction will be in ``$HG_TXNNAME``, and a unique identifier for the
1324 1324 transaction will be in ``$HG_TXNID``. A non-zero status will prevent the
1325 1325 transaction from being opened.
1326 1326
1327 1327 ``pretxnclose``
1328 1328 Run right before the transaction is actually finalized. Any repository change
1329 1329 will be visible to the hook program. This lets you validate the transaction
1330 1330 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1331 1331 status will cause the transaction to be rolled back. The reason for the
1332 1332 transaction opening will be in ``$HG_TXNNAME``, and a unique identifier for
1333 1333 the transaction will be in ``$HG_TXNID``. The rest of the available data will
1334 1334 vary according to the transaction type. Changes unbundled to the repository will
1335 1335 add ``$HG_URL`` and ``$HG_SOURCE``. New changesets will add ``$HG_NODE`` (the
1336 1336 ID of the first added changeset), ``$HG_NODE_LAST`` (the ID of the last added
1337 1337 changeset). Bookmark and phase changes will set ``$HG_BOOKMARK_MOVED`` and
1338 1338 ``$HG_PHASES_MOVED`` to ``1`` respectively. The number of new obsmarkers, if
1339 1339 any, will be in ``$HG_NEW_OBSMARKERS``, etc.
1340 1340
1341 1341 ``pretxnclose-bookmark``
1342 1342 Run right before a bookmark change is actually finalized. Any repository
1343 1343 change will be visible to the hook program. This lets you validate the
1344 1344 transaction content or change it. Exit status 0 allows the commit to
1345 1345 proceed. A non-zero status will cause the transaction to be rolled back.
1346 1346 The name of the bookmark will be available in ``$HG_BOOKMARK``, the new
1347 1347 bookmark location will be available in ``$HG_NODE`` while the previous
1348 1348 location will be available in ``$HG_OLDNODE``. In case of a bookmark
1349 1349 creation ``$HG_OLDNODE`` will be empty. In case of deletion ``$HG_NODE``
1350 1350 will be empty.
1351 1351 In addition, the reason for the transaction opening will be in
1352 1352 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1353 1353 ``$HG_TXNID``.
1354 1354
1355 1355 ``pretxnclose-phase``
1356 1356 Run right before a phase change is actually finalized. Any repository change
1357 1357 will be visible to the hook program. This lets you validate the transaction
1358 1358 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1359 1359 status will cause the transaction to be rolled back. The hook is called
1360 1360 multiple times, once for each revision affected by a phase change.
1361 1361 The affected node is available in ``$HG_NODE``, the phase in ``$HG_PHASE``
1362 1362 while the previous ``$HG_OLDPHASE``. In case of new node, ``$HG_OLDPHASE``
1363 1363 will be empty. In addition, the reason for the transaction opening will be in
1364 1364 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1365 1365 ``$HG_TXNID``. The hook is also run for newly added revisions. In this case
1366 1366 the ``$HG_OLDPHASE`` entry will be empty.
1367 1367
1368 1368 ``txnclose``
1369 1369 Run after any repository transaction has been committed. At this
1370 1370 point, the transaction can no longer be rolled back. The hook will run
1371 1371 after the lock is released. See :hg:`help config.hooks.pretxnclose` for
1372 1372 details about available variables.
1373 1373
1374 1374 ``txnclose-bookmark``
1375 1375 Run after any bookmark change has been committed. At this point, the
1376 1376 transaction can no longer be rolled back. The hook will run after the lock
1377 1377 is released. See :hg:`help config.hooks.pretxnclose-bookmark` for details
1378 1378 about available variables.
1379 1379
1380 1380 ``txnclose-phase``
1381 1381 Run after any phase change has been committed. At this point, the
1382 1382 transaction can no longer be rolled back. The hook will run after the lock
1383 1383 is released. See :hg:`help config.hooks.pretxnclose-phase` for details about
1384 1384 available variables.
1385 1385
1386 1386 ``txnabort``
1387 1387 Run when a transaction is aborted. See :hg:`help config.hooks.pretxnclose`
1388 1388 for details about available variables.
1389 1389
1390 1390 ``pretxnchangegroup``
1391 1391 Run after a changegroup has been added via push, pull or unbundle, but before
1392 1392 the transaction has been committed. The changegroup is visible to the hook
1393 1393 program. This allows validation of incoming changes before accepting them.
1394 1394 The ID of the first new changeset is in ``$HG_NODE`` and last is in
1395 1395 ``$HG_NODE_LAST``. Exit status 0 allows the transaction to commit. A non-zero
1396 1396 status will cause the transaction to be rolled back, and the push, pull or
1397 1397 unbundle will fail. The URL that was the source of changes is in ``$HG_URL``.
1398 1398
1399 1399 ``pretxncommit``
1400 1400 Run after a changeset has been created, but before the transaction is
1401 1401 committed. The changeset is visible to the hook program. This allows
1402 1402 validation of the commit message and changes. Exit status 0 allows the
1403 1403 commit to proceed. A non-zero status will cause the transaction to
1404 1404 be rolled back. The ID of the new changeset is in ``$HG_NODE``. The parent
1405 1405 changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1406 1406
1407 1407 ``preupdate``
1408 1408 Run before updating the working directory. Exit status 0 allows
1409 1409 the update to proceed. A non-zero status will prevent the update.
1410 1410 The changeset ID of first new parent is in ``$HG_PARENT1``. If updating to a
1411 1411 merge, the ID of second new parent is in ``$HG_PARENT2``.
1412 1412
1413 1413 ``listkeys``
1414 1414 Run after listing pushkeys (like bookmarks) in the repository. The
1415 1415 key namespace is in ``$HG_NAMESPACE``. ``$HG_VALUES`` is a
1416 1416 dictionary containing the keys and values.
1417 1417
1418 1418 ``pushkey``
1419 1419 Run after a pushkey (like a bookmark) is added to the
1420 1420 repository. The key namespace is in ``$HG_NAMESPACE``, the key is in
1421 1421 ``$HG_KEY``, the old value (if any) is in ``$HG_OLD``, and the new
1422 1422 value is in ``$HG_NEW``.
1423 1423
1424 1424 ``tag``
1425 1425 Run after a tag is created. The ID of the tagged changeset is in ``$HG_NODE``.
1426 1426 The name of tag is in ``$HG_TAG``. The tag is local if ``$HG_LOCAL=1``, or in
1427 1427 the repository if ``$HG_LOCAL=0``.
1428 1428
1429 1429 ``update``
1430 1430 Run after updating the working directory. The changeset ID of first
1431 1431 new parent is in ``$HG_PARENT1``. If updating to a merge, the ID of second new
1432 1432 parent is in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the
1433 1433 update failed (e.g. because conflicts were not resolved), ``$HG_ERROR=1``.
1434 1434
1435 1435 .. note::
1436 1436
1437 1437 It is generally better to use standard hooks rather than the
1438 1438 generic pre- and post- command hooks, as they are guaranteed to be
1439 1439 called in the appropriate contexts for influencing transactions.
1440 1440 Also, hooks like "commit" will be called in all contexts that
1441 1441 generate a commit (e.g. tag) and not just the commit command.
1442 1442
1443 1443 .. note::
1444 1444
1445 1445 Environment variables with empty values may not be passed to
1446 1446 hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
1447 1447 will have an empty value under Unix-like platforms for non-merge
1448 1448 changesets, while it will not be available at all under Windows.
1449 1449
1450 1450 The syntax for Python hooks is as follows::
1451 1451
1452 1452 hookname = python:modulename.submodule.callable
1453 1453 hookname = python:/path/to/python/module.py:callable
1454 1454
1455 1455 Python hooks are run within the Mercurial process. Each hook is
1456 1456 called with at least three keyword arguments: a ui object (keyword
1457 1457 ``ui``), a repository object (keyword ``repo``), and a ``hooktype``
1458 1458 keyword that tells what kind of hook is used. Arguments listed as
1459 1459 environment variables above are passed as keyword arguments, with no
1460 1460 ``HG_`` prefix, and names in lower case.
1461 1461
1462 1462 If a Python hook returns a "true" value or raises an exception, this
1463 1463 is treated as a failure.
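
For example, a minimal sketch of a Python ``pretxncommit`` hook; the module
path and function name are assumptions::

    # /path/to/hooks.py
    def checknode(ui, repo, hooktype, node=None, **kwargs):
        # $HG_NODE arrives as the `node` keyword argument
        ui.status(b'validating %s\n' % node)
        return False  # a falsy return value means success

enabled with::

    [hooks]
    pretxncommit.check = python:/path/to/hooks.py:checknode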
1464 1464
1465 1465
1466 1466 ``hostfingerprints``
1467 1467 --------------------
1468 1468
1469 1469 (Deprecated. Use ``[hostsecurity]``'s ``fingerprints`` options instead.)
1470 1470
1471 1471 Fingerprints of the certificates of known HTTPS servers.
1472 1472
1473 1473 An HTTPS connection to a server with a fingerprint configured here will
1474 1474 only succeed if the server's certificate matches the fingerprint.
1475 1475 This is very similar to how SSH known hosts work.
1476 1476
1477 1477 The fingerprint is the SHA-1 hash value of the DER encoded certificate.
1478 1478 Multiple values can be specified (separated by spaces or commas). This can
1479 1479 be used to define both old and new fingerprints while a host transitions
1480 1480 to a new certificate.
1481 1481
1482 1482 The CA chain and web.cacerts are not used for servers with a fingerprint.
1483 1483
1484 1484 For example::
1485 1485
1486 1486 [hostfingerprints]
1487 1487 hg.intevation.de = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1488 1488 hg.intevation.org = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1489 1489
1490 1490 ``hostsecurity``
1491 1491 ----------------
1492 1492
1493 1493 Used to specify global and per-host security settings for connecting to
1494 1494 other machines.
1495 1495
1496 1496 The following options control default behavior for all hosts.
1497 1497
1498 1498 ``ciphers``
1499 1499 Defines the cryptographic ciphers to use for connections.
1500 1500
1501 1501 Value must be a valid OpenSSL Cipher List Format as documented at
1502 1502 https://www.openssl.org/docs/manmaster/apps/ciphers.html#CIPHER-LIST-FORMAT.
1503 1503
1504 1504 This setting is for advanced users only. Setting to incorrect values
1505 1505 can significantly lower connection security or decrease performance.
1506 1506 You have been warned.
1507 1507
1508 1508 This option requires Python 2.7.
1509 1509
1510 1510 ``minimumprotocol``
1511 1511 Defines the minimum channel encryption protocol to use.
1512 1512
1513 1513 By default, the highest version of TLS supported by both client and server
1514 1514 is used.
1515 1515
1516 1516 Allowed values are: ``tls1.0``, ``tls1.1``, ``tls1.2``.
1517 1517
1518 1518 When running on an old Python version, only ``tls1.0`` is allowed since
1519 1519 old versions of Python only support up to TLS 1.0.
1520 1520
1521 1521 When running a Python that supports modern TLS versions, the default is
1522 1522 ``tls1.1``. ``tls1.0`` can still be used to allow TLS 1.0. However, this
1523 1523 weakens security and should only be used as a feature of last resort if
1524 1524 a server does not support TLS 1.1+.
1525 1525
1526 1526 Options in the ``[hostsecurity]`` section can have the form
1527 1527 ``hostname``:``setting``. This allows multiple settings to be defined on a
1528 1528 per-host basis.
1529 1529
1530 1530 The following per-host settings can be defined.
1531 1531
1532 1532 ``ciphers``
1533 1533 This behaves like ``ciphers`` as described above except it only applies
1534 1534 to the host on which it is defined.
1535 1535
1536 1536 ``fingerprints``
1537 1537 A list of hashes of the DER encoded peer/remote certificate. Values have
1538 1538 the form ``algorithm``:``fingerprint``. e.g.
1539 1539 ``sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2``.
1540 1540 In addition, colons (``:``) can appear in the fingerprint part.
1541 1541
1542 1542 The following algorithms/prefixes are supported: ``sha1``, ``sha256``,
1543 1543 ``sha512``.
1544 1544
1545 1545 Use of ``sha256`` or ``sha512`` is preferred.
1546 1546
1547 1547 If a fingerprint is specified, the CA chain is not validated for this
1548 1548 host and Mercurial will require the remote certificate to match one
1549 1549 of the fingerprints specified. This means if the server updates its
1550 1550 certificate, Mercurial will abort until a new fingerprint is defined.
1551 1551 This can provide stronger security than traditional CA-based validation
1552 1552 at the expense of convenience.
1553 1553
1554 1554 This option takes precedence over ``verifycertsfile``.
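
One way to obtain a certificate's SHA-256 fingerprint, assuming OpenSSL is
available, is::

    $ openssl s_client -connect hg.example.com:443 < /dev/null 2>/dev/null \
        | openssl x509 -noout -fingerprint -sha256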
1555 1555
1556 1556 ``minimumprotocol``
1557 1557 This behaves like ``minimumprotocol`` as described above except it
1558 1558 only applies to the host on which it is defined.
1559 1559
1560 1560 ``verifycertsfile``
1561 1561 Path to a file containing a list of PEM encoded certificates used to
1562 1562 verify the server certificate. Environment variables and ``~user``
1563 1563 constructs are expanded in the filename.
1564 1564
1565 1565 The server certificate or the certificate's certificate authority (CA)
1566 1566 must match a certificate from this file or certificate verification
1567 1567 will fail and connections to the server will be refused.
1568 1568
1569 1569 If defined, only certificates provided by this file will be used:
1570 1570 ``web.cacerts`` and any system/default certificates will not be
1571 1571 used.
1572 1572
1573 1573 This option has no effect if the per-host ``fingerprints`` option
1574 1574 is set.
1575 1575
1576 1576 The format of the file is as follows::
1577 1577
1578 1578 -----BEGIN CERTIFICATE-----
1579 1579 ... (certificate in base64 PEM encoding) ...
1580 1580 -----END CERTIFICATE-----
1581 1581 -----BEGIN CERTIFICATE-----
1582 1582 ... (certificate in base64 PEM encoding) ...
1583 1583 -----END CERTIFICATE-----
1584 1584
1585 1585 For example::
1586 1586
1587 1587 [hostsecurity]
1588 1588 hg.example.com:fingerprints = sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
1589 1589 hg2.example.com:fingerprints = sha1:914f1aff87249c09b6859b88b1906d30756491ca, sha1:fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1590 1590 hg3.example.com:fingerprints = sha256:9a:b0:dc:e2:75:ad:8a:b7:84:58:e5:1f:07:32:f1:87:e6:bd:24:22:af:b7:ce:8e:9c:b4:10:cf:b9:f4:0e:d2
1591 1591 foo.example.com:verifycertsfile = /etc/ssl/trusted-ca-certs.pem
1592 1592
1593 1593 To change the default minimum protocol version to TLS 1.2 but to allow TLS 1.1
1594 1594 when connecting to ``hg.example.com``::
1595 1595
1596 1596 [hostsecurity]
1597 1597 minimumprotocol = tls1.2
1598 1598 hg.example.com:minimumprotocol = tls1.1
1599 1599
1600 1600 ``http_proxy``
1601 1601 --------------
1602 1602
1603 1603 Used to access web-based Mercurial repositories through a HTTP
1604 1604 proxy.
1605 1605
1606 1606 ``host``
1607 1607 Host name and (optional) port of the proxy server, for example
1608 1608 "myproxy:8000".
1609 1609
1610 1610 ``no``
1611 1611 Optional. Comma-separated list of host names that should bypass
1612 1612 the proxy.
1613 1613
1614 1614 ``passwd``
1615 1615 Optional. Password to authenticate with at the proxy server.
1616 1616
1617 1617 ``user``
1618 1618 Optional. User name to authenticate with at the proxy server.
1619 1619
1620 1620 ``always``
1621 1621 Optional. Always use the proxy, even for localhost and any entries
1622 1622 in ``http_proxy.no``. (default: False)
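
For example, an illustrative sketch (host and user names are assumptions)::

    [http_proxy]
    host = myproxy:8000
    no = localhost, example.com
    user = alice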
1623 1623
1624 1624 ``http``
1625 1625 ----------
1626 1626
1627 1627 Used to configure access to Mercurial repositories via HTTP.
1628 1628
1629 1629 ``timeout``
1630 1630 If set, blocking operations will timeout after that many seconds.
1631 1631 (default: None)
1632 1632
1633 1633 ``merge``
1634 1634 ---------
1635 1635
1636 1636 This section specifies behavior during merges and updates.
1637 1637
1638 1638 ``checkignored``
1639 1639 Controls behavior when an ignored file on disk has the same name as a tracked
1640 1640 file in the changeset being merged or updated to, and has different
1641 1641 contents. Options are ``abort``, ``warn`` and ``ignore``. With ``abort``,
1642 1642 abort on such files. With ``warn``, warn on such files and back them up as
1643 1643 ``.orig``. With ``ignore``, don't print a warning and back them up as
1644 1644 ``.orig``. (default: ``abort``)
1645 1645
1646 1646 ``checkunknown``
1647 1647 Controls behavior when an unknown file that isn't ignored has the same name
1648 1648 as a tracked file in the changeset being merged or updated to, and has
1649 1649 different contents. Similar to ``merge.checkignored``, except for files that
1650 1650 are not ignored. (default: ``abort``)
1651 1651
1652 1652 ``on-failure``
1653 1653 When set to ``continue`` (the default), the merge process attempts to
1654 1654 merge all unresolved files using the merge chosen tool, regardless of
1655 1655 whether previous file merge attempts during the process succeeded or not.
1656 1656 Setting this to ``prompt`` will prompt after any merge failure continue
1657 1657 or halt the merge process. Setting this to ``halt`` will automatically
1658 1658 halt the merge process on any merge tool failure. The merge process
1659 1659 can be restarted by using the ``resolve`` command. When a merge is
1660 1660 halted, the repository is left in a normal ``unresolved`` merge state.
1661 1661 (default: ``continue``)
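
For example, a sketch that halts the merge process on the first tool
failure::

    [merge]
    on-failure = halt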
1662 1662
1663 1663 ``strict-capability-check``
1664 1664 Whether capabilities of internal merge tools are checked strictly
1665 1665 or not, while examining rules to decide which merge tool to use.
1666 1666 (default: False)
1667 1667
1668 1668 ``merge-patterns``
1669 1669 ------------------
1670 1670
1671 1671 This section specifies merge tools to associate with particular file
1672 1672 patterns. Tools matched here will take precedence over the default
1673 1673 merge tool. Patterns are globs by default, rooted at the repository
1674 1674 root.
1675 1675
1676 1676 Example::
1677 1677
1678 1678 [merge-patterns]
1679 1679 **.c = kdiff3
1680 1680 **.jpg = myimgmerge
1681 1681
1682 1682 ``merge-tools``
1683 1683 ---------------
1684 1684
1685 1685 This section configures external merge tools to use for file-level
1686 1686 merges. This section has likely been preconfigured at install time.
1687 1687 Use :hg:`config merge-tools` to check the existing configuration.
1688 1688 Also see :hg:`help merge-tools` for more details.
1689 1689
1690 1690 Example ``~/.hgrc``::
1691 1691
1692 1692 [merge-tools]
1693 1693 # Override stock tool location
1694 1694 kdiff3.executable = ~/bin/kdiff3
1695 1695 # Specify command line
1696 1696 kdiff3.args = $base $local $other -o $output
1697 1697 # Give higher priority
1698 1698 kdiff3.priority = 1
1699 1699
1700 1700 # Changing the priority of preconfigured tool
1701 1701 meld.priority = 0
1702 1702
1703 1703 # Disable a preconfigured tool
1704 1704 vimdiff.disabled = yes
1705 1705
1706 1706 # Define new tool
1707 1707 myHtmlTool.args = -m $local $other $base $output
1708 1708 myHtmlTool.regkey = Software\FooSoftware\HtmlMerge
1709 1709 myHtmlTool.priority = 1
1710 1710
1711 1711 Supported arguments:
1712 1712
1713 1713 ``priority``
1714 1714 The priority in which to evaluate this tool.
1715 1715 (default: 0)
1716 1716
1717 1717 ``executable``
1718 1718 Either just the name of the executable or its pathname.
1719 1719
1720 1720 .. container:: windows
1721 1721
1722 1722 On Windows, the path can use environment variables with ${ProgramFiles}
1723 1723 syntax.
1724 1724
1725 1725 (default: the tool name)
1726 1726
1727 1727 ``args``
1728 1728 The arguments to pass to the tool executable. You can refer to the
1729 1729 files being merged as well as the output file through these
1730 1730 variables: ``$base``, ``$local``, ``$other``, ``$output``.
1731 1731
1732 1732 The meaning of ``$local`` and ``$other`` can vary depending on which action is
1733 1733 being performed. During an update or merge, ``$local`` represents the original
1734 1734 state of the file, while ``$other`` represents the commit you are updating to or
1735 1735 the commit you are merging with. During a rebase, ``$local`` represents the
1736 1736 destination of the rebase, and ``$other`` represents the commit being rebased.
1737 1737
1738 1738 Some operations define custom labels to assist with identifying the revisions,
1739 1739 accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom
1740 1740 labels are not available, these will be ``local``, ``other``, and ``base``,
1741 1741 respectively.
1742 1742 (default: ``$local $base $other``)
1743 1743
1744 1744 ``premerge``
1745 1745 Attempt to run internal non-interactive 3-way merge tool before
1746 1746 launching external tool. Options are ``true``, ``false``, ``keep``,
1747 1747 ``keep-merge3``, or ``keep-mergediff`` (experimental). The ``keep`` option
1748 1748 will leave markers in the file if the premerge fails. The ``keep-merge3``
1749 1749 option will do the same but include information about the base of the merge
1750 1750 in the marker (see internal :merge3 in :hg:`help merge-tools`). The
1751 1751 ``keep-mergediff`` option is similar but uses a different marker style
1752 1752 (see internal :mergediff in :hg:`help merge-tools`). (default: True)
1753 1753
1754 1754 ``binary``
1755 1755 This tool can merge binary files. (default: False, unless tool
1756 1756 was selected by file pattern match)
1757 1757
1758 1758 ``symlink``
1759 1759 This tool can merge symlinks. (default: False)
1760 1760
1761 1761 ``check``
1762 1762 A list of merge success-checking options:
1763 1763
1764 1764 ``changed``
1765 1765 Ask whether merge was successful when the merged file shows no changes.
1766 1766 ``conflicts``
1767 1767 Check whether there are conflicts even though the tool reported success.
1768 1768 ``prompt``
1769 1769 Always prompt for merge success, regardless of success reported by tool.
1770 1770
1771 1771 ``fixeol``
1772 1772 Attempt to fix up EOL changes caused by the merge tool.
1773 1773 (default: False)
1774 1774
1775 1775 ``gui``
1776 1776 This tool requires a graphical interface to run. (default: False)
1777 1777
1778 1778 ``mergemarkers``
1779 1779 Controls whether the labels passed via ``$labellocal``, ``$labelother``, and
1780 1780 ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or
1781 1781 ``basic``. If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict
1782 1782 markers generated during premerge will be ``detailed`` if either this option or
1783 1783 the corresponding option in the ``[ui]`` section is ``detailed``.
1784 1784 (default: ``basic``)
1785 1785
1786 1786 ``mergemarkertemplate``
1787 1787 This setting can be used to override ``mergemarker`` from the
1788 1788 ``[command-templates]`` section on a per-tool basis; this applies to the
1789 1789 ``$label``-prefixed variables and to the conflict markers that are generated
1790 1790 if ``premerge`` is ``keep`` or ``keep-merge3``. See the corresponding variable
1791 1791 in ``[ui]`` for more information.
1792 1792
1793 1793 .. container:: windows
1794 1794
1795 1795 ``regkey``
1796 1796 Windows registry key which describes install location of this
1797 1797 tool. Mercurial will search for this key first under
1798 1798 ``HKEY_CURRENT_USER`` and then under ``HKEY_LOCAL_MACHINE``.
1799 1799 (default: None)
1800 1800
1801 1801 ``regkeyalt``
1802 1802 An alternate Windows registry key to try if the first key is not
1803 1803 found. The alternate key uses the same ``regname`` and ``regappend``
1804 1804 semantics of the primary key. The most common use for this key
1805 1805 is to search for 32bit applications on 64bit operating systems.
1806 1806 (default: None)
1807 1807
1808 1808 ``regname``
1809 1809 Name of value to read from specified registry key.
1810 1810 (default: the unnamed (default) value)
1811 1811
1812 1812 ``regappend``
1813 1813 String to append to the value read from the registry, typically
1814 1814 the executable name of the tool.
1815 1815 (default: None)
1816 1816
1817 1817 ``pager``
1818 1818 ---------
1819 1819
1820 1820 Setting used to control when to paginate and with what external tool. See
1821 1821 :hg:`help pager` for details.
1822 1822
1823 1823 ``pager``
1824 1824 Define the external tool used as pager.
1825 1825
1826 1826 If no pager is set, Mercurial uses the environment variable $PAGER.
1827 1827 If neither pager.pager nor $PAGER is set, a default pager will be
1828 1828 used, typically `less` on Unix and `more` on Windows. Example::
1829 1829
1830 1830 [pager]
1831 1831 pager = less -FRX
1832 1832
1833 1833 ``ignore``
1834 1834 List of commands to disable the pager for. Example::
1835 1835
1836 1836 [pager]
1837 1837 ignore = version, help, update
1838 1838
1839 1839 ``patch``
1840 1840 ---------
1841 1841
1842 1842 Settings used when applying patches, for instance through the 'import'
1843 1843 command or with the Mercurial Queues extension.
1844 1844
1845 1845 ``eol``
1846 1846 When set to ``strict``, the line endings of the patch content and of the
1847 1847 patched files are preserved. When set to ``lf`` or ``crlf``, line endings
1848 1848 of both are ignored when patching and the resulting line endings are
1849 1849 normalized to either LF (Unix) or CRLF (Windows). When set to
1850 1850 ``auto``, line endings are again ignored while patching but line
1851 1851 endings in patched files are normalized to their original setting
1852 1852 on a per-file basis. If the target file does not exist or has no line
1853 1853 endings, the patch line endings are preserved.
1854 1854 (default: strict)
1855 1855
1856 1856 ``fuzz``
1857 1857 The number of lines of 'fuzz' to allow when applying patches. This
1858 1858 controls how much context the patcher is allowed to ignore when
1859 1859 trying to apply a patch.
1860 1860 (default: 2)
1861 1861
1862 1862 ``paths``
1863 1863 ---------
1864 1864
1865 1865 Assigns symbolic names and behavior to repositories.
1866 1866
1867 1867 Options are symbolic names defining the URL or directory that is the
1868 1868 location of the repository. Example::
1869 1869
1870 1870 [paths]
1871 1871 my_server = https://example.com/my_repo
1872 1872 local_path = /home/me/repo
1873 1873
1874 1874 These symbolic names can be used from the command line. To pull
1875 1875 from ``my_server``: :hg:`pull my_server`. To push to ``local_path``:
1876 1876 :hg:`push local_path`. You can check :hg:`help urls` for details about
1877 1877 valid URLs.
1878 1878
1879 1879 Options containing colons (``:``) denote sub-options that can influence
1880 1880 behavior for that specific path. Example::
1881 1881
1882 1882 [paths]
1883 1883 my_server = https://example.com/my_path
1884 1884 my_server:pushurl = ssh://example.com/my_path
1885 1885
1886 1886 Paths using the `path://otherpath` scheme will inherit the sub-option values from
1887 1887 the path they point to.
1888 1888
1889 1889 The following sub-options can be defined:
1890 1890
1891 1891 ``multi-urls``
1892 1892 A boolean option. When enabled, the value of the `[paths]` entry will be
1893 1893 parsed as a list and the alias will resolve to multiple destinations. If some
1894 1894 of the list entries use the `path://` syntax, the sub-options will be
1895 1895 inherited individually.
1896 1896
1897 1897 ``pushurl``
1898 1898 The URL to use for push operations. If not defined, the location
1899 1899 defined by the path's main entry is used.
1900 1900
1901 1901 ``pushrev``
1902 1902 A revset defining which revisions to push by default.
1903 1903
1904 1904 When :hg:`push` is executed without a ``-r`` argument, the revset
1905 1905 defined by this sub-option is evaluated to determine what to push.
1906 1906
1907 1907 For example, a value of ``.`` will push the working directory's
1908 1908 revision by default.
1909 1909
1910 1910 Revsets specifying bookmarks will not result in the bookmark being
1911 1911 pushed.
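
For example, an illustrative sketch that pushes only the working
directory's revision by default (the URL is an assumption)::

    [paths]
    default = https://example.com/my_repo
    default:pushrev = .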
1912 1912
1913 1913 ``bookmarks.mode``
1914 1914 How bookmarks are handled during exchange. It supports the following values:
1915 1915
1916 1916 - ``default``: the default behavior, local and remote bookmarks are "merged"
1917 1917 on push/pull.
1918 1918
1919 1919 - ``mirror``: when pulling, replace local bookmarks by remote bookmarks. This
1920 1920 is useful to replicate a repository, or as an optimization.
1921 1921
1922 1922 - ``ignore``: ignore bookmarks during exchange.
1923 1923 (This currently only affects pulling.)
1924 1924
1925 1925 The following special named paths exist:
1926 1926
1927 1927 ``default``
1928 1928 The URL or directory to use when no source or remote is specified.
1929 1929
1930 1930 :hg:`clone` will automatically define this path to the location the
1931 1931 repository was cloned from.
1932 1932
1933 1933 ``default-push``
1934 1934 (deprecated) The URL or directory for the default :hg:`push` location.
1935 1935 ``default:pushurl`` should be used instead.
1936 1936
1937 1937 ``phases``
1938 1938 ----------
1939 1939
1940 1940 Specifies default handling of phases. See :hg:`help phases` for more
1941 1941 information about working with phases.
1942 1942
1943 1943 ``publish``
1944 1944 Controls draft phase behavior when working as a server. When true,
1945 1945 pushed changesets are set to public in both client and server and
1946 1946 pulled or cloned changesets are set to public in the client.
1947 1947 (default: True)
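
For example, a common sketch for serving non-publishing repositories::

    [phases]
    publish = no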
1948 1948
1949 1949 ``new-commit``
1950 1950 Phase of newly-created commits.
1951 1951 (default: draft)
1952 1952
1953 1953 ``checksubrepos``
1954 1954 Check the phase of the current revision of each subrepository. Allowed
1955 1955 values are "ignore", "follow" and "abort". For settings other than
1956 1956 "ignore", the phase of the current revision of each subrepository is
1957 1957 checked before committing the parent repository. If any of those phases is
1958 1958 greater than the phase of the parent repository (e.g. if a subrepo is in a
1959 1959 "secret" phase while the parent repo is in "draft" phase), the commit is
1960 1960 either aborted (if checksubrepos is set to "abort") or the higher phase is
1961 1961 used for the parent repository commit (if set to "follow").
1962 1962 (default: follow)
1963 1963
1964 1964
1965 1965 ``profiling``
1966 1966 -------------
1967 1967
1968 1968 Specifies profiling type, format, and file output. Two profilers are
1969 1969 supported: an instrumenting profiler (named ``ls``), and a sampling
1970 1970 profiler (named ``stat``).
1971 1971
1972 1972 In this section description, 'profiling data' stands for the raw data
1973 1973 collected during profiling, while 'profiling report' stands for a
1974 1974 statistical text report generated from the profiling data.
1975 1975
1976 1976 ``enabled``
1977 1977 Enable the profiler.
1978 1978 (default: false)
1979 1979
1980 1980 This is equivalent to passing ``--profile`` on the command line.
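
For example, an illustrative profiling setup (the output path is an
assumption)::

    [profiling]
    enabled = true
    output = ~/hg-profile.txt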
1981 1981
1982 1982 ``type``
1983 1983 The type of profiler to use.
1984 1984 (default: stat)
1985 1985
1986 1986 ``ls``
1987 1987 Use Python's built-in instrumenting profiler. This profiler
1988 1988 works on all platforms, but each line number it reports is the
1989 1989 first line of a function. This restriction makes it difficult to
1990 1990 identify the expensive parts of a non-trivial function.
1991 1991 ``stat``
1992 1992 Use a statistical profiler, statprof. This profiler is most
1993 1993 useful for profiling commands that run for longer than about 0.1
1994 1994 seconds.
1995 1995
1996 1996 ``format``
1997 1997 Profiling format. Specific to the ``ls`` instrumenting profiler.
1998 1998 (default: text)
1999 1999
2000 2000 ``text``
2001 2001 Generate a profiling report. When saving to a file, it should be
2002 2002 noted that only the report is saved, and the profiling data is
2003 2003 not kept.
2004 2004 ``kcachegrind``
2005 2005 Format profiling data for kcachegrind use: when saving to a
2006 2006 file, the generated file can directly be loaded into
2007 2007 kcachegrind.
2008 2008
2009 2009 ``statformat``
2010 2010 Profiling format for the ``stat`` profiler.
2011 2011 (default: hotpath)
2012 2012
2013 2013 ``hotpath``
2014 2014 Show a tree-based display containing the hot path of execution (where
2015 2015 most time was spent).
2016 2016 ``bymethod``
2017 2017 Show a table of methods ordered by how frequently they are active.
2018 2018 ``byline``
2019 2019 Show a table of lines in files ordered by how frequently they are active.
2020 2020 ``json``
2021 2021 Render profiling data as JSON.
2022 2022
2023 2023 ``freq``
2024 2024 Sampling frequency. Specific to the ``stat`` sampling profiler.
2025 2025 (default: 1000)
2026 2026
2027 2027 ``output``
2028 2028 File path where profiling data or report should be saved. If the
2029 2029 file exists, it is replaced. (default: None, data is printed on
2030 2030 stderr)
2031 2031
2032 2032 ``sort``
2033 2033 Sort field. Specific to the ``ls`` instrumenting profiler.
2034 2034 One of ``callcount``, ``reccallcount``, ``totaltime`` and
2035 2035 ``inlinetime``.
2036 2036 (default: inlinetime)
2037 2037
2038 2038 ``time-track``
2039 2039 Controls whether the stat profiler tracks ``cpu`` or ``real`` time.
2040 2040 (default: ``cpu`` on Windows, otherwise ``real``)
2041 2041
2042 2042 ``limit``
2043 2043 Number of lines to show. Specific to the ``ls`` instrumenting profiler.
2044 2044 (default: 30)
2045 2045
2046 2046 ``nested``
2047 2047 Show at most this number of lines of drill-down info after each main entry.
2048 2048 This can help explain the difference between Total and Inline.
2049 2049 Specific to the ``ls`` instrumenting profiler.
2050 2050 (default: 0)
2051 2051
2052 2052 ``showmin``
2053 2053 Minimum fraction of samples an entry must have for it to be displayed.
2054 2054 Can be specified as a float between ``0.0`` and ``1.0`` or can have a
2055 2055 ``%`` afterwards to allow values up to ``100``. e.g. ``5%``.
2056 2056
2057 2057 Only used by the ``stat`` profiler.
2058 2058
2059 2059 For the ``hotpath`` format, default is ``0.05``.
2060 2060 For the ``chrome`` format, default is ``0.005``.
2061 2061
2062 2062 The option is unused on other formats.
2063 2063
2064 2064 ``showmax``
2065 2065 Maximum fraction of samples an entry can have before it is ignored in
2066 2066 display. The value format is the same as for ``showmin``.
2067 2067
2068 2068 Only used by the ``stat`` profiler.
2069 2069
2070 2070 For the ``chrome`` format, default is ``0.999``.
2071 2071
2072 2072 The option is unused on other formats.
2073 2073
2074 2074 ``showtime``
2075 2075 Show time taken as absolute durations, in addition to percentages.
2076 2076 Only used by the ``hotpath`` format.
2077 2077 (default: true)
2078 2078
2079 2079 ``progress``
2080 2080 ------------
2081 2081
2082 2082 Mercurial commands can draw progress bars that are as informative as
2083 2083 possible. Some progress bars only offer indeterminate information, while others
2084 2084 have a definite end point.
2085 2085
2086 2086 ``debug``
2087 2087 Whether to print debug info when updating the progress bar. (default: False)
2088 2088
2089 2089 ``delay``
2090 2090 Number of seconds (float) before showing the progress bar. (default: 3)
2091 2091
2092 2092 ``changedelay``
2093 2093 Minimum delay before showing a new topic. When set to less than 3 * refresh,
2094 2094 that value will be used instead. (default: 1)
2095 2095
2096 2096 ``estimateinterval``
2097 2097 Maximum sampling interval in seconds for speed and estimated time
2098 2098 calculation. (default: 60)
2099 2099
2100 2100 ``refresh``
2101 2101 Time in seconds between refreshes of the progress bar. (default: 0.1)
2102 2102
2103 2103 ``format``
2104 2104 Format of the progress bar.
2105 2105
2106 2106 Valid entries for the format field are ``topic``, ``bar``, ``number``,
2107 2107 ``unit``, ``estimate``, ``speed``, and ``item``. ``item`` defaults to the
2108 2108 last 20 characters of the item, but this can be changed by adding either
2109 2109 ``-<num>`` which would take the last num characters, or ``+<num>`` for the
2110 2110 first num characters.
2111 2111
2112 2112 (default: topic bar number estimate)
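
For example, an illustrative format that also shows the first 10
characters of each item::

    [progress]
    format = topic bar number estimate item+10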
2113 2113
2114 2114 ``width``
2115 2115 If set, the maximum width of the progress information; that is, min(width,
2116 2116 term width) will be used.
2117 2117
2118 2118 ``clear-complete``
2119 2119 Clear the progress bar after it's done. (default: True)
2120 2120
2121 2121 ``disable``
2122 2122 If true, don't show a progress bar.
2123 2123
2124 2124 ``assume-tty``
2125 2125 If true, ALWAYS show a progress bar, unless disable is given.
2126 2126
2127 2127 ``rebase``
2128 2128 ----------
2129 2129
2130 2130 ``evolution.allowdivergence``
2131 2131 Defaults to False. When True, allows creating divergence when rebasing
2132 2132 obsolete changesets.
2133 2133
2134 2134 ``revsetalias``
2135 2135 ---------------
2136 2136
2137 2137 Alias definitions for revsets. See :hg:`help revsets` for details.
2138 2138
2139 2139 ``rewrite``
2140 2140 -----------
2141 2141
2142 2142 ``backup-bundle``
2143 2143 Whether to save stripped changesets to a bundle file. (default: True)
2144 2144
2145 2145 ``update-timestamp``
2146 2146 If true, updates the date and time of the changeset to the current date and
2147 2147 time. It is only applicable for `hg amend`, `hg commit --amend` and
2148 2148 `hg uncommit` in the current version.
2149 2149
2150 2150 ``empty-successor``
2151 2151
2152 2152 Control what happens with empty successors that are the result of rewrite
2153 2153 operations. If set to ``skip``, the successor is not created. If set to
2154 2154 ``keep``, the empty successor is created and kept.
2155 2155
2156 2156 Currently, only the rebase and absorb commands consider this configuration.
2157 2157 (EXPERIMENTAL)
2158 2158
2159 2159 ``rhg``
2160 2160 -------
2161 2161
2162 2162 The pure Rust fast-path for Mercurial. See `rust/README.rst` in the Mercurial repository.
2163 2163
2164 2164 ``fallback-executable``
2165 2165 Path to the executable to run in a sub-process when falling back to
2166 2166 another implementation of Mercurial.
2167 2167
2168 2168 ``fallback-immediately``
2169 2169 Fall back to ``fallback-executable`` as soon as possible, regardless of
2170 2170 the `rhg.on-unsupported` configuration. Useful for debugging, for example to
2171 2171 bypass `rhg` if the default `hg` points to `rhg`.
2172 2172
2173 2173 Note that because this requires loading the configuration, it is possible
2174 2174 that `rhg` errors out before being able to fall back.
2175 2175
2176 2176 ``ignored-extensions``
2177 2177 Controls which extensions should be ignored by `rhg`. By default, `rhg`
2178 2178 triggers the `rhg.on-unsupported` behavior for any unsupported extension.
2179 2179 Users can disable that behavior when they know that a given extension
2180 2180 does not need support from `rhg`.
2181 2181
2182 2182 Expects a list of extension names, or ``*`` to ignore all extensions.
2183 2183
2184 2184 Note: ``*:<suboption>`` is also a valid extension name for this
2185 2185 configuration option.
2186 2186 As of this writing, the only valid "global" suboption is ``required``.
2187 2187
2188 2188 ``on-unsupported``
2189 2189 Controls the behavior of `rhg` when detecting unsupported features.
2190 2190
2191 2191 Possible values are `abort` (default), `abort-silent` and `fallback`.
2192 2192
2193 2193 ``abort``
2194 2194 Print an error message describing what feature is not supported,
2195 2195 and exit with code 252
2196 2196
2197 2197 ``abort-silent``
2198 2198 Silently exit with code 252
2199 2199
2200 2200 ``fallback``
2201 2201 Try running the fallback executable with the same parameters
2202 2202 (and trace the fallback reason, use `RUST_LOG=trace` to see).
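
For example, an illustrative configuration that silently falls back (the
executable path is an assumption)::

    [rhg]
    on-unsupported = fallback
    fallback-executable = /usr/local/bin/hg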
2203 2203
2204 2204 ``share``
2205 2205 ---------
2206 2206
2207 2207 ``safe-mismatch.source-safe``
2208 2208 Controls what happens when the shared repository does not use the
2209 2209 share-safe mechanism but its source repository does.
2210 2210
2211 2211 Possible values are `abort` (default), `allow`, `upgrade-abort` and
2212 2212 `upgrade-allow`.
2213 2213
2214 2214 ``abort``
2215 2215 Disallows running any command and aborts
2216 2216 ``allow``
2217 2217 Respects the feature presence in the share source
2218 2218 ``upgrade-abort``
2219 2219 Tries to upgrade the share to use share-safe; if it fails, aborts
2220 2220 ``upgrade-allow``
2221 2221 Tries to upgrade the share; if it fails, continue by
2222 2222 respecting the share source setting
2223 2223
2224 2224 Check :hg:`help config.format.use-share-safe` for details about the
2225 2225 share-safe feature.
2226 2226
2227 2227 ``safe-mismatch.source-safe:verbose-upgrade``
2228 2228 Display a message when upgrading. (default: True)
2229 2229
2230 2230 ``safe-mismatch.source-safe.warn``
2231 2231 Shows a warning on operations if the shared repository does not use
2232 2232 share-safe, but the source repository does.
2233 2233 (default: True)
2234 2234
2235 2235 ``safe-mismatch.source-not-safe``
2236 2236 Controls what happens when the shared repository uses the share-safe
2237 2237 mechanism but its source does not.
2238 2238
2239 2239 Possible values are `abort` (default), `allow`, `downgrade-abort` and
2240 2240 `downgrade-allow`.
2241 2241
2242 2242 ``abort``
2243 2243 Disallows running any command and aborts
2244 2244 ``allow``
2245 2245 Respects the feature presence in the share source
2246 2246 ``downgrade-abort``
2247 2247 Tries to downgrade the share to not use share-safe; if it fails, aborts
2248 2248 ``downgrade-allow``
2249 2249 Tries to downgrade the share to not use share-safe;
2250 2250 if it fails, continue by respecting the shared source setting
2251 2251
2252 2252 Check :hg:`help config.format.use-share-safe` for details about the
2253 2253 share-safe feature.
2254 2254
2255 2255 ``safe-mismatch.source-not-safe:verbose-upgrade``
2256 2256 Display a message when upgrading. (default: True)
2257 2257
2258 2258 ``safe-mismatch.source-not-safe.warn``
2259 2259 Shows a warning on operations if the shared repository uses share-safe,
2260 2260 but the source repository does not.
2261 2261 (default: True)
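
For example, a sketch of a share setup that silently upgrades or downgrades
shares to match their source, built from the values documented above::

  [share]
  safe-mismatch.source-safe = upgrade-allow
  safe-mismatch.source-not-safe = downgrade-allow
  safe-mismatch.source-safe.warn = no
  safe-mismatch.source-not-safe.warn = no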
2262 2262
2263 2263 ``storage``
2264 2264 -----------
2265 2265
2266 2266 Control the strategy Mercurial uses internally to store history. Options in this
2267 2267 category impact performance and repository size.
2268 2268
2269 2269 ``revlog.issue6528.fix-incoming``
2270 2270 Version 5.8 of Mercurial had a bug leading to altering the parents of file
2271 2271 revisions with copy information (or any other metadata) on exchange. This
2272 2272 led to the copy metadata being overlooked by various internal logic. The
2273 2273 issue was fixed in Mercurial 5.8.1.
2274 2274 (See https://bz.mercurial-scm.org/show_bug.cgi?id=6528 for details)
2275 2275
2276 2276 As a result Mercurial is now checking and fixing incoming file revisions to
2277 2277 make sure their parents are in the right order. This behavior can be
2278 2278 disabled by setting this option to `no`. This applies to revisions added
2279 2279 through push, pull, clone and unbundle.
2280 2280
2281 2281 To fix affected revisions that already exist within the repository, one can
2282 2282 use :hg:`debug-repair-issue-6528`.
2283 2283
2284 .. container:: verbose
2285
2286 ``revlog.delta-parent-search.candidate-group-chunk-size``
2287 Tune the number of delta bases the storage will consider in the
2288 same "round" of search. In some very rare cases, using a smaller value
2289 might result in faster processing at the possible expense of storage
2290 space, while using larger values might result in slower processing at the
2291 possible benefit of storage space. A value of "0" means no limitation.
2292
2293 default: no limitation
2294
2295 It is unlikely that you'll have to tune this configuration. If you think
2296 you do, consider talking with the Mercurial developer community about your
2297 repositories.
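
For instance, a sketch capping each search round at a small group of bases
(the value is purely illustrative)::

  [storage]
  revlog.delta-parent-search.candidate-group-chunk-size = 10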
2298
2284 2299 ``revlog.optimize-delta-parent-choice``
2285 2300 When storing a merge revision, both parents will be equally considered as
2286 2301 a possible delta base. This results in better delta selection and improved
2287 2302 revlog compression. This option is enabled by default.
2288 2303
2289 2304 Turning this option off can result in large increase of repository size for
2290 2305 repository with many merges.
2291 2306
2292 2307 ``revlog.persistent-nodemap.mmap``
2293 2308 Whether to use the Operating System "memory mapping" feature (when
2294 2309 possible) to access the persistent nodemap data. This improves performance
2295 2310 and reduces memory pressure.
2296 2311
2297 2312 Defaults to True.
2298 2313
2299 2314 For details on the "persistent-nodemap" feature, see:
2300 2315 :hg:`help config.format.use-persistent-nodemap`.
2301 2316
2302 2317 ``revlog.persistent-nodemap.slow-path``
2303 2318 Control the behavior of Mercurial when using a repository with "persistent"
2304 2319 nodemap with an installation of Mercurial without a fast implementation for
2305 2320 the feature:
2306 2321
2307 2322 ``allow``: Silently use the slower implementation to access the repository.
2308 2323 ``warn``: Warn, but use the slower implementation to access the repository.
2309 2324 ``abort``: Prevent access to such repositories. (This is the default)
2310 2325
2311 2326 For details on the "persistent-nodemap" feature, see:
2312 2327 :hg:`help config.format.use-persistent-nodemap`.
2313 2328
2314 2329 ``revlog.reuse-external-delta-parent``
2315 2330 Control the order in which delta parents are considered when adding new
2316 2331 revisions from an external source.
2317 2332 (typically: apply bundle from `hg pull` or `hg push`).
2318 2333
2319 2334 New revisions are usually provided as a delta against other revisions. By
2320 2335 default, Mercurial will try to reuse this delta first, therefore using the
2321 2336 same "delta parent" as the source. Directly using delta's from the source
2322 2337 reduces CPU usage and usually speeds up operation. However, in some case,
2323 2338 the source might have sub-optimal delta bases and forcing their reevaluation
2324 2339 is useful. For example, pushes from an old client could have sub-optimal
2325 2340 delta's parent that the server want to optimize. (lack of general delta, bad
2326 2341 parents, choice, lack of sparse-revlog, etc).
2327 2342
2328 2343 This option is enabled by default. Turning it off will ensure bad delta
2329 2344 parent choices from older clients do not propagate to this repository, at
2330 2345 the cost of a small increase in CPU consumption.
2331 2346
2332 2347 Note: this option only controls the order in which delta parents are
2333 2348 considered. Even when disabled, the existing delta from the source will be
2334 2349 reused if the same delta parent is selected.
2335 2350
2336 2351 ``revlog.reuse-external-delta``
2337 2352 Control the reuse of delta from external source.
2338 2353 (typically: apply bundle from `hg pull` or `hg push`).
2339 2354
2340 2355 New revisions are usually provided as a delta against another revision. By
2341 2356 default, Mercurial will not recompute the same delta again, trusting
2342 2357 externally provided deltas. There have been rare cases of small adjustments
2343 2358 to the diffing algorithm in the past. So in some rare cases, recomputing
2344 2359 deltas provided by ancient clients can provide better results. Disabling
2345 2360 this option means going through a full delta recomputation for all incoming
2346 2361 revisions. It means a large increase in CPU usage and will slow operations
2347 2362 down.
2348 2363
2349 2364 This option is enabled by default. When disabled, it also disables the
2350 2365 related ``storage.revlog.reuse-external-delta-parent`` option.
2351 2366
2352 2367 ``revlog.zlib.level``
2353 2368 Zlib compression level used when storing data into the repository. Accepted
2354 2369 values range from 1 (lowest compression) to 9 (highest compression). Zlib
2355 2370 default value is 6.
2356 2371
2357 2372
2358 2373 ``revlog.zstd.level``
2359 2374 zstd compression level used when storing data into the repository. Accepted
2360 2375 values range from 1 (lowest compression) to 22 (highest compression).
2361 2376 (default 3)
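
For example, a sketch trading CPU time for better compression on an
archival repository (the level is illustrative)::

  [storage]
  revlog.zstd.level = 10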
2362 2377
2363 2378 ``server``
2364 2379 ----------
2365 2380
2366 2381 Controls generic server settings.
2367 2382
2368 2383 ``bookmarks-pushkey-compat``
2369 2384 Trigger the pushkey hook when bookmark updates are pushed. This config
2370 2385 exists for compatibility purposes. (default: True)
2371 2386
2372 2387 If you use ``pushkey`` and ``pre-pushkey`` hooks to control bookmark
2373 2388 movement we recommend you migrate them to ``txnclose-bookmark`` and
2374 2389 ``pretxnclose-bookmark``.
2375 2390
2376 2391 ``compressionengines``
2377 2392 List of compression engines and their relative priority to advertise
2378 2393 to clients.
2379 2394
2380 2395 The order of compression engines determines their priority, the first
2381 2396 having the highest priority. If a compression engine is not listed
2382 2397 here, it won't be advertised to clients.
2383 2398
2384 2399 If not set (the default), built-in defaults are used. Run
2385 2400 :hg:`debuginstall` to list available compression engines and their
2386 2401 default wire protocol priority.
2387 2402
2388 2403 Older Mercurial clients only support zlib compression and this setting
2389 2404 has no effect for legacy clients.
2390 2405
2391 2406 ``uncompressed``
2392 2407 Whether to allow clients to clone a repository using the
2393 2408 uncompressed streaming protocol. This transfers about 40% more
2394 2409 data than a regular clone, but uses less memory and CPU on both
2395 2410 server and client. Over a LAN (100 Mbps or better) or a very fast
2396 2411 WAN, an uncompressed streaming clone is a lot faster (~10x) than a
2397 2412 regular clone. Over most WAN connections (anything slower than
2398 2413 about 6 Mbps), uncompressed streaming is slower, because of the
2399 2414 extra data transfer overhead. This mode will also temporarily hold
2400 2415 the write lock while determining what data to transfer.
2401 2416 (default: True)
2402 2417
2403 2418 ``uncompressedallowsecret``
2404 2419 Whether to allow stream clones when the repository contains secret
2405 2420 changesets. (default: False)
2406 2421
2407 2422 ``preferuncompressed``
2408 2423 When set, clients will try to use the uncompressed streaming
2409 2424 protocol. (default: False)
2410 2425
2411 2426 ``disablefullbundle``
2412 2427 When set, servers will refuse attempts to do pull-based clones.
2413 2428 If this option is set, ``preferuncompressed`` and/or clone bundles
2414 2429 are highly recommended. Partial clones will still be allowed.
2415 2430 (default: False)
2416 2431
2417 2432 ``streamunbundle``
2418 2433 When set, servers will apply data sent from the client directly,
2419 2434 otherwise it will be written to a temporary file first. This option
2420 2435 effectively prevents concurrent pushes.
2421 2436
2422 2437 ``pullbundle``
2423 2438 When set, the server will check pullbundles.manifest for bundles
2424 2439 covering the requested heads and common nodes. The first matching
2425 2440 entry will be streamed to the client.
2426 2441
2427 2442 For HTTP transport, the stream will still use zlib compression
2428 2443 for older clients.
2429 2444
2430 2445 ``concurrent-push-mode``
2431 2446 Level of allowed race condition between two pushing clients.
2432 2447
2433 2448 - 'strict': push is aborted if another client touched the repository
2434 2449 while the push was preparing.
2435 2450 - 'check-related': push is only aborted if it affects heads that were also
2436 2451 affected while the push was preparing. (default since 5.4)
2437 2452
2438 2453 'check-related' only takes effect for compatible clients (version
2439 2454 4.3 and later). Older clients will use 'strict'.
2440 2455
2441 2456 ``validate``
2442 2457 Whether to validate the completeness of pushed changesets by
2443 2458 checking that all new file revisions specified in manifests are
2444 2459 present. (default: False)
2445 2460
2446 2461 ``maxhttpheaderlen``
2447 2462 Instruct HTTP clients not to send request headers longer than this
2448 2463 many bytes. (default: 1024)
2449 2464
2450 2465 ``bundle1``
2451 2466 Whether to allow clients to push and pull using the legacy bundle1
2452 2467 exchange format. (default: True)
2453 2468
2454 2469 ``bundle1gd``
2455 2470 Like ``bundle1`` but only used if the repository is using the
2456 2471 *generaldelta* storage format. (default: True)
2457 2472
2458 2473 ``bundle1.push``
2459 2474 Whether to allow clients to push using the legacy bundle1 exchange
2460 2475 format. (default: True)
2461 2476
2462 2477 ``bundle1gd.push``
2463 2478 Like ``bundle1.push`` but only used if the repository is using the
2464 2479 *generaldelta* storage format. (default: True)
2465 2480
2466 2481 ``bundle1.pull``
2467 2482 Whether to allow clients to pull using the legacy bundle1 exchange
2468 2483 format. (default: True)
2469 2484
2470 2485 ``bundle1gd.pull``
2471 2486 Like ``bundle1.pull`` but only used if the repository is using the
2472 2487 *generaldelta* storage format. (default: True)
2473 2488
2474 2489 Large repositories using the *generaldelta* storage format should
2475 2490 consider setting this option because converting *generaldelta*
2476 2491 repositories to the exchange format required by the bundle1 data
2477 2492 format can consume a lot of CPU.
2478 2493
2479 2494 ``bundle2.stream``
2480 2495 Whether to allow clients to pull using the bundle2 streaming protocol.
2481 2496 (default: True)
2482 2497
2483 2498 ``zliblevel``
2484 2499 Integer between ``-1`` and ``9`` that controls the zlib compression level
2485 2500 for wire protocol commands that send zlib compressed output (notably the
2486 2501 commands that send repository history data).
2487 2502
2488 2503 The default (``-1``) uses the default zlib compression level, which is
2489 2504 likely equivalent to ``6``. ``0`` means no compression. ``9`` means
2490 2505 maximum compression.
2491 2506
2492 2507 Setting this option allows server operators to make trade-offs between
2493 2508 bandwidth and CPU used. Lowering the compression lowers CPU utilization
2494 2509 but sends more bytes to clients.
2495 2510
2496 2511 This option only impacts the HTTP server.
2497 2512
2498 2513 ``zstdlevel``
2499 2514 Integer between ``1`` and ``22`` that controls the zstd compression level
2500 2515 for wire protocol commands. ``1`` is the minimal amount of compression and
2501 2516 ``22`` is the highest amount of compression.
2502 2517
2503 2518 The default (``3``) should be significantly faster than zlib while likely
2504 2519 delivering better compression ratios.
2505 2520
2506 2521 This option only impacts the HTTP server.
2507 2522
2508 2523 See also ``server.zliblevel``.
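
For example, a sketch lowering server-side compression cost at the expense
of bandwidth (the levels are illustrative)::

  [server]
  zliblevel = 4
  zstdlevel = 1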
2509 2524
2510 2525 ``view``
2511 2526 Repository filter used when exchanging revisions with the peer.
2512 2527
2513 2528 The default view (``served``) excludes secret and hidden changesets.
2514 2529 Another useful value is ``immutable`` (no draft, secret or hidden
2515 2530 changesets). (EXPERIMENTAL)
2516 2531
2517 2532 ``smtp``
2518 2533 --------
2519 2534
2520 2535 Configuration for extensions that need to send email messages.
2521 2536
2522 2537 ``host``
2523 2538 Host name of mail server, e.g. "mail.example.com".
2524 2539
2525 2540 ``port``
2526 2541 Optional. Port to connect to on mail server. (default: 465 if
2527 2542 ``tls`` is smtps; 25 otherwise)
2528 2543
2529 2544 ``tls``
2530 2545 Optional. Method to enable TLS when connecting to mail server: starttls,
2531 2546 smtps or none. (default: none)
2532 2547
2533 2548 ``username``
2534 2549 Optional. User name for authenticating with the SMTP server.
2535 2550 (default: None)
2536 2551
2537 2552 ``password``
2538 2553 Optional. Password for authenticating with the SMTP server. If not
2539 2554 specified, interactive sessions will prompt the user for a
2540 2555 password; non-interactive sessions will fail. (default: None)
2541 2556
2542 2557 ``local_hostname``
2543 2558 Optional. The hostname that the sender can use to identify
2544 2559 itself to the MTA.
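
For example, a sketch of a typical authenticated setup (host, port and
user are illustrative)::

  [smtp]
  host = mail.example.com
  port = 587
  tls = starttls
  username = hguser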
2545 2560
2546 2561
2547 2562 ``subpaths``
2548 2563 ------------
2549 2564
2550 2565 Subrepository source URLs can go stale if a remote server changes name
2551 2566 or becomes temporarily unavailable. This section lets you define
2552 2567 rewrite rules of the form::
2553 2568
2554 2569 <pattern> = <replacement>
2555 2570
2556 2571 where ``pattern`` is a regular expression matching a subrepository
2557 2572 source URL and ``replacement`` is the replacement string used to
2558 2573 rewrite it. Groups can be matched in ``pattern`` and referenced in
2559 2574 ``replacements``. For instance::
2560 2575
2561 2576 http://server/(.*)-hg/ = http://hg.server/\1/
2562 2577
2563 2578 rewrites ``http://server/foo-hg/`` into ``http://hg.server/foo/``.
2564 2579
2565 2580 Relative subrepository paths are first made absolute, and the
2566 2581 rewrite rules are then applied on the full (absolute) path. If ``pattern``
2567 2582 doesn't match the full path, an attempt is made to apply it on the
2568 2583 relative path alone. The rules are applied in definition order.
2569 2584
2570 2585 ``subrepos``
2571 2586 ------------
2572 2587
2573 2588 This section contains options that control the behavior of the
2574 2589 subrepositories feature. See also :hg:`help subrepos`.
2575 2590
2576 2591 Security note: auditing in Mercurial is known to be insufficient to
2577 2592 prevent clone-time code execution with carefully constructed Git
2578 2593 subrepos. It is unknown if a similar defect is present in Subversion
2579 2594 subrepos. Both Git and Subversion subrepos are disabled by default
2580 2595 out of security concerns. These subrepo types can be enabled using
2581 2596 the respective options below.
2582 2597
2583 2598 ``allowed``
2584 2599 Whether subrepositories are allowed in the working directory.
2585 2600
2586 2601 When false, commands involving subrepositories (like :hg:`update`)
2587 2602 will fail for all subrepository types.
2588 2603 (default: true)
2589 2604
2590 2605 ``hg:allowed``
2591 2606 Whether Mercurial subrepositories are allowed in the working
2592 2607 directory. This option only has an effect if ``subrepos.allowed``
2593 2608 is true.
2594 2609 (default: true)
2595 2610
2596 2611 ``git:allowed``
2597 2612 Whether Git subrepositories are allowed in the working directory.
2598 2613 This option only has an effect if ``subrepos.allowed`` is true.
2599 2614
2600 2615 See the security note above before enabling Git subrepos.
2601 2616 (default: false)
2602 2617
2603 2618 ``svn:allowed``
2604 2619 Whether Subversion subrepositories are allowed in the working
2605 2620 directory. This option only has an effect if ``subrepos.allowed``
2606 2621 is true.
2607 2622
2608 2623 See the security note above before enabling Subversion subrepos.
2609 2624 (default: false)
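
For example, a sketch enabling Git subrepositories only, after reviewing
the security note above::

  [subrepos]
  allowed = true
  git:allowed = true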
2610 2625
2611 2626 ``templatealias``
2612 2627 -----------------
2613 2628
2614 2629 Alias definitions for templates. See :hg:`help templates` for details.
2615 2630
2616 2631 ``templates``
2617 2632 -------------
2618 2633
2619 2634 Use the ``[templates]`` section to define template strings.
2620 2635 See :hg:`help templates` for details.
2621 2636
2622 2637 ``trusted``
2623 2638 -----------
2624 2639
2625 2640 Mercurial will not use the settings in the
2626 2641 ``.hg/hgrc`` file from a repository if it doesn't belong to a trusted
2627 2642 user or to a trusted group, as various hgrc features allow arbitrary
2628 2643 commands to be run. This issue is often encountered when configuring
2629 2644 hooks or extensions for shared repositories or servers. However,
2630 2645 the web interface will use some safe settings from the ``[web]``
2631 2646 section.
2632 2647
2633 2648 This section specifies what users and groups are trusted. The
2634 2649 current user is always trusted. To trust everybody, list a user or a
2635 2650 group with name ``*``. These settings must be placed in an
2636 2651 *already-trusted file* to take effect, such as ``$HOME/.hgrc`` of the
2637 2652 user or service running Mercurial.
2638 2653
2639 2654 ``users``
2640 2655 Comma-separated list of trusted users.
2641 2656
2642 2657 ``groups``
2643 2658 Comma-separated list of trusted groups.
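
For example, a sketch trusting one user and one group (the names are
illustrative)::

  [trusted]
  users = hgadmin
  groups = developers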
2644 2659
2645 2660
2646 2661 ``ui``
2647 2662 ------
2648 2663
2649 2664 User interface controls.
2650 2665
2651 2666 ``archivemeta``
2652 2667 Whether to include the .hg_archival.txt file containing metadata
2653 2668 (hashes for the repository base and for tip) in archives created
2654 2669 by the :hg:`archive` command or downloaded via hgweb.
2655 2670 (default: True)
2656 2671
2657 2672 ``askusername``
2658 2673 Whether to prompt for a username when committing. If True, and
2659 2674 neither ``$HGUSER`` nor ``$EMAIL`` has been specified, then the user will
2660 2675 be prompted to enter a username. If no username is entered, the
2661 2676 default ``USER@HOST`` is used instead.
2662 2677 (default: False)
2663 2678
2664 2679 ``clonebundles``
2665 2680 Whether the "clone bundles" feature is enabled.
2666 2681
2667 2682 When enabled, :hg:`clone` may download and apply a server-advertised
2668 2683 bundle file from a URL instead of using the normal exchange mechanism.
2669 2684
2670 2685 This can likely result in faster and more reliable clones.
2671 2686
2672 2687 (default: True)
2673 2688
2674 2689 ``clonebundlefallback``
2675 2690 Whether failure to apply an advertised "clone bundle" from a server
2676 2691 should result in fallback to a regular clone.
2677 2692
2678 2693 This is disabled by default because servers advertising "clone
2679 2694 bundles" often do so to reduce server load. If advertised bundles
2680 2695 start mass failing and clients automatically fall back to a regular
2681 2696 clone, this would add significant and unexpected load to the server
2682 2697 since the server is expecting clone operations to be offloaded to
2683 2698 pre-generated bundles. Failing fast (the default behavior) ensures
2684 2699 clients don't overwhelm the server when "clone bundle" application
2685 2700 fails.
2686 2701
2687 2702 (default: False)
2688 2703
2689 2704 ``clonebundleprefers``
2690 2705 Defines preferences for which "clone bundles" to use.
2691 2706
2692 2707 Servers advertising "clone bundles" may advertise multiple available
2693 2708 bundles. Each bundle may have different attributes, such as the bundle
2694 2709 type and compression format. This option is used to prefer a particular
2695 2710 bundle over another.
2696 2711
2697 2712 The following keys are defined by Mercurial:
2698 2713
2699 2714 BUNDLESPEC
2700 2715 A bundle type specifier. These are strings passed to :hg:`bundle -t`.
2701 2716 e.g. ``gzip-v2`` or ``bzip2-v1``.
2702 2717
2703 2718 COMPRESSION
2704 2719 The compression format of the bundle. e.g. ``gzip`` and ``bzip2``.
2705 2720
2706 2721 Server operators may define custom keys.
2707 2722
2708 2723 Example values: ``COMPRESSION=bzip2``,
2709 2724 ``BUNDLESPEC=gzip-v2, COMPRESSION=gzip``.
2710 2725
2711 2726 By default, the first bundle advertised by the server is used.
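
For example, a sketch preferring zstd-compressed bundles and falling back
to gzip (assuming the server advertises a ``COMPRESSION`` attribute)::

  [ui]
  clonebundleprefers = COMPRESSION=zstd, COMPRESSION=gzip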
2712 2727
2713 2728 ``color``
2714 2729 When to colorize output. Possible values are Boolean ("yes" or "no"), or
2715 2730 "debug", or "always". (default: "yes"). "yes" will use color whenever it
2716 2731 seems possible. See :hg:`help color` for details.
2717 2732
2718 2733 ``commitsubrepos``
2719 2734 Whether to commit modified subrepositories when committing the
2720 2735 parent repository. If False and one subrepository has uncommitted
2721 2736 changes, abort the commit.
2722 2737 (default: False)
2723 2738
2724 2739 ``debug``
2725 2740 Print debugging information. (default: False)
2726 2741
2727 2742 ``editor``
2728 2743 The editor to use during a commit. (default: ``$EDITOR`` or ``vi``)
2729 2744
2730 2745 ``fallbackencoding``
2731 2746 Encoding to try if it's not possible to decode the changelog using
2732 2747 UTF-8. (default: ISO-8859-1)
2733 2748
2734 2749 ``graphnodetemplate``
2735 2750 (DEPRECATED) Use ``command-templates.graphnode`` instead.
2736 2751
2737 2752 ``ignore``
2738 2753 A file to read per-user ignore patterns from. This file should be
2739 2754 in the same format as a repository-wide .hgignore file. Filenames
2740 2755 are relative to the repository root. This option supports hook syntax,
2741 2756 so if you want to specify multiple ignore files, you can do so by
2742 2757 setting something like ``ignore.other = ~/.hgignore2``. For details
2743 2758 of the ignore file format, see the ``hgignore(5)`` man page.
2744 2759
2745 2760 ``interactive``
2746 2761 Allow prompting the user. (default: True)
2747 2762
2748 2763 ``interface``
2749 2764 Select the default interface for interactive features (default: text).
2750 2765 Possible values are 'text' and 'curses'.
2751 2766
2752 2767 ``interface.chunkselector``
2753 2768 Select the interface for change recording (e.g. :hg:`commit -i`).
2754 2769 Possible values are 'text' and 'curses'.
2755 2770 This config overrides the interface specified by ui.interface.
2756 2771
2757 2772 ``large-file-limit``
2758 2773 Largest file size that gives no memory use warning.
2759 2774 Possible values are integers or 0 to disable the check.
2760 2775 Value is expressed in bytes by default, one can use standard units for
2761 2776 convenience (e.g. 10MB, 0.1GB, etc) (default: 10MB)
2762 2777
2763 2778 ``logtemplate``
2764 2779 (DEPRECATED) Use ``command-templates.log`` instead.
2765 2780
2766 2781 ``merge``
2767 2782 The conflict resolution program to use during a manual merge.
2768 2783 For more information on merge tools see :hg:`help merge-tools`.
2769 2784 For configuring merge tools see the ``[merge-tools]`` section.
2770 2785
2771 2786 ``mergemarkers``
2772 2787 Sets the merge conflict marker label styling. The ``detailed`` style
2773 2788 uses the ``command-templates.mergemarker`` setting to style the labels.
2774 2789 The ``basic`` style just uses 'local' and 'other' as the marker label.
2775 2790 One of ``basic`` or ``detailed``.
2776 2791 (default: ``basic``)
2777 2792
2778 2793 ``mergemarkertemplate``
2779 2794 (DEPRECATED) Use ``command-templates.mergemarker`` instead.
2780 2795
2781 2796 ``message-output``
2782 2797 Where to write status and error messages. (default: ``stdio``)
2783 2798
2784 2799 ``channel``
2785 2800 Use separate channel for structured output. (Command-server only)
2786 2801 ``stderr``
2787 2802 Everything to stderr.
2788 2803 ``stdio``
2789 2804 Status to stdout, and error to stderr.
2790 2805
2791 2806 ``origbackuppath``
2792 2807 The path to a directory used to store generated .orig files. If the path is
2793 2808 not a directory, one will be created. If set, files stored in this
2794 2809 directory have the same name as the original file and do not have a .orig
2795 2810 suffix.
2796 2811
2797 2812 ``paginate``
2798 2813 Control the pagination of command output (default: True). See :hg:`help pager`
2799 2814 for details.
2800 2815
2801 2816 ``patch``
2802 2817 An optional external tool that ``hg import`` and some extensions
2803 2818 will use for applying patches. By default Mercurial uses an
2804 2819 internal patch utility. The external tool must work as the common
2805 2820 Unix ``patch`` program. In particular, it must accept a ``-p``
2806 2821 argument to strip patch headers, a ``-d`` argument to specify the
2807 2822 current directory, a file name to patch, and a patch file to take
2808 2823 from stdin.
2809 2824
2810 2825 It is possible to specify a patch tool together with extra
2811 2826 arguments. For example, setting this option to ``patch --merge``
2812 2827 will use the ``patch`` program with its 2-way merge option.
2813 2828
2814 2829 ``portablefilenames``
2815 2830 Check for portable filenames. Can be ``warn``, ``ignore`` or ``abort``.
2816 2831 (default: ``warn``)
2817 2832
2818 2833 ``warn``
2819 2834 Print a warning message on POSIX platforms, if a file with a non-portable
2820 2835 filename is added (e.g. a file with a name that can't be created on
2821 2836 Windows because it contains reserved parts like ``AUX``, reserved
2822 2837 characters like ``:``, or would cause a case collision with an existing
2823 2838 file).
2824 2839
2825 2840 ``ignore``
2826 2841 Don't print a warning.
2827 2842
2828 2843 ``abort``
2829 2844 The command is aborted.
2830 2845
2831 2846 ``true``
2832 2847 Alias for ``warn``.
2833 2848
2834 2849 ``false``
2835 2850 Alias for ``ignore``.
2836 2851
2837 2852 .. container:: windows
2838 2853
2839 2854 On Windows, this configuration option is ignored and the command aborted.
2840 2855
2841 2856 ``pre-merge-tool-output-template``
2842 2857 (DEPRECATED) Use ``command-template.pre-merge-tool-output`` instead.
2843 2858
2844 2859 ``quiet``
2845 2860 Reduce the amount of output printed.
2846 2861 (default: False)
2847 2862
2848 2863 ``relative-paths``
2849 2864 Prefer relative paths in the UI.
2850 2865
2851 2866 ``remotecmd``
2852 2867 Remote command to use for clone/push/pull operations.
2853 2868 (default: ``hg``)
2854 2869
2855 2870 ``report_untrusted``
2856 2871 Warn if a ``.hg/hgrc`` file is ignored due to not being owned by a
2857 2872 trusted user or group.
2858 2873 (default: True)
2859 2874
2860 2875 ``slash``
2861 2876 (Deprecated. Use ``slashpath`` template filter instead.)
2862 2877
2863 2878 Display paths using a slash (``/``) as the path separator. This
2864 2879 only makes a difference on systems where the default path
2865 2880 separator is not the slash character (e.g. Windows uses the
2866 2881 backslash character (``\``)).
2867 2882 (default: False)
2868 2883
2869 2884 ``statuscopies``
2870 2885 Display copies in the status command.
2871 2886
2872 2887 ``ssh``
2873 2888 Command to use for SSH connections. (default: ``ssh``)
2874 2889
2875 2890 ``ssherrorhint``
2876 2891 A hint shown to the user in the case of SSH error (e.g.
2877 2892 ``Please see http://company/internalwiki/ssh.html``)
2878 2893
2879 2894 ``strict``
2880 2895 Require exact command names, instead of allowing unambiguous
2881 2896 abbreviations. (default: False)
2882 2897
2883 2898 ``style``
2884 2899 Name of style to use for command output.
2885 2900
2886 2901 ``supportcontact``
2887 2902 A URL where users should report a Mercurial traceback. Use this if you are a
2888 2903 large organisation with its own Mercurial deployment process and crash
2889 2904 reports should be addressed to your internal support.
2890 2905
2891 2906 ``textwidth``
2892 2907 Maximum width of help text. A longer line generated by ``hg help`` or
2893 2908 ``hg subcommand --help`` will be broken after white space to get this
2894 2909 width or the terminal width, whichever comes first.
2895 2910 A non-positive value will disable this and the terminal width will be
2896 2911 used. (default: 78)
2897 2912
2898 2913 ``timeout``
2899 2914 The timeout used when a lock is held (in seconds), a negative value
2900 2915 means no timeout. (default: 600)
2901 2916
2902 2917 ``timeout.warn``
2903 2918 Time (in seconds) before a warning is printed about a held lock. A negative
2904 2919 value means no warning. (default: 0)
2905 2920
2906 2921 ``traceback``
2907 2922 Mercurial always prints a traceback when an unknown exception
2908 2923 occurs. Setting this to True will make Mercurial print a traceback
2909 2924 on all exceptions, even those recognized by Mercurial (such as
2910 2925 IOError or MemoryError). (default: False)
2911 2926
2912 2927 ``tweakdefaults``
2913 2928
2914 2929 By default Mercurial's behavior changes very little from release
2915 2930 to release, but over time the recommended config settings
2916 2931 shift. Enable this config to opt in to automatic tweaks to
2917 2932 Mercurial's behavior over time. This config setting will have no
2918 2933 effect if ``HGPLAIN`` is set or ``HGPLAINEXCEPT`` is set and does
2919 2934 not include ``tweakdefaults``. (default: False)
2920 2935
2921 2936 It currently means::
2922 2937
2923 2938 .. tweakdefaultsmarker
2924 2939
2925 2940 ``username``
2926 2941 The committer of a changeset created when running "commit".
2927 2942 Typically a person's name and email address, e.g. ``Fred Widget
2928 2943 <fred@example.com>``. Environment variables in the
2929 2944 username are expanded.
2930 2945
2931 2946 (default: ``$EMAIL`` or ``username@hostname``. If the username in
2932 2947 hgrc is empty, e.g. if the system admin set ``username =`` in the
2933 2948 system hgrc, it has to be specified manually or in a different
2934 2949 hgrc file)
2935 2950
2936 2951 ``verbose``
2937 2952 Increase the amount of output printed. (default: False)
2938 2953
2939 2954
2940 2955 ``command-templates``
2941 2956 ---------------------
2942 2957
2943 2958 Templates used for customizing the output of commands.
2944 2959
2945 2960 ``graphnode``
2946 2961 The template used to print changeset nodes in an ASCII revision graph.
2947 2962 (default: ``{graphnode}``)
2948 2963
2949 2964 ``log``
2950 2965 Template string for commands that print changesets.
2951 2966
2952 2967 ``mergemarker``
2953 2968 The template used to print the commit description next to each conflict
2954 2969 marker during merge conflicts. See :hg:`help templates` for the template
2955 2970 format.
2956 2971
2957 2972 Defaults to showing the hash, tags, branches, bookmarks, author, and
2958 2973 the first line of the commit description.
2959 2974
2960 2975 If you use non-ASCII characters in names for tags, branches, bookmarks,
2961 2976 authors, and/or commit descriptions, you must pay attention to encodings of
2962 2977 managed files. At template expansion, non-ASCII characters use the encoding
2963 2978 specified by the ``--encoding`` global option, ``HGENCODING`` or other
2964 2979 environment variables that govern your locale. If the encoding of the merge
2965 2980 markers is different from the encoding of the merged files,
2966 2981 serious problems may occur.
2967 2982
2968 2983 Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
2969 2984
2970 2985 ``oneline-summary``
2971 2986 A template used by `hg rebase` and other commands for showing a one-line
2972 2987 summary of a commit. If the template configured here is longer than one
2973 2988 line, then only the first line is used.
2974 2989
2975 2990 The template can be overridden per command by defining a template in
2976 2991 `oneline-summary.<command>`, where `<command>` can be e.g. "rebase".
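
For example, a sketch using common template keywords::

  [command-templates]
  oneline-summary = {node|short} {desc|firstline}
  oneline-summary.rebase = {node|short} "{desc|firstline}" ({author|user})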
2977 2992
2978 2993 ``pre-merge-tool-output``
2979 2994 A template that is printed before executing an external merge tool. This can
2980 2995 be used to print out additional context that might be useful to have during
2981 2996 the conflict resolution, such as the description of the various commits
2982 2997 involved or bookmarks/tags.
2983 2998
2984 2999 Additional information is available in the ``local``, ``base``, and ``other``
2985 3000 dicts. For example: ``{local.label}``, ``{base.name}``, or
2986 3001 ``{other.islink}``.
2987 3002
2988 3003
2989 3004 ``web``
2990 3005 -------
2991 3006
2992 3007 Web interface configuration. The settings in this section apply to
2993 3008 both the builtin webserver (started by :hg:`serve`) and the script you
2994 3009 run through a webserver (``hgweb.cgi`` and the derivatives for FastCGI
2995 3010 and WSGI).
2996 3011
2997 3012 The Mercurial webserver does no authentication (it does not prompt for
2998 3013 usernames and passwords to validate *who* users are), but it does do
2999 3014 authorization (it grants or denies access for *authenticated users*
3000 3015 based on settings in this section). You must either configure your
3001 3016 webserver to do authentication for you, or disable the authorization
3002 3017 checks.
3003 3018
3004 3019 For a quick setup in a trusted environment, e.g., a private LAN, where
3005 3020 you want it to accept pushes from anybody, you can use the following
3006 3021 command line::
3007 3022
3008 3023 $ hg --config web.allow-push=* --config web.push_ssl=False serve
3009 3024
3010 3025 Note that this will allow anybody to push anything to the server and
3011 3026 that this should not be used for public servers.
3012 3027
3013 3028 The full set of options is:
3014 3029
3015 3030 ``accesslog``
3016 3031 Where to output the access log. (default: stdout)
3017 3032
3018 3033 ``address``
3019 3034 Interface address to bind to. (default: all)
3020 3035
3021 3036 ``allow-archive``
3022 3037 List of archive format (bz2, gz, zip) allowed for downloading.
3023 3038 (default: empty)
3024 3039
3025 3040 ``allowbz2``
3026 3041 (DEPRECATED) Whether to allow .tar.bz2 downloading of repository
3027 3042 revisions.
3028 3043 (default: False)
3029 3044
3030 3045 ``allowgz``
3031 3046 (DEPRECATED) Whether to allow .tar.gz downloading of repository
3032 3047 revisions.
3033 3048 (default: False)
3034 3049
3035 3050 ``allow-pull``
3036 3051 Whether to allow pulling from the repository. (default: True)
3037 3052
3038 3053 ``allow-push``
3039 3054 Whether to allow pushing to the repository. If empty or not set,
3040 3055 pushing is not allowed. If the special value ``*``, any remote
3041 3056 user can push, including unauthenticated users. Otherwise, the
3042 3057 remote user must have been authenticated, and the authenticated
3043 3058 user name must be present in this list. The contents of the
3044 3059 allow-push list are examined after the deny_push list.
3045 3060
3046 3061 ``allow_read``
3047 3062 If the user has not already been denied repository access due to
3048 3063 the contents of deny_read, this list determines whether to grant
3049 3064 repository access to the user. If this list is not empty, and the
3050 3065 user is unauthenticated or not present in the list, then access is
3051 3066 denied for the user. If the list is empty or not set, then access
3052 3067 is permitted to all users by default. Setting allow_read to the
3053 3068 special value ``*`` is equivalent to it not being set (i.e. access
3054 3069 is permitted to all users). The contents of the allow_read list are
3055 3070 examined after the deny_read list.
3056 3071
3057 3072 ``allowzip``
3058 3073 (DEPRECATED) Whether to allow .zip downloading of repository
3059 3074 revisions. This feature creates temporary files.
3060 3075 (default: False)
3061 3076
3062 3077 ``archivesubrepos``
3063 3078 Whether to recurse into subrepositories when archiving.
3064 3079 (default: False)
3065 3080
3066 3081 ``baseurl``
3067 3082 Base URL to use when publishing URLs in other locations, so
3068 3083 third-party tools like email notification hooks can construct
3069 3084 URLs. Example: ``http://hgserver/repos/``.
3070 3085
3071 3086 ``cacerts``
3072 3087 Path to file containing a list of PEM encoded certificate
3073 3088 authority certificates. Environment variables and ``~user``
3074 3089 constructs are expanded in the filename. If specified on the
3075 3090 client, then it will verify the identity of remote HTTPS servers
3076 3091 with these certificates.
3077 3092
3078 3093 To disable SSL verification temporarily, specify ``--insecure`` from
3079 3094 command line.
3080 3095
3081 3096 You can use OpenSSL's CA certificate file if your platform has
3082 3097 one. On most Linux systems this will be
3083 3098 ``/etc/ssl/certs/ca-certificates.crt``. Otherwise you will have to
3084 3099 generate this file manually. The form must be as follows::
3085 3100
3086 3101 -----BEGIN CERTIFICATE-----
3087 3102 ... (certificate in base64 PEM encoding) ...
3088 3103 -----END CERTIFICATE-----
3089 3104 -----BEGIN CERTIFICATE-----
3090 3105 ... (certificate in base64 PEM encoding) ...
3091 3106 -----END CERTIFICATE-----
3092 3107
3093 3108 ``cache``
3094 3109 Whether to support caching in hgweb. (default: True)
3095 3110
3096 3111 ``certificate``
3097 3112 Certificate to use when running :hg:`serve`.
3098 3113
3099 3114 ``collapse``
3100 3115 With ``descend`` enabled, repositories in subdirectories are shown at
3101 3116 a single level alongside repositories in the current path. With
3102 3117 ``collapse`` also enabled, repositories residing at a deeper level than
3103 3118 the current path are grouped behind navigable directory entries that
3104 3119 lead to the locations of these repositories. In effect, this setting
3105 3120 collapses each collection of repositories found within a subdirectory
3106 3121 into a single entry for that subdirectory. (default: False)
3107 3122
3108 3123 ``comparisoncontext``
3109 3124 Number of lines of context to show in side-by-side file comparison. If
3110 3125 negative or the value ``full``, whole files are shown. (default: 5)
3111 3126
3112 3127 This setting can be overridden by a ``context`` request parameter to the
3113 3128 ``comparison`` command, taking the same values.
3114 3129
3115 3130 ``contact``
3116 3131 Name or email address of the person in charge of the repository.
3117 3132 (default: ui.username or ``$EMAIL`` or "unknown" if unset or empty)
3118 3133
3119 3134 ``csp``
3120 3135 Send a ``Content-Security-Policy`` HTTP header with this value.
3121 3136
3122 3137 The value may contain a special string ``%nonce%``, which will be replaced
3123 3138 by a randomly-generated one-time use value. If the value contains
3124 3139 ``%nonce%``, ``web.cache`` will be disabled, as caching undermines the
3125 3140 one-time property of the nonce. This nonce will also be inserted into
3126 3141 ``<script>`` elements containing inline JavaScript.
3127 3142
3128 3143 Note: lots of HTML content sent by the server is derived from repository
3129 3144 data. Please consider the potential for malicious repository data to
3130 3145 "inject" itself into generated HTML content as part of your security
3131 3146 threat model.
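
For example, a sketch restricting scripts to the server itself plus the
per-request nonce (the exact policy is illustrative)::

  [web]
  csp = script-src 'self' 'nonce-%nonce%'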
3132 3147
3133 3148 ``deny_push``
3134 3149 Whether to deny pushing to the repository. If empty or not set,
3135 3150 push is not denied. If the special value ``*``, all remote users are
3136 3151 denied push. Otherwise, unauthenticated users are all denied, and
3137 3152 any authenticated user name present in this list is also denied. The
3138 3153 contents of the deny_push list are examined before the allow-push list.
3139 3154
3140 3155 ``deny_read``
3141 3156 Whether to deny reading/viewing of the repository. If this list is
3142 3157 not empty, unauthenticated users are all denied, and any
3143 3158 authenticated user name present in this list is also denied access to
3144 3159 the repository. If set to the special value ``*``, all remote users
3145 3160 are denied access (rarely needed ;). If deny_read is empty or not set,
3146 3161 the determination of repository access depends on the presence and
3147 3162 content of the allow_read list (see description). If both
3148 3163 deny_read and allow_read are empty or not set, then access is
3149 3164 permitted to all users by default. If the repository is being
3150 3165 served via hgwebdir, denied users will not be able to see it in
3151 3166 the list of repositories. The contents of the deny_read list have
3152 3167 priority over (are examined before) the contents of the allow_read
3153 3168 list.
3154 3169
3155 3170 ``descend``
3156 3171 hgwebdir indexes will not descend into subdirectories. Only repositories
3157 3172 directly in the current path will be shown (other repositories are still
3158 3173 available from the index corresponding to their containing path).
3159 3174
3160 3175 ``description``
3161 3176 Textual description of the repository's purpose or contents.
3162 3177 (default: "unknown")
3163 3178
3164 3179 ``encoding``
3165 3180 Character encoding name. (default: the current locale charset)
3166 3181 Example: "UTF-8".
3167 3182
3168 3183 ``errorlog``
3169 3184 Where to output the error log. (default: stderr)
3170 3185
3171 3186 ``guessmime``
3172 3187 Control MIME types for raw download of file content.
3173 3188 Set to True to let hgweb guess the content type from the file
3174 3189 extension. This will serve HTML files as ``text/html`` and might
3175 3190 allow cross-site scripting attacks when serving untrusted
3176 3191 repositories. (default: False)
3177 3192
3178 3193 ``hidden``
3179 3194 Whether to hide the repository in the hgwebdir index.
3180 3195 (default: False)
3181 3196
3182 3197 ``ipv6``
3183 3198 Whether to use IPv6. (default: False)
3184 3199
3185 3200 ``labels``
3186 3201 List of string *labels* associated with the repository.
3187 3202
3188 3203 Labels are exposed as a template keyword and can be used to customize
3189 3204 output. e.g. the ``index`` template can group or filter repositories
3190 3205 by labels and the ``summary`` template can display additional content
3191 3206 if a specific label is present.
3192 3207
3193 3208 ``logoimg``
3194 3209 File name of the logo image that some templates display on each page.
3195 3210 The file name is relative to ``staticurl``. That is, the full path to
3196 3211 the logo image is "staticurl/logoimg".
3197 3212 If unset, ``hglogo.png`` will be used.
3198 3213
3199 3214 ``logourl``
3200 3215 Base URL to use for logos. If unset, ``https://mercurial-scm.org/``
3201 3216 will be used.
3202 3217
3203 3218 ``maxchanges``
3204 3219 Maximum number of changes to list on the changelog. (default: 10)
3205 3220
3206 3221 ``maxfiles``
3207 3222 Maximum number of files to list per changeset. (default: 10)
3208 3223
3209 3224 ``maxshortchanges``
3210 3225 Maximum number of changes to list on the shortlog, graph or filelog
3211 3226 pages. (default: 60)
3212 3227
3213 3228 ``name``
3214 3229 Repository name to use in the web interface.
3215 3230 (default: current working directory)
3216 3231
3217 3232 ``port``
3218 3233 Port to listen on. (default: 8000)
3219 3234
3220 3235 ``prefix``
3221 3236 Prefix path to serve from. (default: '' (server root))
3222 3237
3223 3238 ``push_ssl``
3224 3239 Whether to require that inbound pushes be transported over SSL to
3225 3240 prevent password sniffing. (default: True)
3226 3241
3227 3242 ``refreshinterval``
3228 3243 How frequently directory listings re-scan the filesystem for new
3229 3244 repositories, in seconds. This is relevant when wildcards are used
3230 3245 to define paths. Depending on how much filesystem traversal is
3231 3246 required, refreshing may negatively impact performance.
3232 3247
3233 3248 Values less than or equal to 0 always refresh.
3234 3249 (default: 20)
3235 3250
3236 3251 ``server-header``
3237 3252 Value for HTTP ``Server`` response header.
3238 3253
3239 3254 ``static``
3240 3255 Directory where static files are served from.
3241 3256
3242 3257 ``staticurl``
3243 3258 Base URL to use for static files. If unset, static files (e.g. the
3244 3259 hgicon.png favicon) will be served by the CGI script itself. Use
3245 3260 this setting to serve them directly with the HTTP server.
3246 3261 Example: ``http://hgserver/static/``.
3247 3262
3248 3263 ``stripes``
3249 3264 How many lines a "zebra stripe" should span in multi-line output.
3250 3265 Set to 0 to disable. (default: 1)
3251 3266
3252 3267 ``style``
3253 3268 Which template map style to use. The available options are the names of
3254 3269 subdirectories in the HTML templates path. (default: ``paper``)
3255 3270 Example: ``monoblue``.
3256 3271
3257 3272 ``templates``
3258 3273 Where to find the HTML templates. The default path to the HTML templates
3259 3274 can be obtained from ``hg debuginstall``.
3260 3275
3261 3276 ``websub``
3262 3277 ----------
3263 3278
3264 3279 Web substitution filter definition. You can use this section to
3265 3280 define a set of regular expression substitution patterns which
3266 3281 let you automatically modify the hgweb server output.
3267 3282
3268 3283 The default hgweb templates only apply these substitution patterns
3269 3284 on the revision description fields. You can apply them anywhere
3270 3285 you want when you create your own templates by adding calls to the
3271 3286 "websub" filter (usually after calling the "escape" filter).
3272 3287
3273 3288 This can be used, for example, to convert issue references to links
3274 3289 to your issue tracker, or to convert "markdown-like" syntax into
3275 3290 HTML (see the examples below).
3276 3291
3277 3292 Each entry in this section names a substitution filter.
3278 3293 The value of each entry defines the substitution expression itself.
3279 3294 The websub expressions follow the old interhg extension syntax,
3280 3295 which in turn imitates the Unix sed replacement syntax::
3281 3296
3282 3297 patternname = s/SEARCH_REGEX/REPLACE_EXPRESSION/[i]
3283 3298
3284 3299 You can use any separator other than "/". The final "i" is optional
3285 3300 and indicates that the search must be case insensitive.
3286 3301
3287 3302 Examples::
3288 3303
3289 3304 [websub]
3290 3305 issues = s|issue(\d+)|<a href="http://bts.example.org/issue\1">issue\1</a>|i
3291 3306 italic = s/\b_(\S+)_\b/<i>\1<\/i>/
3292 3307 bold = s/\*\b(\S+)\b\*/<b>\1<\/b>/
3293 3308
3294 3309 ``worker``
3295 3310 ----------
3296 3311
3297 3312 Parallel master/worker configuration. We currently perform working
3298 3313 directory updates in parallel on Unix-like systems, which greatly
3299 3314 helps performance.
3300 3315
3301 3316 ``enabled``
3302 3317 Whether to enable workers code to be used.
3303 3318 (default: true)
3304 3319
3305 3320 ``numcpus``
3306 3321 Number of CPUs to use for parallel operations. A zero or
3307 3322 negative value is treated as ``use the default``.
3308 3323 (default: 4 or the number of CPUs on the system, whichever is larger)
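
For example, a sketch pinning parallel operations to a fixed CPU count
(the number is illustrative)::

  [worker]
  numcpus = 8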
3309 3324
3310 3325 ``backgroundclose``
3311 3326 Whether to enable closing file handles on background threads during certain
3312 3327 operations. Some platforms aren't very efficient at closing file
3313 3328 handles that have been written or appended to. By performing file closing
3314 3329 on background threads, file write rate can increase substantially.
3315 3330 (default: true on Windows, false elsewhere)
3316 3331
3317 3332 ``backgroundcloseminfilecount``
3318 3333 Minimum number of files required to trigger background file closing.
3319 3334 Operations not writing this many files won't start background close
3320 3335 threads.
3321 3336 (default: 2048)
3322 3337
3323 3338 ``backgroundclosemaxqueue``
3324 3339 The maximum number of opened file handles waiting to be closed in the
3325 3340 background. This option only has an effect if ``backgroundclose`` is
3326 3341 enabled.
3327 3342 (default: 384)
3328 3343
3329 3344 ``backgroundclosethreadcount``
3330 3345 Number of threads to process background file closes. Only relevant if
3331 3346 ``backgroundclose`` is enabled.
3332 3347 (default: 4)
@@ -1,3973 +1,3978 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import functools
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from concurrent import futures
18 18 from typing import (
19 19 Optional,
20 20 )
21 21
22 22 from .i18n import _
23 23 from .node import (
24 24 bin,
25 25 hex,
26 26 nullrev,
27 27 sha1nodeconstants,
28 28 short,
29 29 )
30 30 from .pycompat import (
31 31 delattr,
32 32 getattr,
33 33 )
34 34 from . import (
35 35 bookmarks,
36 36 branchmap,
37 37 bundle2,
38 38 bundlecaches,
39 39 changegroup,
40 40 color,
41 41 commit,
42 42 context,
43 43 dirstate,
44 44 dirstateguard,
45 45 discovery,
46 46 encoding,
47 47 error,
48 48 exchange,
49 49 extensions,
50 50 filelog,
51 51 hook,
52 52 lock as lockmod,
53 53 match as matchmod,
54 54 mergestate as mergestatemod,
55 55 mergeutil,
56 56 namespaces,
57 57 narrowspec,
58 58 obsolete,
59 59 pathutil,
60 60 phases,
61 61 pushkey,
62 62 pycompat,
63 63 rcutil,
64 64 repoview,
65 65 requirements as requirementsmod,
66 66 revlog,
67 67 revset,
68 68 revsetlang,
69 69 scmutil,
70 70 sparse,
71 71 store as storemod,
72 72 subrepoutil,
73 73 tags as tagsmod,
74 74 transaction,
75 75 txnutil,
76 76 util,
77 77 vfs as vfsmod,
78 78 wireprototypes,
79 79 )
80 80
81 81 from .interfaces import (
82 82 repository,
83 83 util as interfaceutil,
84 84 )
85 85
86 86 from .utils import (
87 87 hashutil,
88 88 procutil,
89 89 stringutil,
90 90 urlutil,
91 91 )
92 92
93 93 from .revlogutils import (
94 94 concurrency_checker as revlogchecker,
95 95 constants as revlogconst,
96 96 sidedata as sidedatamod,
97 97 )
98 98
99 99 release = lockmod.release
100 100 urlerr = util.urlerr
101 101 urlreq = util.urlreq
102 102
103 103 # set of (path, vfs-location) tuples. vfs-location is:
104 104 # - 'plain' for vfs relative paths
105 105 # - '' for svfs relative paths
106 106 _cachedfiles = set()
107 107
108 108
109 109 class _basefilecache(scmutil.filecache):
110 110 """All filecache usage on repo are done for logic that should be unfiltered"""
111 111
112 112 def __get__(self, repo, type=None):
113 113 if repo is None:
114 114 return self
115 115 # proxy to unfiltered __dict__ since filtered repo has no entry
116 116 unfi = repo.unfiltered()
117 117 try:
118 118 return unfi.__dict__[self.sname]
119 119 except KeyError:
120 120 pass
121 121 return super(_basefilecache, self).__get__(unfi, type)
122 122
123 123 def set(self, repo, value):
124 124 return super(_basefilecache, self).set(repo.unfiltered(), value)
125 125
126 126
127 127 class repofilecache(_basefilecache):
128 128 """filecache for files in .hg but outside of .hg/store"""
129 129
130 130 def __init__(self, *paths):
131 131 super(repofilecache, self).__init__(*paths)
132 132 for path in paths:
133 133 _cachedfiles.add((path, b'plain'))
134 134
135 135 def join(self, obj, fname):
136 136 return obj.vfs.join(fname)
137 137
138 138
139 139 class storecache(_basefilecache):
140 140 """filecache for files in the store"""
141 141
142 142 def __init__(self, *paths):
143 143 super(storecache, self).__init__(*paths)
144 144 for path in paths:
145 145 _cachedfiles.add((path, b''))
146 146
147 147 def join(self, obj, fname):
148 148 return obj.sjoin(fname)
149 149
150 150
151 151 class changelogcache(storecache):
152 152 """filecache for the changelog"""
153 153
154 154 def __init__(self):
155 155 super(changelogcache, self).__init__()
156 156 _cachedfiles.add((b'00changelog.i', b''))
157 157 _cachedfiles.add((b'00changelog.n', b''))
158 158
159 159 def tracked_paths(self, obj):
160 160 paths = [self.join(obj, b'00changelog.i')]
161 161 if obj.store.opener.options.get(b'persistent-nodemap', False):
162 162 paths.append(self.join(obj, b'00changelog.n'))
163 163 return paths
164 164
165 165
166 166 class manifestlogcache(storecache):
167 167 """filecache for the manifestlog"""
168 168
169 169 def __init__(self):
170 170 super(manifestlogcache, self).__init__()
171 171 _cachedfiles.add((b'00manifest.i', b''))
172 172 _cachedfiles.add((b'00manifest.n', b''))
173 173
174 174 def tracked_paths(self, obj):
175 175 paths = [self.join(obj, b'00manifest.i')]
176 176 if obj.store.opener.options.get(b'persistent-nodemap', False):
177 177 paths.append(self.join(obj, b'00manifest.n'))
178 178 return paths
179 179
180 180
181 181 class mixedrepostorecache(_basefilecache):
182 182 """filecache for a mix files in .hg/store and outside"""
183 183
184 184 def __init__(self, *pathsandlocations):
185 185 # scmutil.filecache only uses the path for passing back into our
186 186 # join(), so we can safely pass a list of paths and locations
187 187 super(mixedrepostorecache, self).__init__(*pathsandlocations)
188 188 _cachedfiles.update(pathsandlocations)
189 189
190 190 def join(self, obj, fnameandlocation):
191 191 fname, location = fnameandlocation
192 192 if location == b'plain':
193 193 return obj.vfs.join(fname)
194 194 else:
195 195 if location != b'':
196 196 raise error.ProgrammingError(
197 197 b'unexpected location: %s' % location
198 198 )
199 199 return obj.sjoin(fname)
200 200
201 201
202 202 def isfilecached(repo, name):
203 203 """check if a repo has already cached "name" filecache-ed property
204 204
205 205 This returns (cachedobj-or-None, iscached) tuple.
206 206 """
207 207 cacheentry = repo.unfiltered()._filecache.get(name, None)
208 208 if not cacheentry:
209 209 return None, False
210 210 return cacheentry.obj, True
211 211
212 212
213 213 class unfilteredpropertycache(util.propertycache):
214 214 """propertycache that apply to unfiltered repo only"""
215 215
216 216 def __get__(self, repo, type=None):
217 217 unfi = repo.unfiltered()
218 218 if unfi is repo:
219 219 return super(unfilteredpropertycache, self).__get__(unfi)
220 220 return getattr(unfi, self.name)
221 221
222 222
223 223 class filteredpropertycache(util.propertycache):
224 224 """propertycache that must take filtering in account"""
225 225
226 226 def cachevalue(self, obj, value):
227 227 object.__setattr__(obj, self.name, value)
228 228
229 229
230 230 def hasunfilteredcache(repo, name):
231 231 """check if a repo has an unfilteredpropertycache value for <name>"""
232 232 return name in vars(repo.unfiltered())
233 233
234 234
235 235 def unfilteredmethod(orig):
236 236 """decorate method that always need to be run on unfiltered version"""
237 237
238 238 @functools.wraps(orig)
239 239 def wrapper(repo, *args, **kwargs):
240 240 return orig(repo.unfiltered(), *args, **kwargs)
241 241
242 242 return wrapper
243 243
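# Illustrative sketch (not part of this module): a method that must ignore
# repoview filtering can be decorated with ``unfilteredmethod`` so it always
# runs against the unfiltered repository, e.g.
#
# class somerepo(localrepository):
#     @unfilteredmethod
#     def destroyed(self):
#         ...  # ``self`` is guaranteed to be the unfiltered repo here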
244 244
245 245 moderncaps = {
246 246 b'lookup',
247 247 b'branchmap',
248 248 b'pushkey',
249 249 b'known',
250 250 b'getbundle',
251 251 b'unbundle',
252 252 }
253 253 legacycaps = moderncaps.union({b'changegroupsubset'})
254 254
255 255
256 256 @interfaceutil.implementer(repository.ipeercommandexecutor)
257 257 class localcommandexecutor:
258 258 def __init__(self, peer):
259 259 self._peer = peer
260 260 self._sent = False
261 261 self._closed = False
262 262
263 263 def __enter__(self):
264 264 return self
265 265
266 266 def __exit__(self, exctype, excvalue, exctb):
267 267 self.close()
268 268
269 269 def callcommand(self, command, args):
270 270 if self._sent:
271 271 raise error.ProgrammingError(
272 272 b'callcommand() cannot be used after sendcommands()'
273 273 )
274 274
275 275 if self._closed:
276 276 raise error.ProgrammingError(
277 277 b'callcommand() cannot be used after close()'
278 278 )
279 279
280 280 # We don't need to support anything fancy. Just call the named
281 281 # method on the peer and return a resolved future.
282 282 fn = getattr(self._peer, pycompat.sysstr(command))
283 283
284 284 f = futures.Future()
285 285
286 286 try:
287 287 result = fn(**pycompat.strkwargs(args))
288 288 except Exception:
289 289 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
290 290 else:
291 291 f.set_result(result)
292 292
293 293 return f
294 294
295 295 def sendcommands(self):
296 296 self._sent = True
297 297
298 298 def close(self):
299 299 self._closed = True
300 300
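# Illustrative sketch (not part of this module): the executor is normally
# driven through the peer interface, typically as a context manager.
#
# def _example_lookup(peer, rev=b'tip'):
#     with peer.commandexecutor() as e:
#         return e.callcommand(b'lookup', {b'key': rev}).result()
#     # for local peers the future is resolved synchronously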
301 301
302 302 @interfaceutil.implementer(repository.ipeercommands)
303 303 class localpeer(repository.peer):
304 304 '''peer for a local repo; reflects only the most recent API'''
305 305
306 306 def __init__(self, repo, caps=None):
307 307 super(localpeer, self).__init__()
308 308
309 309 if caps is None:
310 310 caps = moderncaps.copy()
311 311 self._repo = repo.filtered(b'served')
312 312 self.ui = repo.ui
313 313
314 314 if repo._wanted_sidedata:
315 315 formatted = bundle2.format_remote_wanted_sidedata(repo)
316 316 caps.add(b'exp-wanted-sidedata=' + formatted)
317 317
318 318 self._caps = repo._restrictcapabilities(caps)
319 319
320 320 # Begin of _basepeer interface.
321 321
322 322 def url(self):
323 323 return self._repo.url()
324 324
325 325 def local(self):
326 326 return self._repo
327 327
328 328 def peer(self):
329 329 return self
330 330
331 331 def canpush(self):
332 332 return True
333 333
334 334 def close(self):
335 335 self._repo.close()
336 336
337 337 # End of _basepeer interface.
338 338
339 339 # Begin of _basewirecommands interface.
340 340
341 341 def branchmap(self):
342 342 return self._repo.branchmap()
343 343
344 344 def capabilities(self):
345 345 return self._caps
346 346
347 347 def clonebundles(self):
348 348 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
349 349
350 350 def debugwireargs(self, one, two, three=None, four=None, five=None):
351 351 """Used to test argument passing over the wire"""
352 352 return b"%s %s %s %s %s" % (
353 353 one,
354 354 two,
355 355 pycompat.bytestr(three),
356 356 pycompat.bytestr(four),
357 357 pycompat.bytestr(five),
358 358 )
359 359
360 360 def getbundle(
361 361 self,
362 362 source,
363 363 heads=None,
364 364 common=None,
365 365 bundlecaps=None,
366 366 remote_sidedata=None,
367 367 **kwargs
368 368 ):
369 369 chunks = exchange.getbundlechunks(
370 370 self._repo,
371 371 source,
372 372 heads=heads,
373 373 common=common,
374 374 bundlecaps=bundlecaps,
375 375 remote_sidedata=remote_sidedata,
376 376 **kwargs
377 377 )[1]
378 378 cb = util.chunkbuffer(chunks)
379 379
380 380 if exchange.bundle2requested(bundlecaps):
381 381 # When requesting a bundle2, getbundle returns a stream to make the
382 382 # wire level function happier. We need to build a proper object
383 383 # from it in local peer.
384 384 return bundle2.getunbundler(self.ui, cb)
385 385 else:
386 386 return changegroup.getunbundler(b'01', cb, None)
387 387
388 388 def heads(self):
389 389 return self._repo.heads()
390 390
391 391 def known(self, nodes):
392 392 return self._repo.known(nodes)
393 393
394 394 def listkeys(self, namespace):
395 395 return self._repo.listkeys(namespace)
396 396
397 397 def lookup(self, key):
398 398 return self._repo.lookup(key)
399 399
400 400 def pushkey(self, namespace, key, old, new):
401 401 return self._repo.pushkey(namespace, key, old, new)
402 402
403 403 def stream_out(self):
404 404 raise error.Abort(_(b'cannot perform stream clone against local peer'))
405 405
406 406 def unbundle(self, bundle, heads, url):
407 407 """apply a bundle on a repo
408 408
409 409 This function handles the repo locking itself."""
410 410 try:
411 411 try:
412 412 bundle = exchange.readbundle(self.ui, bundle, None)
413 413 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
414 414 if util.safehasattr(ret, b'getchunks'):
415 415 # This is a bundle20 object, turn it into an unbundler.
416 416 # This little dance should be dropped eventually when the
417 417 # API is finally improved.
418 418 stream = util.chunkbuffer(ret.getchunks())
419 419 ret = bundle2.getunbundler(self.ui, stream)
420 420 return ret
421 421 except Exception as exc:
422 422 # If the exception contains output salvaged from a bundle2
423 423 # reply, we need to make sure it is printed before continuing
424 424 # to fail. So we build a bundle2 with such output and consume
425 425 # it directly.
426 426 #
427 427 # This is not very elegant but allows a "simple" solution for
428 428 # issue4594
429 429 output = getattr(exc, '_bundle2salvagedoutput', ())
430 430 if output:
431 431 bundler = bundle2.bundle20(self._repo.ui)
432 432 for out in output:
433 433 bundler.addpart(out)
434 434 stream = util.chunkbuffer(bundler.getchunks())
435 435 b = bundle2.getunbundler(self.ui, stream)
436 436 bundle2.processbundle(self._repo, b)
437 437 raise
438 438 except error.PushRaced as exc:
439 439 raise error.ResponseError(
440 440 _(b'push failed:'), stringutil.forcebytestr(exc)
441 441 )
442 442
443 443 # End of _basewirecommands interface.
444 444
445 445 # Begin of peer interface.
446 446
447 447 def commandexecutor(self):
448 448 return localcommandexecutor(self)
449 449
450 450 # End of peer interface.
451 451
452 452
453 453 @interfaceutil.implementer(repository.ipeerlegacycommands)
454 454 class locallegacypeer(localpeer):
455 455 """peer extension which implements legacy methods too; used for tests with
456 456 restricted capabilities"""
457 457
458 458 def __init__(self, repo):
459 459 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
460 460
461 461 # Begin of baselegacywirecommands interface.
462 462
463 463 def between(self, pairs):
464 464 return self._repo.between(pairs)
465 465
466 466 def branches(self, nodes):
467 467 return self._repo.branches(nodes)
468 468
469 469 def changegroup(self, nodes, source):
470 470 outgoing = discovery.outgoing(
471 471 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
472 472 )
473 473 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
474 474
475 475 def changegroupsubset(self, bases, heads, source):
476 476 outgoing = discovery.outgoing(
477 477 self._repo, missingroots=bases, ancestorsof=heads
478 478 )
479 479 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
480 480
481 481 # End of baselegacywirecommands interface.
482 482
483 483
484 484 # Functions receiving (ui, features) that extensions can register to impact
485 485 # the ability to load repositories with custom requirements. Only
486 486 # functions defined in loaded extensions are called.
487 487 #
488 488 # The function receives a set of requirement strings that the repository
489 489 # is capable of opening. Functions will typically add elements to the
490 490 # set to reflect that the extension knows how to handle those requirements.
491 491 featuresetupfuncs = set()
492 492
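# Illustrative sketch (not part of this module): an extension registers a
# feature-setup function roughly like this (the requirement name below is
# hypothetical).
#
# from mercurial import localrepo
#
# def featuresetup(ui, supported):
#     supported.add(b'exp-some-extension-requirement')
#
# def uisetup(ui):
#     localrepo.featuresetupfuncs.add(featuresetup)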
493 493
494 494 def _getsharedvfs(hgvfs, requirements):
495 495 """returns the vfs object pointing to root of shared source
496 496 repo for a shared repository
497 497
498 498 hgvfs is vfs pointing at .hg/ of current repo (shared one)
499 499 requirements is a set of requirements of current repo (shared one)
500 500 """
501 501 # The ``shared`` or ``relshared`` requirements indicate the
502 502 # store lives in the path contained in the ``.hg/sharedpath`` file.
503 503 # This is an absolute path for ``shared`` and relative to
504 504 # ``.hg/`` for ``relshared``.
505 505 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
506 506 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
507 507 sharedpath = util.normpath(hgvfs.join(sharedpath))
508 508
509 509 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
510 510
511 511 if not sharedvfs.exists():
512 512 raise error.RepoError(
513 513 _(b'.hg/sharedpath points to nonexistent directory %s')
514 514 % sharedvfs.base
515 515 )
516 516 return sharedvfs
517 517
518 518
519 519 def _readrequires(vfs, allowmissing):
520 520 """reads the require file present at root of this vfs
521 521 and return a set of requirements
522 522
523 523 If allowmissing is True, we suppress FileNotFoundError if raised"""
524 524 # requires file contains a newline-delimited list of
525 525 # features/capabilities the opener (us) must have in order to use
526 526 # the repository. This file was introduced in Mercurial 0.9.2,
527 527 # which means very old repositories may not have one. We assume
528 528 # a missing file translates to no requirements.
529 529 read = vfs.tryread if allowmissing else vfs.read
530 530 return set(read(b'requires').splitlines())
531 531
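# Illustrative example (not part of this module): a typical ``requires`` file
# is a plain newline-delimited list such as
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     store
#
# which ``_readrequires`` turns into the corresponding set of bytestrings.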
532 532
533 533 def makelocalrepository(baseui, path: bytes, intents=None):
534 534 """Create a local repository object.
535 535
536 536 Given arguments needed to construct a local repository, this function
537 537 performs various early repository loading functionality (such as
538 538 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
539 539 the repository can be opened, derives a type suitable for representing
540 540 that repository, and returns an instance of it.
541 541
542 542 The returned object conforms to the ``repository.completelocalrepository``
543 543 interface.
544 544
545 545 The repository type is derived by calling a series of factory functions
546 546 for each aspect/interface of the final repository. These are defined by
547 547 ``REPO_INTERFACES``.
548 548
549 549 Each factory function is called to produce a type implementing a specific
550 550 interface. The cumulative list of returned types will be combined into a
551 551 new type and that type will be instantiated to represent the local
552 552 repository.
553 553
554 554 The factory functions each receive various state that may be consulted
555 555 as part of deriving a type.
556 556
557 557 Extensions should wrap these factory functions to customize repository type
558 558 creation. Note that an extension's wrapped function may be called even if
559 559 that extension is not loaded for the repo being constructed. Extensions
560 560 should check if their ``__name__`` appears in the
561 561 ``extensionmodulenames`` set passed to the factory function and no-op if
562 562 not.
563 563 """
564 564 ui = baseui.copy()
565 565 # Prevent copying repo configuration.
566 566 ui.copy = baseui.copy
567 567
568 568 # Working directory VFS rooted at repository root.
569 569 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
570 570
571 571 # Main VFS for .hg/ directory.
572 572 hgpath = wdirvfs.join(b'.hg')
573 573 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
574 574 # Whether this repository is a shared one or not
575 575 shared = False
576 576 # If this repository is shared, the vfs pointing to the shared repo
577 577 sharedvfs = None
578 578
579 579 # The .hg/ path should exist and should be a directory. All other
580 580 # cases are errors.
581 581 if not hgvfs.isdir():
582 582 try:
583 583 hgvfs.stat()
584 584 except FileNotFoundError:
585 585 pass
586 586 except ValueError as e:
587 587 # Can be raised on Python 3.8 when path is invalid.
588 588 raise error.Abort(
589 589 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
590 590 )
591 591
592 592 raise error.RepoError(_(b'repository %s not found') % path)
593 593
594 594 requirements = _readrequires(hgvfs, True)
595 595 shared = (
596 596 requirementsmod.SHARED_REQUIREMENT in requirements
597 597 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
598 598 )
599 599 storevfs = None
600 600 if shared:
601 601 # This is a shared repo
602 602 sharedvfs = _getsharedvfs(hgvfs, requirements)
603 603 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
604 604 else:
605 605 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
606 606
607 607 # if .hg/requires contains the sharesafe requirement, it means
608 608 # there exists a `.hg/store/requires` too and we should read it
609 609 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
610 610 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
611 611 # is not present; refer to checkrequirementscompat() for that
612 612 #
613 613 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
614 614 # repository was shared the old way. We check the share source .hg/requires
615 615 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
616 616 # to be reshared
617 617 hint = _(b"see `hg help config.format.use-share-safe` for more information")
618 618 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
619 619
620 620 if (
621 621 shared
622 622 and requirementsmod.SHARESAFE_REQUIREMENT
623 623 not in _readrequires(sharedvfs, True)
624 624 ):
625 625 mismatch_warn = ui.configbool(
626 626 b'share', b'safe-mismatch.source-not-safe.warn'
627 627 )
628 628 mismatch_config = ui.config(
629 629 b'share', b'safe-mismatch.source-not-safe'
630 630 )
631 631 mismatch_verbose_upgrade = ui.configbool(
632 632 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
633 633 )
634 634 if mismatch_config in (
635 635 b'downgrade-allow',
636 636 b'allow',
637 637 b'downgrade-abort',
638 638 ):
639 639 # prevent cyclic import localrepo -> upgrade -> localrepo
640 640 from . import upgrade
641 641
642 642 upgrade.downgrade_share_to_non_safe(
643 643 ui,
644 644 hgvfs,
645 645 sharedvfs,
646 646 requirements,
647 647 mismatch_config,
648 648 mismatch_warn,
649 649 mismatch_verbose_upgrade,
650 650 )
651 651 elif mismatch_config == b'abort':
652 652 raise error.Abort(
653 653 _(b"share source does not support share-safe requirement"),
654 654 hint=hint,
655 655 )
656 656 else:
657 657 raise error.Abort(
658 658 _(
659 659 b"share-safe mismatch with source.\nUnrecognized"
660 660 b" value '%s' of `share.safe-mismatch.source-not-safe`"
661 661 b" set."
662 662 )
663 663 % mismatch_config,
664 664 hint=hint,
665 665 )
666 666 else:
667 667 requirements |= _readrequires(storevfs, False)
668 668 elif shared:
669 669 sourcerequires = _readrequires(sharedvfs, False)
670 670 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
671 671 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
672 672 mismatch_warn = ui.configbool(
673 673 b'share', b'safe-mismatch.source-safe.warn'
674 674 )
675 675 mismatch_verbose_upgrade = ui.configbool(
676 676 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
677 677 )
678 678 if mismatch_config in (
679 679 b'upgrade-allow',
680 680 b'allow',
681 681 b'upgrade-abort',
682 682 ):
683 683 # prevent cyclic import localrepo -> upgrade -> localrepo
684 684 from . import upgrade
685 685
686 686 upgrade.upgrade_share_to_safe(
687 687 ui,
688 688 hgvfs,
689 689 storevfs,
690 690 requirements,
691 691 mismatch_config,
692 692 mismatch_warn,
693 693 mismatch_verbose_upgrade,
694 694 )
695 695 elif mismatch_config == b'abort':
696 696 raise error.Abort(
697 697 _(
698 698 b'version mismatch: source uses share-safe'
699 699 b' functionality while the current share does not'
700 700 ),
701 701 hint=hint,
702 702 )
703 703 else:
704 704 raise error.Abort(
705 705 _(
706 706 b"share-safe mismatch with source.\nUnrecognized"
707 707 b" value '%s' of `share.safe-mismatch.source-safe` set."
708 708 )
709 709 % mismatch_config,
710 710 hint=hint,
711 711 )
712 712
713 713 # The .hg/hgrc file may load extensions or contain config options
714 714 # that influence repository construction. Attempt to load it and
715 715 # process any new extensions that it may have pulled in.
716 716 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
717 717 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
718 718 extensions.loadall(ui)
719 719 extensions.populateui(ui)
720 720
721 721 # Set of module names of extensions loaded for this repository.
722 722 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
723 723
724 724 supportedrequirements = gathersupportedrequirements(ui)
725 725
726 726 # We first validate the requirements are known.
727 727 ensurerequirementsrecognized(requirements, supportedrequirements)
728 728
729 729 # Then we validate that the known set is reasonable to use together.
730 730 ensurerequirementscompatible(ui, requirements)
731 731
732 732 # TODO there are unhandled edge cases related to opening repositories with
733 733 # shared storage. If storage is shared, we should also test for requirements
734 734 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
735 735 # that repo, as that repo may load extensions needed to open it. This is a
736 736 # bit complicated because we don't want the other hgrc to overwrite settings
737 737 # in this hgrc.
738 738 #
739 739 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
740 740 # file when sharing repos. But if a requirement is added after the share is
741 741 # performed, thereby introducing a new requirement for the opener, we may
742 742 # not see that and could encounter a run-time error interacting with
743 743 # that shared store since it has an unknown-to-us requirement.
744 744
745 745 # At this point, we know we should be capable of opening the repository.
746 746 # Now get on with doing that.
747 747
748 748 features = set()
749 749
750 750 # The "store" part of the repository holds versioned data. How it is
751 751 # accessed is determined by various requirements. If `shared` or
752 752 # `relshared` requirements are present, this indicates the current repository
753 753 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
754 754 if shared:
755 755 storebasepath = sharedvfs.base
756 756 cachepath = sharedvfs.join(b'cache')
757 757 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
758 758 else:
759 759 storebasepath = hgvfs.base
760 760 cachepath = hgvfs.join(b'cache')
761 761 wcachepath = hgvfs.join(b'wcache')
762 762
763 763 # The store has changed over time and the exact layout is dictated by
764 764 # requirements. The store interface abstracts differences across all
765 765 # of them.
766 766 store = makestore(
767 767 requirements,
768 768 storebasepath,
769 769 lambda base: vfsmod.vfs(base, cacheaudited=True),
770 770 )
771 771 hgvfs.createmode = store.createmode
772 772
773 773 storevfs = store.vfs
774 774 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
775 775
776 776 if (
777 777 requirementsmod.REVLOGV2_REQUIREMENT in requirements
778 778 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
779 779 ):
780 780 features.add(repository.REPO_FEATURE_SIDE_DATA)
781 781 # the revlogv2 docket introduced a race condition that we need to fix
782 782 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
783 783
784 784 # The cache vfs is used to manage cache files.
785 785 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
786 786 cachevfs.createmode = store.createmode
787 787 # The cache vfs is used to manage cache files related to the working copy
788 788 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
789 789 wcachevfs.createmode = store.createmode
790 790
791 791 # Now resolve the type for the repository object. We do this by repeatedly
792 792 # calling a factory function to produce types for specific aspects of the
793 793 # repo's operation. The aggregate returned types are used as base classes
794 794 # for a dynamically-derived type, which will represent our new repository.
795 795
796 796 bases = []
797 797 extrastate = {}
798 798
799 799 for iface, fn in REPO_INTERFACES:
800 800 # We pass all potentially useful state to give extensions tons of
801 801 # flexibility.
802 802 typ = fn()(
803 803 ui=ui,
804 804 intents=intents,
805 805 requirements=requirements,
806 806 features=features,
807 807 wdirvfs=wdirvfs,
808 808 hgvfs=hgvfs,
809 809 store=store,
810 810 storevfs=storevfs,
811 811 storeoptions=storevfs.options,
812 812 cachevfs=cachevfs,
813 813 wcachevfs=wcachevfs,
814 814 extensionmodulenames=extensionmodulenames,
815 815 extrastate=extrastate,
816 816 baseclasses=bases,
817 817 )
818 818
819 819 if not isinstance(typ, type):
820 820 raise error.ProgrammingError(
821 821 b'unable to construct type for %s' % iface
822 822 )
823 823
824 824 bases.append(typ)
825 825
826 826 # type() allows you to use characters in type names that wouldn't be
827 827 # recognized as Python symbols in source code. We abuse that to add
828 828 # rich information about our constructed repo.
829 829 name = pycompat.sysstr(
830 830 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
831 831 )
832 832
833 833 cls = type(name, tuple(bases), {})
834 834
835 835 return cls(
836 836 baseui=baseui,
837 837 ui=ui,
838 838 origroot=path,
839 839 wdirvfs=wdirvfs,
840 840 hgvfs=hgvfs,
841 841 requirements=requirements,
842 842 supportedrequirements=supportedrequirements,
843 843 sharedpath=storebasepath,
844 844 store=store,
845 845 cachevfs=cachevfs,
846 846 wcachevfs=wcachevfs,
847 847 features=features,
848 848 intents=intents,
849 849 )
850 850
851 851
852 852 def loadhgrc(
853 853 ui,
854 854 wdirvfs: vfsmod.vfs,
855 855 hgvfs: vfsmod.vfs,
856 856 requirements,
857 857 sharedvfs: Optional[vfsmod.vfs] = None,
858 858 ):
859 859 """Load hgrc files/content into a ui instance.
860 860
861 861 This is called during repository opening to load any additional
862 862 config files or settings relevant to the current repository.
863 863
864 864 Returns a bool indicating whether any additional configs were loaded.
865 865
866 866 Extensions should monkeypatch this function to modify how per-repo
867 867 configs are loaded. For example, an extension may wish to pull in
868 868 configs from alternate files or sources.
869 869
870 870 sharedvfs is a vfs object pointing to the source repo if the current one is a
871 871 shared one
872 872 """
873 873 if not rcutil.use_repo_hgrc():
874 874 return False
875 875
876 876 ret = False
877 877 # first load config from the shared source if we have to
878 878 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
879 879 try:
880 880 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
881 881 ret = True
882 882 except IOError:
883 883 pass
884 884
885 885 try:
886 886 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
887 887 ret = True
888 888 except IOError:
889 889 pass
890 890
891 891 try:
892 892 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
893 893 ret = True
894 894 except IOError:
895 895 pass
896 896
897 897 return ret
898 898
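# Illustrative sketch (not part of this module): an extension could wrap
# ``loadhgrc`` to pull configuration from an extra, hypothetical file.
#
# from mercurial import extensions, localrepo
#
# def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
#     ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
#     try:
#         ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#         ret = True
#     except IOError:
#         pass
#     return ret
#
# def uisetup(ui):
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)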
899 899
900 900 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
901 901 """Perform additional actions after .hg/hgrc is loaded.
902 902
903 903 This function is called during repository loading immediately after
904 904 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
905 905
906 906 The function can be used to validate configs, automatically add
907 907 options (including extensions) based on requirements, etc.
908 908 """
909 909
910 910 # Map of requirements to lists of extensions to load automatically when
911 911 # the requirement is present.
912 912 autoextensions = {
913 913 b'git': [b'git'],
914 914 b'largefiles': [b'largefiles'],
915 915 b'lfs': [b'lfs'],
916 916 }
917 917
918 918 for requirement, names in sorted(autoextensions.items()):
919 919 if requirement not in requirements:
920 920 continue
921 921
922 922 for name in names:
923 923 if not ui.hasconfig(b'extensions', name):
924 924 ui.setconfig(b'extensions', name, b'', source=b'autoload')
925 925
926 926
927 927 def gathersupportedrequirements(ui):
928 928 """Determine the complete set of recognized requirements."""
929 929 # Start with all requirements supported by this file.
930 930 supported = set(localrepository._basesupported)
931 931
932 932 # Execute ``featuresetupfuncs`` entries if they belong to an extension
933 933 # relevant to this ui instance.
934 934 modules = {m.__name__ for n, m in extensions.extensions(ui)}
935 935
936 936 for fn in featuresetupfuncs:
937 937 if fn.__module__ in modules:
938 938 fn(ui, supported)
939 939
940 940 # Add derived requirements from registered compression engines.
941 941 for name in util.compengines:
942 942 engine = util.compengines[name]
943 943 if engine.available() and engine.revlogheader():
944 944 supported.add(b'exp-compression-%s' % name)
945 945 if engine.name() == b'zstd':
946 946 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
947 947
948 948 return supported
949 949
950 950
951 951 def ensurerequirementsrecognized(requirements, supported):
952 952 """Validate that a set of local requirements is recognized.
953 953
954 954 Receives a set of requirements. Raises an ``error.RepoError`` if there
955 955 exists any requirement in that set that currently loaded code doesn't
956 956 recognize.
957 957
958 958 Returns a set of supported requirements.
959 959 """
960 960 missing = set()
961 961
962 962 for requirement in requirements:
963 963 if requirement in supported:
964 964 continue
965 965
966 966 if not requirement or not requirement[0:1].isalnum():
967 967 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
968 968
969 969 missing.add(requirement)
970 970
971 971 if missing:
972 972 raise error.RequirementError(
973 973 _(b'repository requires features unknown to this Mercurial: %s')
974 974 % b' '.join(sorted(missing)),
975 975 hint=_(
976 976 b'see https://mercurial-scm.org/wiki/MissingRequirement '
977 977 b'for more information'
978 978 ),
979 979 )
980 980
981 981
982 982 def ensurerequirementscompatible(ui, requirements):
983 983 """Validates that a set of recognized requirements is mutually compatible.
984 984
985 985 Some requirements may not be compatible with others or require
986 986 config options that aren't enabled. This function is called during
987 987 repository opening to ensure that the set of requirements needed
988 988 to open a repository is sane and compatible with config options.
989 989
990 990 Extensions can monkeypatch this function to perform additional
991 991 checking.
992 992
993 993 ``error.RepoError`` should be raised on failure.
994 994 """
995 995 if (
996 996 requirementsmod.SPARSE_REQUIREMENT in requirements
997 997 and not sparse.enabled
998 998 ):
999 999 raise error.RepoError(
1000 1000 _(
1001 1001 b'repository is using sparse feature but '
1002 1002 b'sparse is not enabled; enable the '
1003 1003 b'"sparse" extensions to access'
1004 1004 )
1005 1005 )
1006 1006
1007 1007
1008 1008 def makestore(requirements, path, vfstype):
1009 1009 """Construct a storage object for a repository."""
1010 1010 if requirementsmod.STORE_REQUIREMENT in requirements:
1011 1011 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1012 1012 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1013 1013 return storemod.fncachestore(path, vfstype, dotencode)
1014 1014
1015 1015 return storemod.encodedstore(path, vfstype)
1016 1016
1017 1017 return storemod.basicstore(path, vfstype)
1018 1018
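# Illustrative summary (not part of this module) of the mapping implemented
# above:
#
#     requirements present        store type
#     ------------------------    ---------------------
#     store + fncache             storemod.fncachestore
#     store only                  storemod.encodedstore
#     neither                     storemod.basicstore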
1019 1019
1020 1020 def resolvestorevfsoptions(ui, requirements, features):
1021 1021 """Resolve the options to pass to the store vfs opener.
1022 1022
1023 1023 The returned dict is used to influence behavior of the storage layer.
1024 1024 """
1025 1025 options = {}
1026 1026
1027 1027 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1028 1028 options[b'treemanifest'] = True
1029 1029
1030 1030 # experimental config: format.manifestcachesize
1031 1031 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1032 1032 if manifestcachesize is not None:
1033 1033 options[b'manifestcachesize'] = manifestcachesize
1034 1034
1035 1035 # In the absence of another requirement superseding a revlog-related
1036 1036 # requirement, we have to assume the repo is using revlog version 0.
1037 1037 # This revlog format is super old and we don't bother trying to parse
1038 1038 # opener options for it because those options wouldn't do anything
1039 1039 # meaningful on such old repos.
1040 1040 if (
1041 1041 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1042 1042 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1043 1043 ):
1044 1044 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1045 1045 else: # explicitly mark repo as using revlogv0
1046 1046 options[b'revlogv0'] = True
1047 1047
1048 1048 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1049 1049 options[b'copies-storage'] = b'changeset-sidedata'
1050 1050 else:
1051 1051 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1052 1052 copiesextramode = (b'changeset-only', b'compatibility')
1053 1053 if writecopiesto in copiesextramode:
1054 1054 options[b'copies-storage'] = b'extra'
1055 1055
1056 1056 return options
1057 1057
1058 1058
1059 1059 def resolverevlogstorevfsoptions(ui, requirements, features):
1060 1060 """Resolve opener options specific to revlogs."""
1061 1061
1062 1062 options = {}
1063 1063 options[b'flagprocessors'] = {}
1064 1064
1065 1065 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1066 1066 options[b'revlogv1'] = True
1067 1067 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1068 1068 options[b'revlogv2'] = True
1069 1069 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1070 1070 options[b'changelogv2'] = True
1071 1071
1072 1072 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1073 1073 options[b'generaldelta'] = True
1074 1074
1075 1075 # experimental config: format.chunkcachesize
1076 1076 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1077 1077 if chunkcachesize is not None:
1078 1078 options[b'chunkcachesize'] = chunkcachesize
1079 1079
1080 1080 deltabothparents = ui.configbool(
1081 1081 b'storage', b'revlog.optimize-delta-parent-choice'
1082 1082 )
1083 1083 options[b'deltabothparents'] = deltabothparents
1084 dps_cgds = ui.configint(
1085 b'storage',
1086 b'revlog.delta-parent-search.candidate-group-chunk-size',
1087 )
1088 options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
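# Illustrative (not part of the change itself): the chunk size above comes
# from a configuration such as (the value is hypothetical)
#
#     [storage]
#     revlog.delta-parent-search.candidate-group-chunk-size = 10
#
# bounding how many delta-base candidates are tested at the same time.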
1084 1089 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1085 1090
1086 1091 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1087 1092 options[b'issue6528.fix-incoming'] = issue6528
1088 1093
1089 1094 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1090 1095 lazydeltabase = False
1091 1096 if lazydelta:
1092 1097 lazydeltabase = ui.configbool(
1093 1098 b'storage', b'revlog.reuse-external-delta-parent'
1094 1099 )
1095 1100 if lazydeltabase is None:
1096 1101 lazydeltabase = not scmutil.gddeltaconfig(ui)
1097 1102 options[b'lazydelta'] = lazydelta
1098 1103 options[b'lazydeltabase'] = lazydeltabase
1099 1104
1100 1105 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1101 1106 if 0 <= chainspan:
1102 1107 options[b'maxdeltachainspan'] = chainspan
1103 1108
1104 1109 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1105 1110 if mmapindexthreshold is not None:
1106 1111 options[b'mmapindexthreshold'] = mmapindexthreshold
1107 1112
1108 1113 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1109 1114 srdensitythres = float(
1110 1115 ui.config(b'experimental', b'sparse-read.density-threshold')
1111 1116 )
1112 1117 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1113 1118 options[b'with-sparse-read'] = withsparseread
1114 1119 options[b'sparse-read-density-threshold'] = srdensitythres
1115 1120 options[b'sparse-read-min-gap-size'] = srmingapsize
1116 1121
1117 1122 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1118 1123 options[b'sparse-revlog'] = sparserevlog
1119 1124 if sparserevlog:
1120 1125 options[b'generaldelta'] = True
1121 1126
1122 1127 maxchainlen = None
1123 1128 if sparserevlog:
1124 1129 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1125 1130 # experimental config: format.maxchainlen
1126 1131 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1127 1132 if maxchainlen is not None:
1128 1133 options[b'maxchainlen'] = maxchainlen
1129 1134
1130 1135 for r in requirements:
1131 1136 # we allow multiple compression engine requirements to co-exist because,
1132 1137 # strictly speaking, revlog seems to support mixed compression styles.
1133 1138 #
1134 1139 # The compression used for new entries will be "the last one"
1135 1140 prefix = r.startswith
1136 1141 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1137 1142 options[b'compengine'] = r.split(b'-', 2)[2]
1138 1143
1139 1144 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1140 1145 if options[b'zlib.level'] is not None:
1141 1146 if not (0 <= options[b'zlib.level'] <= 9):
1142 1147 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1143 1148 raise error.Abort(msg % options[b'zlib.level'])
1144 1149 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1145 1150 if options[b'zstd.level'] is not None:
1146 1151 if not (0 <= options[b'zstd.level'] <= 22):
1147 1152 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1148 1153 raise error.Abort(msg % options[b'zstd.level'])
1149 1154
1150 1155 if requirementsmod.NARROW_REQUIREMENT in requirements:
1151 1156 options[b'enableellipsis'] = True
1152 1157
1153 1158 if ui.configbool(b'experimental', b'rust.index'):
1154 1159 options[b'rust.index'] = True
1155 1160 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1156 1161 slow_path = ui.config(
1157 1162 b'storage', b'revlog.persistent-nodemap.slow-path'
1158 1163 )
1159 1164 if slow_path not in (b'allow', b'warn', b'abort'):
1160 1165 default = ui.config_default(
1161 1166 b'storage', b'revlog.persistent-nodemap.slow-path'
1162 1167 )
1163 1168 msg = _(
1164 1169 b'unknown value for config '
1165 1170 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1166 1171 )
1167 1172 ui.warn(msg % slow_path)
1168 1173 if not ui.quiet:
1169 1174 ui.warn(_(b'falling back to default value: %s\n') % default)
1170 1175 slow_path = default
1171 1176
1172 1177 msg = _(
1173 1178 b"accessing `persistent-nodemap` repository without associated "
1174 1179 b"fast implementation."
1175 1180 )
1176 1181 hint = _(
1177 1182 b"check `hg help config.format.use-persistent-nodemap` "
1178 1183 b"for details"
1179 1184 )
1180 1185 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1181 1186 if slow_path == b'warn':
1182 1187 msg = b"warning: " + msg + b'\n'
1183 1188 ui.warn(msg)
1184 1189 if not ui.quiet:
1185 1190 hint = b'(' + hint + b')\n'
1186 1191 ui.warn(hint)
1187 1192 if slow_path == b'abort':
1188 1193 raise error.Abort(msg, hint=hint)
1189 1194 options[b'persistent-nodemap'] = True
1190 1195 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1191 1196 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1192 1197 if slow_path not in (b'allow', b'warn', b'abort'):
1193 1198 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1194 1199 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1195 1200 ui.warn(msg % slow_path)
1196 1201 if not ui.quiet:
1197 1202 ui.warn(_(b'falling back to default value: %s\n') % default)
1198 1203 slow_path = default
1199 1204
1200 1205 msg = _(
1201 1206 b"accessing `dirstate-v2` repository without associated "
1202 1207 b"fast implementation."
1203 1208 )
1204 1209 hint = _(
1205 1210 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1206 1211 )
1207 1212 if not dirstate.HAS_FAST_DIRSTATE_V2:
1208 1213 if slow_path == b'warn':
1209 1214 msg = b"warning: " + msg + b'\n'
1210 1215 ui.warn(msg)
1211 1216 if not ui.quiet:
1212 1217 hint = b'(' + hint + b')\n'
1213 1218 ui.warn(hint)
1214 1219 if slow_path == b'abort':
1215 1220 raise error.Abort(msg, hint=hint)
1216 1221 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1217 1222 options[b'persistent-nodemap.mmap'] = True
1218 1223 if ui.configbool(b'devel', b'persistent-nodemap'):
1219 1224 options[b'devel-force-nodemap'] = True
1220 1225
1221 1226 return options
1222 1227
1223 1228
1224 1229 def makemain(**kwargs):
1225 1230 """Produce a type conforming to ``ilocalrepositorymain``."""
1226 1231 return localrepository
1227 1232
1228 1233
1229 1234 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1230 1235 class revlogfilestorage:
1231 1236 """File storage when using revlogs."""
1232 1237
1233 1238 def file(self, path):
1234 1239 if path.startswith(b'/'):
1235 1240 path = path[1:]
1236 1241
1237 1242 return filelog.filelog(self.svfs, path)
1238 1243
1239 1244
1240 1245 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1241 1246 class revlognarrowfilestorage:
1242 1247 """File storage when using revlogs and narrow files."""
1243 1248
1244 1249 def file(self, path):
1245 1250 if path.startswith(b'/'):
1246 1251 path = path[1:]
1247 1252
1248 1253 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1249 1254
1250 1255
1251 1256 def makefilestorage(requirements, features, **kwargs):
1252 1257 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1253 1258 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1254 1259 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1255 1260
1256 1261 if requirementsmod.NARROW_REQUIREMENT in requirements:
1257 1262 return revlognarrowfilestorage
1258 1263 else:
1259 1264 return revlogfilestorage
1260 1265
1261 1266
1262 1267 # List of repository interfaces and factory functions for them. Each
1263 1268 # will be called in order during ``makelocalrepository()`` to iteratively
1264 1269 # derive the final type for a local repository instance. We capture the
1265 1270 # function as a lambda so we don't hold a reference and the module-level
1266 1271 # functions can be wrapped.
1267 1272 REPO_INTERFACES = [
1268 1273 (repository.ilocalrepositorymain, lambda: makemain),
1269 1274 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1270 1275 ]
1271 1276
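# Illustrative sketch (not part of this module): because the factories above
# are captured in lambdas, an extension can wrap the module-level functions
# to inject behavior into the derived repository type.
#
# from mercurial import extensions, localrepo
#
# def _makemain(orig, **kwargs):
#     base = orig(**kwargs)
#     class extendedmain(base):
#         pass  # override or extend repository behavior here
#     return extendedmain
#
# def uisetup(ui):
#     extensions.wrapfunction(localrepo, 'makemain', _makemain)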
1272 1277
1273 1278 @interfaceutil.implementer(repository.ilocalrepositorymain)
1274 1279 class localrepository:
1275 1280 """Main class for representing local repositories.
1276 1281
1277 1282 All local repositories are instances of this class.
1278 1283
1279 1284 Constructed on its own, instances of this class are not usable as
1280 1285 repository objects. To obtain a usable repository object, call
1281 1286 ``hg.repository()``, ``localrepo.instance()``, or
1282 1287 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1283 1288 ``instance()`` adds support for creating new repositories.
1284 1289 ``hg.repository()`` adds more extension integration, including calling
1285 1290 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1286 1291 used.
1287 1292 """
1288 1293
1289 1294 _basesupported = {
1290 1295 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1291 1296 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1292 1297 requirementsmod.CHANGELOGV2_REQUIREMENT,
1293 1298 requirementsmod.COPIESSDC_REQUIREMENT,
1294 1299 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1295 1300 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1296 1301 requirementsmod.DOTENCODE_REQUIREMENT,
1297 1302 requirementsmod.FNCACHE_REQUIREMENT,
1298 1303 requirementsmod.GENERALDELTA_REQUIREMENT,
1299 1304 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1300 1305 requirementsmod.NODEMAP_REQUIREMENT,
1301 1306 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1302 1307 requirementsmod.REVLOGV1_REQUIREMENT,
1303 1308 requirementsmod.REVLOGV2_REQUIREMENT,
1304 1309 requirementsmod.SHARED_REQUIREMENT,
1305 1310 requirementsmod.SHARESAFE_REQUIREMENT,
1306 1311 requirementsmod.SPARSE_REQUIREMENT,
1307 1312 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1308 1313 requirementsmod.STORE_REQUIREMENT,
1309 1314 requirementsmod.TREEMANIFEST_REQUIREMENT,
1310 1315 }
1311 1316
1312 1317 # list of prefixes for files which can be written without 'wlock'
1313 1318 # Extensions should extend this list when needed
1314 1319 _wlockfreeprefix = {
1315 1320 # We might consider requiring 'wlock' for the next
1316 1321 # two, but pretty much all the existing code assumes
1317 1322 # wlock is not needed so we keep them excluded for
1318 1323 # now.
1319 1324 b'hgrc',
1320 1325 b'requires',
1321 1326 # XXX cache is a complicated business; someone
1322 1327 # should investigate this in depth at some point
1323 1328 b'cache/',
1324 1329 # XXX shouldn't dirstate be covered by the wlock?
1325 1330 b'dirstate',
1326 1331 # XXX bisect was still a bit too messy at the time
1327 1332 # this changeset was introduced. Someone should fix
1328 1333 # the remaining bit and drop this line
1329 1334 b'bisect.state',
1330 1335 }
1331 1336
1332 1337 def __init__(
1333 1338 self,
1334 1339 baseui,
1335 1340 ui,
1336 1341 origroot: bytes,
1337 1342 wdirvfs: vfsmod.vfs,
1338 1343 hgvfs: vfsmod.vfs,
1339 1344 requirements,
1340 1345 supportedrequirements,
1341 1346 sharedpath: bytes,
1342 1347 store,
1343 1348 cachevfs: vfsmod.vfs,
1344 1349 wcachevfs: vfsmod.vfs,
1345 1350 features,
1346 1351 intents=None,
1347 1352 ):
1348 1353 """Create a new local repository instance.
1349 1354
1350 1355 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1351 1356 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1352 1357 object.
1353 1358
1354 1359 Arguments:
1355 1360
1356 1361 baseui
1357 1362 ``ui.ui`` instance that ``ui`` argument was based off of.
1358 1363
1359 1364 ui
1360 1365 ``ui.ui`` instance for use by the repository.
1361 1366
1362 1367 origroot
1363 1368 ``bytes`` path to working directory root of this repository.
1364 1369
1365 1370 wdirvfs
1366 1371 ``vfs.vfs`` rooted at the working directory.
1367 1372
1368 1373 hgvfs
1369 1374 ``vfs.vfs`` rooted at .hg/
1370 1375
1371 1376 requirements
1372 1377 ``set`` of bytestrings representing repository opening requirements.
1373 1378
1374 1379 supportedrequirements
1375 1380 ``set`` of bytestrings representing repository requirements that we
1376 1381 know how to open. May be a superset of ``requirements``.
1377 1382
1378 1383 sharedpath
1379 1384 ``bytes`` Defining path to storage base directory. Points to a
1380 1385 ``.hg/`` directory somewhere.
1381 1386
1382 1387 store
1383 1388 ``store.basicstore`` (or derived) instance providing access to
1384 1389 versioned storage.
1385 1390
1386 1391 cachevfs
1387 1392 ``vfs.vfs`` used for cache files.
1388 1393
1389 1394 wcachevfs
1390 1395 ``vfs.vfs`` used for cache files related to the working copy.
1391 1396
1392 1397 features
1393 1398 ``set`` of bytestrings defining features/capabilities of this
1394 1399 instance.
1395 1400
1396 1401 intents
1397 1402 ``set`` of system strings indicating what this repo will be used
1398 1403 for.
1399 1404 """
1400 1405 self.baseui = baseui
1401 1406 self.ui = ui
1402 1407 self.origroot = origroot
1403 1408 # vfs rooted at working directory.
1404 1409 self.wvfs = wdirvfs
1405 1410 self.root = wdirvfs.base
1406 1411 # vfs rooted at .hg/. Used to access most non-store paths.
1407 1412 self.vfs = hgvfs
1408 1413 self.path = hgvfs.base
1409 1414 self.requirements = requirements
1410 1415 self.nodeconstants = sha1nodeconstants
1411 1416 self.nullid = self.nodeconstants.nullid
1412 1417 self.supported = supportedrequirements
1413 1418 self.sharedpath = sharedpath
1414 1419 self.store = store
1415 1420 self.cachevfs = cachevfs
1416 1421 self.wcachevfs = wcachevfs
1417 1422 self.features = features
1418 1423
1419 1424 self.filtername = None
1420 1425
1421 1426 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1422 1427 b'devel', b'check-locks'
1423 1428 ):
1424 1429 self.vfs.audit = self._getvfsward(self.vfs.audit)
1425 1430 # A list of callbacks to shape the phases if no data were found.
1426 1431 # Callbacks are in the form: func(repo, roots) --> processed root.
1427 1432 # This list is to be filled by extensions during repo setup
1428 1433 self._phasedefaults = []
1429 1434
1430 1435 color.setup(self.ui)
1431 1436
1432 1437 self.spath = self.store.path
1433 1438 self.svfs = self.store.vfs
1434 1439 self.sjoin = self.store.join
1435 1440 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1436 1441 b'devel', b'check-locks'
1437 1442 ):
1438 1443 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1439 1444 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1440 1445 else: # standard vfs
1441 1446 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1442 1447
1443 1448 self._dirstatevalidatewarned = False
1444 1449
1445 1450 self._branchcaches = branchmap.BranchMapCache()
1446 1451 self._revbranchcache = None
1447 1452 self._filterpats = {}
1448 1453 self._datafilters = {}
1449 1454 self._transref = self._lockref = self._wlockref = None
1450 1455
1451 1456 # A cache for various files under .hg/ that tracks file changes,
1452 1457 # (used by the filecache decorator)
1453 1458 #
1454 1459 # Maps a property name to its util.filecacheentry
1455 1460 self._filecache = {}
1456 1461
1457 1462 # hold sets of revision to be filtered
1458 1463 # should be cleared when something might have changed the filter value:
1459 1464 # - new changesets,
1460 1465 # - phase change,
1461 1466 # - new obsolescence marker,
1462 1467 # - working directory parent change,
1463 1468 # - bookmark changes
1464 1469 self.filteredrevcache = {}
1465 1470
1466 1471 # post-dirstate-status hooks
1467 1472 self._postdsstatus = []
1468 1473
1469 1474 # generic mapping between names and nodes
1470 1475 self.names = namespaces.namespaces()
1471 1476
1472 1477 # Key to signature value.
1473 1478 self._sparsesignaturecache = {}
1474 1479 # Signature to cached matcher instance.
1475 1480 self._sparsematchercache = {}
1476 1481
1477 1482 self._extrafilterid = repoview.extrafilter(ui)
1478 1483
1479 1484 self.filecopiesmode = None
1480 1485 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1481 1486 self.filecopiesmode = b'changeset-sidedata'
1482 1487
1483 1488 self._wanted_sidedata = set()
1484 1489 self._sidedata_computers = {}
1485 1490 sidedatamod.set_sidedata_spec_for_repo(self)
1486 1491
1487 1492 def _getvfsward(self, origfunc):
1488 1493 """build a ward for self.vfs"""
1489 1494 rref = weakref.ref(self)
1490 1495
1491 1496 def checkvfs(path, mode=None):
1492 1497 ret = origfunc(path, mode=mode)
1493 1498 repo = rref()
1494 1499 if (
1495 1500 repo is None
1496 1501 or not util.safehasattr(repo, b'_wlockref')
1497 1502 or not util.safehasattr(repo, b'_lockref')
1498 1503 ):
1499 1504 return
1500 1505 if mode in (None, b'r', b'rb'):
1501 1506 return
1502 1507 if path.startswith(repo.path):
1503 1508 # truncate name relative to the repository (.hg)
1504 1509 path = path[len(repo.path) + 1 :]
1505 1510 if path.startswith(b'cache/'):
1506 1511 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1507 1512 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1508 1513 # path prefixes covered by 'lock'
1509 1514 vfs_path_prefixes = (
1510 1515 b'journal.',
1511 1516 b'undo.',
1512 1517 b'strip-backup/',
1513 1518 b'cache/',
1514 1519 )
1515 1520 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1516 1521 if repo._currentlock(repo._lockref) is None:
1517 1522 repo.ui.develwarn(
1518 1523 b'write with no lock: "%s"' % path,
1519 1524 stacklevel=3,
1520 1525 config=b'check-locks',
1521 1526 )
1522 1527 elif repo._currentlock(repo._wlockref) is None:
1523 1528 # rest of vfs files are covered by 'wlock'
1524 1529 #
1525 1530 # exclude special files
1526 1531 for prefix in self._wlockfreeprefix:
1527 1532 if path.startswith(prefix):
1528 1533 return
1529 1534 repo.ui.develwarn(
1530 1535 b'write with no wlock: "%s"' % path,
1531 1536 stacklevel=3,
1532 1537 config=b'check-locks',
1533 1538 )
1534 1539 return ret
1535 1540
1536 1541 return checkvfs
1537 1542
1538 1543 def _getsvfsward(self, origfunc):
1539 1544 """build a ward for self.svfs"""
1540 1545 rref = weakref.ref(self)
1541 1546
1542 1547 def checksvfs(path, mode=None):
1543 1548 ret = origfunc(path, mode=mode)
1544 1549 repo = rref()
1545 1550 if repo is None or not util.safehasattr(repo, b'_lockref'):
1546 1551 return
1547 1552 if mode in (None, b'r', b'rb'):
1548 1553 return
1549 1554 if path.startswith(repo.sharedpath):
1550 1555 # truncate name relative to the repository (.hg)
1551 1556 path = path[len(repo.sharedpath) + 1 :]
1552 1557 if repo._currentlock(repo._lockref) is None:
1553 1558 repo.ui.develwarn(
1554 1559 b'write with no lock: "%s"' % path, stacklevel=4
1555 1560 )
1556 1561 return ret
1557 1562
1558 1563 return checksvfs
1559 1564
1560 1565 def close(self):
1561 1566 self._writecaches()
1562 1567
1563 1568 def _writecaches(self):
1564 1569 if self._revbranchcache:
1565 1570 self._revbranchcache.write()
1566 1571
1567 1572 def _restrictcapabilities(self, caps):
1568 1573 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1569 1574 caps = set(caps)
1570 1575 capsblob = bundle2.encodecaps(
1571 1576 bundle2.getrepocaps(self, role=b'client')
1572 1577 )
1573 1578 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1574 1579 if self.ui.configbool(b'experimental', b'narrow'):
1575 1580 caps.add(wireprototypes.NARROWCAP)
1576 1581 return caps
1577 1582
1578 1583 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1579 1584 # self -> auditor -> self._checknested -> self
1580 1585
1581 1586 @property
1582 1587 def auditor(self):
1583 1588 # This is only used by context.workingctx.match in order to
1584 1589 # detect files in subrepos.
1585 1590 return pathutil.pathauditor(self.root, callback=self._checknested)
1586 1591
1587 1592 @property
1588 1593 def nofsauditor(self):
1589 1594 # This is only used by context.basectx.match in order to detect
1590 1595 # files in subrepos.
1591 1596 return pathutil.pathauditor(
1592 1597 self.root, callback=self._checknested, realfs=False, cached=True
1593 1598 )
1594 1599
1595 1600 def _checknested(self, path):
1596 1601 """Determine if path is a legal nested repository."""
1597 1602 if not path.startswith(self.root):
1598 1603 return False
1599 1604 subpath = path[len(self.root) + 1 :]
1600 1605 normsubpath = util.pconvert(subpath)
1601 1606
1602 1607 # XXX: Checking against the current working copy is wrong in
1603 1608 # the sense that it can reject things like
1604 1609 #
1605 1610 # $ hg cat -r 10 sub/x.txt
1606 1611 #
1607 1612 # if sub/ is no longer a subrepository in the working copy
1608 1613 # parent revision.
1609 1614 #
1610 1615 # However, it can of course also allow things that would have
1611 1616 # been rejected before, such as the above cat command if sub/
1612 1617 # is a subrepository now, but was a normal directory before.
1613 1618 # The old path auditor would have rejected by mistake since it
1614 1619 # panics when it sees sub/.hg/.
1615 1620 #
1616 1621 # All in all, checking against the working copy seems sensible
1617 1622 # since we want to prevent access to nested repositories on
1618 1623 # the filesystem *now*.
1619 1624 ctx = self[None]
1620 1625 parts = util.splitpath(subpath)
1621 1626 while parts:
1622 1627 prefix = b'/'.join(parts)
1623 1628 if prefix in ctx.substate:
1624 1629 if prefix == normsubpath:
1625 1630 return True
1626 1631 else:
1627 1632 sub = ctx.sub(prefix)
1628 1633 return sub.checknested(subpath[len(prefix) + 1 :])
1629 1634 else:
1630 1635 parts.pop()
1631 1636 return False
1632 1637
1633 1638 def peer(self):
1634 1639 return localpeer(self) # not cached to avoid reference cycle
1635 1640
1636 1641 def unfiltered(self):
1637 1642 """Return unfiltered version of the repository
1638 1643
1639 1644 Intended to be overwritten by filtered repo."""
1640 1645 return self
1641 1646
1642 1647 def filtered(self, name, visibilityexceptions=None):
1643 1648 """Return a filtered version of a repository
1644 1649
1645 1650 The `name` parameter is the identifier of the requested view. This
1646 1651 will return a repoview object set "exactly" to the specified view.
1647 1652
1648 1653 This function does not apply recursive filtering to a repository. For
1649 1654 example calling `repo.filtered("served")` will return a repoview using
1650 1655 the "served" view, regardless of the initial view used by `repo`.
1651 1656
1652 1657 In other words, there is always only one level of `repoview` "filtering".
1653 1658 """
1654 1659 if self._extrafilterid is not None and b'%' not in name:
1655 1660 name = name + b'%' + self._extrafilterid
1656 1661
1657 1662 cls = repoview.newtype(self.unfiltered().__class__)
1658 1663 return cls(self, name, visibilityexceptions)
1659 1664
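# Illustrative (not part of this module): typical uses of ``filtered``,
# e.g.
#
#     served = repo.filtered(b'served')    # view without hidden/secret csets
#     visible = repo.filtered(b'visible')  # view without hidden csets
#
# Each call returns a fresh ``repoview`` over the same underlying repo.
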
1660 1665 @mixedrepostorecache(
1661 1666 (b'bookmarks', b'plain'),
1662 1667 (b'bookmarks.current', b'plain'),
1663 1668 (b'bookmarks', b''),
1664 1669 (b'00changelog.i', b''),
1665 1670 )
1666 1671 def _bookmarks(self):
1667 1672 # Since the multiple files involved in the transaction cannot be
1668 1673 # written atomically (with current repository format), there is a race
1669 1674 # condition here.
1670 1675 #
1671 1676 # 1) changelog content A is read
1672 1677 # 2) outside transaction update changelog to content B
1673 1678 # 3) outside transaction update bookmark file referring to content B
1674 1679 # 4) bookmarks file content is read and filtered against changelog-A
1675 1680 #
1676 1681 # When this happens, bookmarks against nodes missing from A are dropped.
1677 1682 #
1678 1683 # Having this happen during a read is not great, but it becomes worse
1679 1684 # when it happens during a write because the bookmarks to the "unknown"
1680 1685 # nodes will be dropped for good. However, writes happen within locks.
1681 1686 # This locking makes it possible to have a race-free consistent read.
1682 1687 # For this purpose, data read from disk before locking are
1683 1688 # "invalidated" right after the locks are taken. These invalidations are
1684 1689 # "light"; the `filecache` mechanism keeps the data in memory and will
1685 1690 # reuse them if the underlying files did not change. Not parsing the
1686 1691 # same data multiple times helps performance.
1687 1692 #
1688 1693 # Unfortunately, in the case described above, the files tracked by the
1689 1694 # bookmarks file cache might not have changed, but the in-memory
1690 1695 # content is still "wrong" because we used an older changelog content
1691 1696 # to process the on-disk data. So after locking, the changelog would be
1692 1697 # refreshed but `_bookmarks` would be preserved.
1693 1698 # Adding `00changelog.i` to the list of tracked files is not
1694 1699 # enough, because at the time we build the content for `_bookmarks` in
1695 1700 # (4), the changelog file has already diverged from the content used
1696 1701 # for loading `changelog` in (1)
1697 1702 #
1698 1703 # To prevent the issue, we force the changelog to be explicitly
1699 1704 # reloaded while computing `_bookmarks`. The data race can still happen
1700 1705 # without the lock (with a narrower window), but it would no longer go
1701 1706 # undetected during the lock time refresh.
1702 1707 #
1703 1708 # The new schedule is as follows
1704 1709 #
1705 1710 # 1) filecache logic detect that `_bookmarks` needs to be computed
1706 1711 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1707 1712 # 3) We force `changelog` filecache to be tested
1708 1713 # 4) cachestat for `changelog` are captured (for changelog)
1709 1714 # 5) `_bookmarks` is computed and cached
1710 1715 #
1711 1716 # The step in (3) ensures we have a changelog at least as recent as the
1712 1717 # cache stat computed in (1). As a result at locking time:
1713 1718 # * if the changelog did not change since (1) -> we can reuse the data
1714 1719 # * otherwise -> the bookmarks get refreshed.
1715 1720 self._refreshchangelog()
1716 1721 return bookmarks.bmstore(self)
1717 1722
1718 1723 def _refreshchangelog(self):
1719 1724 """make sure the in memory changelog match the on-disk one"""
1720 1725 if 'changelog' in vars(self) and self.currenttransaction() is None:
1721 1726 del self.changelog
1722 1727
1723 1728 @property
1724 1729 def _activebookmark(self):
1725 1730 return self._bookmarks.active
1726 1731
1727 1732 # _phasesets depend on changelog. what we need is to call
1728 1733 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1729 1734 # can't be easily expressed in filecache mechanism.
1730 1735 @storecache(b'phaseroots', b'00changelog.i')
1731 1736 def _phasecache(self):
1732 1737 return phases.phasecache(self, self._phasedefaults)
1733 1738
1734 1739 @storecache(b'obsstore')
1735 1740 def obsstore(self):
1736 1741 return obsolete.makestore(self.ui, self)
1737 1742
1738 1743 @changelogcache()
1739 1744 def changelog(repo):
1740 1745 # load dirstate before changelog to avoid a race; see issue6303
1741 1746 repo.dirstate.prefetch_parents()
1742 1747 return repo.store.changelog(
1743 1748 txnutil.mayhavepending(repo.root),
1744 1749 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1745 1750 )
1746 1751
1747 1752 @manifestlogcache()
1748 1753 def manifestlog(self):
1749 1754 return self.store.manifestlog(self, self._storenarrowmatch)
1750 1755
1751 1756 @repofilecache(b'dirstate')
1752 1757 def dirstate(self):
1753 1758 return self._makedirstate()
1754 1759
1755 1760 def _makedirstate(self):
1756 1761 """Extension point for wrapping the dirstate per-repo."""
1757 1762 sparsematchfn = None
1758 1763 if sparse.use_sparse(self):
1759 1764 sparsematchfn = lambda: sparse.matcher(self)
1760 1765 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1761 1766 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1762 1767 use_dirstate_v2 = v2_req in self.requirements
1763 1768 use_tracked_hint = th in self.requirements
1764 1769
1765 1770 return dirstate.dirstate(
1766 1771 self.vfs,
1767 1772 self.ui,
1768 1773 self.root,
1769 1774 self._dirstatevalidate,
1770 1775 sparsematchfn,
1771 1776 self.nodeconstants,
1772 1777 use_dirstate_v2,
1773 1778 use_tracked_hint=use_tracked_hint,
1774 1779 )
1775 1780
1776 1781 def _dirstatevalidate(self, node):
1777 1782 try:
1778 1783 self.changelog.rev(node)
1779 1784 return node
1780 1785 except error.LookupError:
1781 1786 if not self._dirstatevalidatewarned:
1782 1787 self._dirstatevalidatewarned = True
1783 1788 self.ui.warn(
1784 1789 _(b"warning: ignoring unknown working parent %s!\n")
1785 1790 % short(node)
1786 1791 )
1787 1792 return self.nullid
1788 1793
1789 1794 @storecache(narrowspec.FILENAME)
1790 1795 def narrowpats(self):
1791 1796 """matcher patterns for this repository's narrowspec
1792 1797
1793 1798 A tuple of (includes, excludes).
1794 1799 """
1795 1800 return narrowspec.load(self)
1796 1801
1797 1802 @storecache(narrowspec.FILENAME)
1798 1803 def _storenarrowmatch(self):
1799 1804 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1800 1805 return matchmod.always()
1801 1806 include, exclude = self.narrowpats
1802 1807 return narrowspec.match(self.root, include=include, exclude=exclude)
1803 1808
1804 1809 @storecache(narrowspec.FILENAME)
1805 1810 def _narrowmatch(self):
1806 1811 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1807 1812 return matchmod.always()
1808 1813 narrowspec.checkworkingcopynarrowspec(self)
1809 1814 include, exclude = self.narrowpats
1810 1815 return narrowspec.match(self.root, include=include, exclude=exclude)
1811 1816
1812 1817 def narrowmatch(self, match=None, includeexact=False):
1813 1818 """matcher corresponding the the repo's narrowspec
1814 1819
1815 1820 If `match` is given, then that will be intersected with the narrow
1816 1821 matcher.
1817 1822
1818 1823 If `includeexact` is True, then any exact matches from `match` will
1819 1824 be included even if they're outside the narrowspec.
1820 1825 """
1821 1826 if match:
1822 1827 if includeexact and not self._narrowmatch.always():
1823 1828 # do not exclude explicitly-specified paths so that they can
1824 1829 # be warned later on
1825 1830 em = matchmod.exact(match.files())
1826 1831 nm = matchmod.unionmatcher([self._narrowmatch, em])
1827 1832 return matchmod.intersectmatchers(match, nm)
1828 1833 return matchmod.intersectmatchers(match, self._narrowmatch)
1829 1834 return self._narrowmatch
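# A minimal usage sketch (hypothetical pattern and filenames, not part of
# this module): intersecting an arbitrary matcher with the narrowspec,
# keeping explicitly named files visible so they can be warned about later.
#
#   m = matchmod.match(repo.root, b'', [b'glob:src/**'])
#   narrowed = repo.narrowmatch(m, includeexact=True)
#   narrowed(b'src/main.py')  # True only if `m` matches and either the
#                             # narrowspec allows it or it was listed exactly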
1830 1835
1831 1836 def setnarrowpats(self, newincludes, newexcludes):
1832 1837 narrowspec.save(self, newincludes, newexcludes)
1833 1838 self.invalidate(clearfilecache=True)
1834 1839
1835 1840 @unfilteredpropertycache
1836 1841 def _quick_access_changeid_null(self):
1837 1842 return {
1838 1843 b'null': (nullrev, self.nodeconstants.nullid),
1839 1844 nullrev: (nullrev, self.nodeconstants.nullid),
1840 1845 self.nullid: (nullrev, self.nullid),
1841 1846 }
1842 1847
1843 1848 @unfilteredpropertycache
1844 1849 def _quick_access_changeid_wc(self):
1845 1850 # also fast path access to the working copy parents
1846 1851 # however, only do it for filters that ensure the wc is visible.
1847 1852 quick = self._quick_access_changeid_null.copy()
1848 1853 cl = self.unfiltered().changelog
1849 1854 for node in self.dirstate.parents():
1850 1855 if node == self.nullid:
1851 1856 continue
1852 1857 rev = cl.index.get_rev(node)
1853 1858 if rev is None:
1854 1859 # unknown working copy parent case:
1855 1860 #
1856 1861 # skip the fast path and let higher code deal with it
1857 1862 continue
1858 1863 pair = (rev, node)
1859 1864 quick[rev] = pair
1860 1865 quick[node] = pair
1861 1866 # also add the parents of the parents
1862 1867 for r in cl.parentrevs(rev):
1863 1868 if r == nullrev:
1864 1869 continue
1865 1870 n = cl.node(r)
1866 1871 pair = (r, n)
1867 1872 quick[r] = pair
1868 1873 quick[n] = pair
1869 1874 p1node = self.dirstate.p1()
1870 1875 if p1node != self.nullid:
1871 1876 quick[b'.'] = quick[p1node]
1872 1877 return quick
1873 1878
1874 1879 @unfilteredmethod
1875 1880 def _quick_access_changeid_invalidate(self):
1876 1881 if '_quick_access_changeid_wc' in vars(self):
1877 1882 del self.__dict__['_quick_access_changeid_wc']
1878 1883
1879 1884 @property
1880 1885 def _quick_access_changeid(self):
1881 1886 """an helper dictionnary for __getitem__ calls
1882 1887
1883 1888 This contains a list of symbols we can recognise right away without
1884 1889 further processing.
1885 1890 """
1886 1891 if self.filtername in repoview.filter_has_wc:
1887 1892 return self._quick_access_changeid_wc
1888 1893 return self._quick_access_changeid_null
1889 1894
1890 1895 def __getitem__(self, changeid):
1891 1896 # dealing with special cases
1892 1897 if changeid is None:
1893 1898 return context.workingctx(self)
1894 1899 if isinstance(changeid, context.basectx):
1895 1900 return changeid
1896 1901
1897 1902 # dealing with multiple revisions
1898 1903 if isinstance(changeid, slice):
1899 1904 # wdirrev isn't contiguous so the slice shouldn't include it
1900 1905 return [
1901 1906 self[i]
1902 1907 for i in range(*changeid.indices(len(self)))
1903 1908 if i not in self.changelog.filteredrevs
1904 1909 ]
1905 1910
1906 1911 # dealing with some special values
1907 1912 quick_access = self._quick_access_changeid.get(changeid)
1908 1913 if quick_access is not None:
1909 1914 rev, node = quick_access
1910 1915 return context.changectx(self, rev, node, maybe_filtered=False)
1911 1916 if changeid == b'tip':
1912 1917 node = self.changelog.tip()
1913 1918 rev = self.changelog.rev(node)
1914 1919 return context.changectx(self, rev, node)
1915 1920
1916 1921 # dealing with arbitrary values
1917 1922 try:
1918 1923 if isinstance(changeid, int):
1919 1924 node = self.changelog.node(changeid)
1920 1925 rev = changeid
1921 1926 elif changeid == b'.':
1922 1927 # this is a hack to delay/avoid loading obsmarkers
1923 1928 # when we know that '.' won't be hidden
1924 1929 node = self.dirstate.p1()
1925 1930 rev = self.unfiltered().changelog.rev(node)
1926 1931 elif len(changeid) == self.nodeconstants.nodelen:
1927 1932 try:
1928 1933 node = changeid
1929 1934 rev = self.changelog.rev(changeid)
1930 1935 except error.FilteredLookupError:
1931 1936 changeid = hex(changeid) # for the error message
1932 1937 raise
1933 1938 except LookupError:
1934 1939 # check if it might have come from damaged dirstate
1935 1940 #
1936 1941 # XXX we could avoid the unfiltered if we had a recognizable
1937 1942 # exception for filtered changeset access
1938 1943 if (
1939 1944 self.local()
1940 1945 and changeid in self.unfiltered().dirstate.parents()
1941 1946 ):
1942 1947 msg = _(b"working directory has unknown parent '%s'!")
1943 1948 raise error.Abort(msg % short(changeid))
1944 1949 changeid = hex(changeid) # for the error message
1945 1950 raise
1946 1951
1947 1952 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1948 1953 node = bin(changeid)
1949 1954 rev = self.changelog.rev(node)
1950 1955 else:
1951 1956 raise error.ProgrammingError(
1952 1957 b"unsupported changeid '%s' of type %s"
1953 1958 % (changeid, pycompat.bytestr(type(changeid)))
1954 1959 )
1955 1960
1956 1961 return context.changectx(self, rev, node)
1957 1962
1958 1963 except (error.FilteredIndexError, error.FilteredLookupError):
1959 1964 raise error.FilteredRepoLookupError(
1960 1965 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1961 1966 )
1962 1967 except (IndexError, LookupError):
1963 1968 raise error.RepoLookupError(
1964 1969 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1965 1970 )
1966 1971 except error.WdirUnsupported:
1967 1972 return context.workingctx(self)
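# Illustrative lookups accepted by __getitem__ (a sketch assuming an
# existing `repo` instance; `bin_node`/`hex_node` are placeholder names):
#
#   repo[None]      # working directory context
#   repo[b'tip']    # tip changeset
#   repo[b'.']      # first working-directory parent
#   repo[0]         # by integer revision
#   repo[0:3]       # slice -> list of changectx, filtered revs skipped
#   repo[bin_node]  # 20-byte binary node
#   repo[hex_node]  # 40-byte hex node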
1968 1973
1969 1974 def __contains__(self, changeid):
1970 1975 """True if the given changeid exists"""
1971 1976 try:
1972 1977 self[changeid]
1973 1978 return True
1974 1979 except error.RepoLookupError:
1975 1980 return False
1976 1981
1977 1982 def __nonzero__(self):
1978 1983 return True
1979 1984
1980 1985 __bool__ = __nonzero__
1981 1986
1982 1987 def __len__(self):
1983 1988 # no need to pay the cost of repoview.changelog
1984 1989 unfi = self.unfiltered()
1985 1990 return len(unfi.changelog)
1986 1991
1987 1992 def __iter__(self):
1988 1993 return iter(self.changelog)
1989 1994
1990 1995 def revs(self, expr: bytes, *args):
1991 1996 """Find revisions matching a revset.
1992 1997
1993 1998 The revset is specified as a string ``expr`` that may contain
1994 1999 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1995 2000
1996 2001 Revset aliases from the configuration are not expanded. To expand
1997 2002 user aliases, consider calling ``scmutil.revrange()`` or
1998 2003 ``repo.anyrevs([expr], user=True)``.
1999 2004
2000 2005 Returns a smartset.abstractsmartset, which is a list-like interface
2001 2006 that contains integer revisions.
2002 2007 """
2003 2008 tree = revsetlang.spectree(expr, *args)
2004 2009 return revset.makematcher(tree)(self)
2005 2010
2006 2011 def set(self, expr: bytes, *args):
2007 2012 """Find revisions matching a revset and emit changectx instances.
2008 2013
2009 2014 This is a convenience wrapper around ``revs()`` that iterates the
2010 2015 result and is a generator of changectx instances.
2011 2016
2012 2017 Revset aliases from the configuration are not expanded. To expand
2013 2018 user aliases, consider calling ``scmutil.revrange()``.
2014 2019 """
2015 2020 for r in self.revs(expr, *args):
2016 2021 yield self[r]
2017 2022
2018 2023 def anyrevs(self, specs: bytes, user=False, localalias=None):
2019 2024 """Find revisions matching one of the given revsets.
2020 2025
2021 2026 Revset aliases from the configuration are not expanded by default. To
2022 2027 expand user aliases, specify ``user=True``. To provide some local
2023 2028 definitions overriding user aliases, set ``localalias`` to
2024 2029 ``{name: definitionstring}``.
2025 2030 """
2026 2031 if specs == [b'null']:
2027 2032 return revset.baseset([nullrev])
2028 2033 if specs == [b'.']:
2029 2034 quick_data = self._quick_access_changeid.get(b'.')
2030 2035 if quick_data is not None:
2031 2036 return revset.baseset([quick_data[0]])
2032 2037 if user:
2033 2038 m = revset.matchany(
2034 2039 self.ui,
2035 2040 specs,
2036 2041 lookup=revset.lookupfn(self),
2037 2042 localalias=localalias,
2038 2043 )
2039 2044 else:
2040 2045 m = revset.matchany(None, specs, localalias=localalias)
2041 2046 return m(self)
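# A short revset usage sketch (values are illustrative; the %-escaping is
# handled by revsetlang.formatspec as documented above):
#
#   revs = repo.revs(b'ancestors(%d) and not public()', 42)
#   for ctx in repo.set(b'heads(branch(%s))', b'default'):
#       pass  # each ctx is a changectx
#   subset = repo.anyrevs([b'. or tip'], user=True)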
2042 2047
2043 2048 def url(self) -> bytes:
2044 2049 return b'file:' + self.root
2045 2050
2046 2051 def hook(self, name, throw=False, **args):
2047 2052 """Call a hook, passing this repo instance.
2048 2053
2049 2054 This is a convenience method to aid in invoking hooks. Extensions likely
2050 2055 won't call this unless they have registered a custom hook or are
2051 2056 replacing code that is expected to call a hook.
2052 2057 """
2053 2058 return hook.hook(self.ui, self, name, throw, **args)
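# Example invocation (a sketch; the hook name is hypothetical):
#
#   repo.hook(b'myextension-done', throw=False, node=hex(newnode))
#
# With throw=True, a failing hook raises and aborts the calling operation.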
2054 2059
2055 2060 @filteredpropertycache
2056 2061 def _tagscache(self):
2057 2062 """Returns a tagscache object that contains various tags related
2058 2063 caches."""
2059 2064
2060 2065 # This simplifies its cache management by having one decorated
2061 2066 # function (this one) and the rest simply fetch things from it.
2062 2067 class tagscache:
2063 2068 def __init__(self):
2064 2069 # These two define the set of tags for this repository. tags
2065 2070 # maps tag name to node; tagtypes maps tag name to 'global' or
2066 2071 # 'local'. (Global tags are defined by .hgtags across all
2067 2072 # heads, and local tags are defined in .hg/localtags.)
2068 2073 # They constitute the in-memory cache of tags.
2069 2074 self.tags = self.tagtypes = None
2070 2075
2071 2076 self.nodetagscache = self.tagslist = None
2072 2077
2073 2078 cache = tagscache()
2074 2079 cache.tags, cache.tagtypes = self._findtags()
2075 2080
2076 2081 return cache
2077 2082
2078 2083 def tags(self):
2079 2084 '''return a mapping of tag to node'''
2080 2085 t = {}
2081 2086 if self.changelog.filteredrevs:
2082 2087 tags, tt = self._findtags()
2083 2088 else:
2084 2089 tags = self._tagscache.tags
2085 2090 rev = self.changelog.rev
2086 2091 for k, v in tags.items():
2087 2092 try:
2088 2093 # ignore tags to unknown nodes
2089 2094 rev(v)
2090 2095 t[k] = v
2091 2096 except (error.LookupError, ValueError):
2092 2097 pass
2093 2098 return t
2094 2099
2095 2100 def _findtags(self):
2096 2101 """Do the hard work of finding tags. Return a pair of dicts
2097 2102 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2098 2103 maps tag name to a string like \'global\' or \'local\'.
2099 2104 Subclasses or extensions are free to add their own tags, but
2100 2105 should be aware that the returned dicts will be retained for the
2101 2106 duration of the localrepo object."""
2102 2107
2103 2108 # XXX what tagtype should subclasses/extensions use? Currently
2104 2109 # mq and bookmarks add tags, but do not set the tagtype at all.
2105 2110 # Should each extension invent its own tag type? Should there
2106 2111 # be one tagtype for all such "virtual" tags? Or is the status
2107 2112 # quo fine?
2108 2113
2109 2114 # map tag name to (node, hist)
2110 2115 alltags = tagsmod.findglobaltags(self.ui, self)
2111 2116 # map tag name to tag type
2112 2117 tagtypes = {tag: b'global' for tag in alltags}
2113 2118
2114 2119 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2115 2120
2116 2121 # Build the return dicts. Have to re-encode tag names because
2117 2122 # the tags module always uses UTF-8 (in order not to lose info
2118 2123 # writing to the cache), but the rest of Mercurial wants them in
2119 2124 # local encoding.
2120 2125 tags = {}
2121 2126 for (name, (node, hist)) in alltags.items():
2122 2127 if node != self.nullid:
2123 2128 tags[encoding.tolocal(name)] = node
2124 2129 tags[b'tip'] = self.changelog.tip()
2125 2130 tagtypes = {
2126 2131 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2127 2132 }
2128 2133 return (tags, tagtypes)
2129 2134
2130 2135 def tagtype(self, tagname):
2131 2136 """
2132 2137 return the type of the given tag. result can be:
2133 2138
2134 2139 'local' : a local tag
2135 2140 'global' : a global tag
2136 2141 None : tag does not exist
2137 2142 """
2138 2143
2139 2144 return self._tagscache.tagtypes.get(tagname)
2140 2145
2141 2146 def tagslist(self):
2142 2147 '''return a list of tags ordered by revision'''
2143 2148 if not self._tagscache.tagslist:
2144 2149 l = []
2145 2150 for t, n in self.tags().items():
2146 2151 l.append((self.changelog.rev(n), t, n))
2147 2152 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2148 2153
2149 2154 return self._tagscache.tagslist
2150 2155
2151 2156 def nodetags(self, node):
2152 2157 '''return the tags associated with a node'''
2153 2158 if not self._tagscache.nodetagscache:
2154 2159 nodetagscache = {}
2155 2160 for t, n in self._tagscache.tags.items():
2156 2161 nodetagscache.setdefault(n, []).append(t)
2157 2162 for tags in nodetagscache.values():
2158 2163 tags.sort()
2159 2164 self._tagscache.nodetagscache = nodetagscache
2160 2165 return self._tagscache.nodetagscache.get(node, [])
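# A quick sketch of the tag APIs defined above (assuming an existing
# `repo`; the tag name is illustrative):
#
#   repo.tags()           # {tagname: node}, always includes b'tip'
#   repo.tagtype(b'1.0')  # b'global', b'local', or None if unknown
#   repo.tagslist()       # [(tagname, node)] ordered by revision
#   repo.nodetags(node)   # sorted list of tags pointing to `node`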
2161 2166
2162 2167 def nodebookmarks(self, node):
2163 2168 """return the list of bookmarks pointing to the specified node"""
2164 2169 return self._bookmarks.names(node)
2165 2170
2166 2171 def branchmap(self):
2167 2172 """returns a dictionary {branch: [branchheads]} with branchheads
2168 2173 ordered by increasing revision number"""
2169 2174 return self._branchcaches[self]
2170 2175
2171 2176 @unfilteredmethod
2172 2177 def revbranchcache(self):
2173 2178 if not self._revbranchcache:
2174 2179 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2175 2180 return self._revbranchcache
2176 2181
2177 2182 def register_changeset(self, rev, changelogrevision):
2178 2183 self.revbranchcache().setdata(rev, changelogrevision)
2179 2184
2180 2185 def branchtip(self, branch, ignoremissing=False):
2181 2186 """return the tip node for a given branch
2182 2187
2183 2188 If ignoremissing is True, then this method will not raise an error.
2184 2189 This is helpful for callers that only expect None for a missing branch
2185 2190 (e.g. namespace).
2186 2191
2187 2192 """
2188 2193 try:
2189 2194 return self.branchmap().branchtip(branch)
2190 2195 except KeyError:
2191 2196 if not ignoremissing:
2192 2197 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2193 2198 else:
2194 2199 pass
2195 2200
2196 2201 def lookup(self, key):
2197 2202 node = scmutil.revsymbol(self, key).node()
2198 2203 if node is None:
2199 2204 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2200 2205 return node
2201 2206
2202 2207 def lookupbranch(self, key):
2203 2208 if self.branchmap().hasbranch(key):
2204 2209 return key
2205 2210
2206 2211 return scmutil.revsymbol(self, key).branch()
2207 2212
2208 2213 def known(self, nodes):
2209 2214 cl = self.changelog
2210 2215 get_rev = cl.index.get_rev
2211 2216 filtered = cl.filteredrevs
2212 2217 result = []
2213 2218 for n in nodes:
2214 2219 r = get_rev(n)
2215 2220 resp = not (r is None or r in filtered)
2216 2221 result.append(resp)
2217 2222 return result
2218 2223
2219 2224 def local(self):
2220 2225 return self
2221 2226
2222 2227 def publishing(self):
2223 2228 # it's safe (and desirable) to trust the publish flag unconditionally
2224 2229 # so that we don't finalize changes shared between users via ssh or nfs
2225 2230 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2226 2231
2227 2232 def cancopy(self):
2228 2233 # so statichttprepo's override of local() works
2229 2234 if not self.local():
2230 2235 return False
2231 2236 if not self.publishing():
2232 2237 return True
2233 2238 # if publishing we can't copy if there is filtered content
2234 2239 return not self.filtered(b'visible').changelog.filteredrevs
2235 2240
2236 2241 def shared(self):
2237 2242 '''the type of shared repository (None if not shared)'''
2238 2243 if self.sharedpath != self.path:
2239 2244 return b'store'
2240 2245 return None
2241 2246
2242 2247 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2243 2248 return self.vfs.reljoin(self.root, f, *insidef)
2244 2249
2245 2250 def setparents(self, p1, p2=None):
2246 2251 if p2 is None:
2247 2252 p2 = self.nullid
2248 2253 self[None].setparents(p1, p2)
2249 2254 self._quick_access_changeid_invalidate()
2250 2255
2251 2256 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2252 2257 """changeid must be a changeset revision, if specified.
2253 2258 fileid can be a file revision or node."""
2254 2259 return context.filectx(
2255 2260 self, path, changeid, fileid, changectx=changectx
2256 2261 )
2257 2262
2258 2263 def getcwd(self) -> bytes:
2259 2264 return self.dirstate.getcwd()
2260 2265
2261 2266 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2262 2267 return self.dirstate.pathto(f, cwd)
2263 2268
2264 2269 def _loadfilter(self, filter):
2265 2270 if filter not in self._filterpats:
2266 2271 l = []
2267 2272 for pat, cmd in self.ui.configitems(filter):
2268 2273 if cmd == b'!':
2269 2274 continue
2270 2275 mf = matchmod.match(self.root, b'', [pat])
2271 2276 fn = None
2272 2277 params = cmd
2273 2278 for name, filterfn in self._datafilters.items():
2274 2279 if cmd.startswith(name):
2275 2280 fn = filterfn
2276 2281 params = cmd[len(name) :].lstrip()
2277 2282 break
2278 2283 if not fn:
2279 2284 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2280 2285 fn.__name__ = 'commandfilter'
2281 2286 # Wrap old filters not supporting keyword arguments
2282 2287 if not pycompat.getargspec(fn)[2]:
2283 2288 oldfn = fn
2284 2289 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2285 2290 fn.__name__ = 'compat-' + oldfn.__name__
2286 2291 l.append((mf, fn, params))
2287 2292 self._filterpats[filter] = l
2288 2293 return self._filterpats[filter]
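# The [encode]/[decode] configuration read here looks like the following
# hgrc snippet (an illustrative example mirroring the one in
# `hg help config`, not a shipped default):
#
#   [encode]
#   *.gz = pipe: gunzip
#   [decode]
#   *.gz = gzip
#
# A pattern mapped to b'!' is skipped, disabling filtering for it.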
2289 2294
2290 2295 def _filter(self, filterpats, filename, data):
2291 2296 for mf, fn, cmd in filterpats:
2292 2297 if mf(filename):
2293 2298 self.ui.debug(
2294 2299 b"filtering %s through %s\n"
2295 2300 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2296 2301 )
2297 2302 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2298 2303 break
2299 2304
2300 2305 return data
2301 2306
2302 2307 @unfilteredpropertycache
2303 2308 def _encodefilterpats(self):
2304 2309 return self._loadfilter(b'encode')
2305 2310
2306 2311 @unfilteredpropertycache
2307 2312 def _decodefilterpats(self):
2308 2313 return self._loadfilter(b'decode')
2309 2314
2310 2315 def adddatafilter(self, name, filter):
2311 2316 self._datafilters[name] = filter
2312 2317
2313 2318 def wread(self, filename: bytes) -> bytes:
2314 2319 if self.wvfs.islink(filename):
2315 2320 data = self.wvfs.readlink(filename)
2316 2321 else:
2317 2322 data = self.wvfs.read(filename)
2318 2323 return self._filter(self._encodefilterpats, filename, data)
2319 2324
2320 2325 def wwrite(
2321 2326 self,
2322 2327 filename: bytes,
2323 2328 data: bytes,
2324 2329 flags: bytes,
2325 2330 backgroundclose=False,
2326 2331 **kwargs
2327 2332 ) -> int:
2328 2333 """write ``data`` into ``filename`` in the working directory
2329 2334
2330 2335 This returns the length of the written (possibly decoded) data.
2331 2336 """
2332 2337 data = self._filter(self._decodefilterpats, filename, data)
2333 2338 if b'l' in flags:
2334 2339 self.wvfs.symlink(data, filename)
2335 2340 else:
2336 2341 self.wvfs.write(
2337 2342 filename, data, backgroundclose=backgroundclose, **kwargs
2338 2343 )
2339 2344 if b'x' in flags:
2340 2345 self.wvfs.setflags(filename, False, True)
2341 2346 else:
2342 2347 self.wvfs.setflags(filename, False, False)
2343 2348 return len(data)
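# A sketch of the flag handling (hypothetical paths): b'l' writes a
# symlink, b'x' marks the file executable, b'' writes a plain file.
#
#   repo.wwrite(b'docs/readme.txt', b'hello\n', b'')
#   repo.wwrite(b'bin/tool', b'#!/bin/sh\n', b'x')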
2344 2349
2345 2350 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2346 2351 return self._filter(self._decodefilterpats, filename, data)
2347 2352
2348 2353 def currenttransaction(self):
2349 2354 """return the current transaction or None if non exists"""
2350 2355 if self._transref:
2351 2356 tr = self._transref()
2352 2357 else:
2353 2358 tr = None
2354 2359
2355 2360 if tr and tr.running():
2356 2361 return tr
2357 2362 return None
2358 2363
2359 2364 def transaction(self, desc, report=None):
2360 2365 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2361 2366 b'devel', b'check-locks'
2362 2367 ):
2363 2368 if self._currentlock(self._lockref) is None:
2364 2369 raise error.ProgrammingError(b'transaction requires locking')
2365 2370 tr = self.currenttransaction()
2366 2371 if tr is not None:
2367 2372 return tr.nest(name=desc)
2368 2373
2369 2374 # abort here if the journal already exists
2370 2375 if self.svfs.exists(b"journal"):
2371 2376 raise error.RepoError(
2372 2377 _(b"abandoned transaction found"),
2373 2378 hint=_(b"run 'hg recover' to clean up transaction"),
2374 2379 )
2375 2380
2376 2381 idbase = b"%.40f#%f" % (random.random(), time.time())
2377 2382 ha = hex(hashutil.sha1(idbase).digest())
2378 2383 txnid = b'TXN:' + ha
2379 2384 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2380 2385
2381 2386 self._writejournal(desc)
2382 2387 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2383 2388 if report:
2384 2389 rp = report
2385 2390 else:
2386 2391 rp = self.ui.warn
2387 2392 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2388 2393 # we must avoid cyclic reference between repo and transaction.
2389 2394 reporef = weakref.ref(self)
2390 2395 # Code to track tag movement
2391 2396 #
2392 2397 # Since tags are all handled as file content, it is actually quite hard
2393 2398 # to track these movements from a code perspective. So we fall back to
2394 2399 # tracking at the repository level. One could envision tracking changes
2395 2400 # to the '.hgtags' file through changegroup application, but that fails
2396 2401 # to cope with cases where a transaction exposes new heads without a
2397 2402 # changegroup being involved (e.g. phase movement).
2398 2403 #
2399 2404 # For now, we gate the feature behind a flag since it likely comes
2400 2405 # with performance impacts. The current code runs more often than needed
2401 2406 # and does not use caches as much as it could. The current focus is on
2402 2407 # the behavior of the feature so we disable it by default. The flag
2403 2408 # will be removed when we are happy with the performance impact.
2404 2409 #
2405 2410 # Once this feature is no longer experimental move the following
2406 2411 # documentation to the appropriate help section:
2407 2412 #
2408 2413 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2409 2414 # tags (new or changed or deleted tags). In addition the details of
2410 2415 # these changes are made available in a file at:
2411 2416 # ``REPOROOT/.hg/changes/tags.changes``.
2412 2417 # Make sure you check for HG_TAG_MOVED before reading that file as it
2413 2418 # might exist from a previous transaction even if no tags were touched
2414 2419 # in this one. Changes are recorded in a line-based format::
2415 2420 #
2416 2421 # <action> <hex-node> <tag-name>\n
2417 2422 #
2418 2423 # Actions are defined as follows:
2419 2424 # "-R": tag is removed,
2420 2425 # "+A": tag is added,
2421 2426 # "-M": tag is moved (old value),
2422 2427 # "+M": tag is moved (new value),
2423 2428 tracktags = lambda x: None
2424 2429 # experimental config: experimental.hook-track-tags
2425 2430 shouldtracktags = self.ui.configbool(
2426 2431 b'experimental', b'hook-track-tags'
2427 2432 )
2428 2433 if desc != b'strip' and shouldtracktags:
2429 2434 oldheads = self.changelog.headrevs()
2430 2435
2431 2436 def tracktags(tr2):
2432 2437 repo = reporef()
2433 2438 assert repo is not None # help pytype
2434 2439 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2435 2440 newheads = repo.changelog.headrevs()
2436 2441 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2437 2442 # note: we compare lists here.
2438 2443 # As we do it only once, building sets would not be cheaper.
2439 2444 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2440 2445 if changes:
2441 2446 tr2.hookargs[b'tag_moved'] = b'1'
2442 2447 with repo.vfs(
2443 2448 b'changes/tags.changes', b'w', atomictemp=True
2444 2449 ) as changesfile:
2445 2450 # note: we do not register the file with the transaction
2446 2451 # because we need it to still exist when the transaction
2447 2452 # is closed (for txnclose hooks)
2448 2453 tagsmod.writediff(changesfile, changes)
2449 2454
2450 2455 def validate(tr2):
2451 2456 """will run pre-closing hooks"""
2452 2457 # XXX the transaction API is a bit lacking here so we take a hacky
2453 2458 # path for now
2454 2459 #
2455 2460 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2456 2461 # dict is copied before these run. In addition, we need the data
2457 2462 # available to in-memory hooks too.
2458 2463 #
2459 2464 # Moreover, we also need to make sure this runs before txnclose
2460 2465 # hooks and there is no "pending" mechanism that would execute
2461 2466 # logic only if hooks are about to run.
2462 2467 #
2463 2468 # Fixing this limitation of the transaction is also needed to track
2464 2469 # other families of changes (bookmarks, phases, obsolescence).
2465 2470 #
2466 2471 # This will have to be fixed before we remove the experimental
2467 2472 # gating.
2468 2473 tracktags(tr2)
2469 2474 repo = reporef()
2470 2475 assert repo is not None # help pytype
2471 2476
2472 2477 singleheadopt = (b'experimental', b'single-head-per-branch')
2473 2478 singlehead = repo.ui.configbool(*singleheadopt)
2474 2479 if singlehead:
2475 2480 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2476 2481 accountclosed = singleheadsub.get(
2477 2482 b"account-closed-heads", False
2478 2483 )
2479 2484 if singleheadsub.get(b"public-changes-only", False):
2480 2485 filtername = b"immutable"
2481 2486 else:
2482 2487 filtername = b"visible"
2483 2488 scmutil.enforcesinglehead(
2484 2489 repo, tr2, desc, accountclosed, filtername
2485 2490 )
2486 2491 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2487 2492 for name, (old, new) in sorted(
2488 2493 tr.changes[b'bookmarks'].items()
2489 2494 ):
2490 2495 args = tr.hookargs.copy()
2491 2496 args.update(bookmarks.preparehookargs(name, old, new))
2492 2497 repo.hook(
2493 2498 b'pretxnclose-bookmark',
2494 2499 throw=True,
2495 2500 **pycompat.strkwargs(args)
2496 2501 )
2497 2502 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2498 2503 cl = repo.unfiltered().changelog
2499 2504 for revs, (old, new) in tr.changes[b'phases']:
2500 2505 for rev in revs:
2501 2506 args = tr.hookargs.copy()
2502 2507 node = hex(cl.node(rev))
2503 2508 args.update(phases.preparehookargs(node, old, new))
2504 2509 repo.hook(
2505 2510 b'pretxnclose-phase',
2506 2511 throw=True,
2507 2512 **pycompat.strkwargs(args)
2508 2513 )
2509 2514
2510 2515 repo.hook(
2511 2516 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2512 2517 )
2513 2518
2514 2519 def releasefn(tr, success):
2515 2520 repo = reporef()
2516 2521 if repo is None:
2517 2522 # If the repo has been GC'd (and this release function is being
2518 2523 # called from transaction.__del__), there's not much we can do,
2519 2524 # so just leave the unfinished transaction there and let the
2520 2525 # user run `hg recover`.
2521 2526 return
2522 2527 if success:
2523 2528 # this should be explicitly invoked here, because
2524 2529 # in-memory changes aren't written out at closing
2525 2530 # transaction, if tr.addfilegenerator (via
2526 2531 # dirstate.write or so) isn't invoked while
2527 2532 # transaction running
2528 2533 repo.dirstate.write(None)
2529 2534 else:
2530 2535 # discard all changes (including ones already written
2531 2536 # out) in this transaction
2532 2537 narrowspec.restorebackup(self, b'journal.narrowspec')
2533 2538 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2534 2539 repo.dirstate.restorebackup(None, b'journal.dirstate')
2535 2540
2536 2541 repo.invalidate(clearfilecache=True)
2537 2542
2538 2543 tr = transaction.transaction(
2539 2544 rp,
2540 2545 self.svfs,
2541 2546 vfsmap,
2542 2547 b"journal",
2543 2548 b"undo",
2544 2549 aftertrans(renames),
2545 2550 self.store.createmode,
2546 2551 validator=validate,
2547 2552 releasefn=releasefn,
2548 2553 checkambigfiles=_cachedfiles,
2549 2554 name=desc,
2550 2555 )
2551 2556 tr.changes[b'origrepolen'] = len(self)
2552 2557 tr.changes[b'obsmarkers'] = set()
2553 2558 tr.changes[b'phases'] = []
2554 2559 tr.changes[b'bookmarks'] = {}
2555 2560
2556 2561 tr.hookargs[b'txnid'] = txnid
2557 2562 tr.hookargs[b'txnname'] = desc
2558 2563 tr.hookargs[b'changes'] = tr.changes
2559 2564 # note: writing the fncache only during finalize means that the file is
2560 2565 # outdated when running hooks. As fncache is used for streaming clone,
2561 2566 # this is not expected to break anything that happens during the hooks.
2562 2567 tr.addfinalize(b'flush-fncache', self.store.write)
2563 2568
2564 2569 def txnclosehook(tr2):
2565 2570 """To be run if transaction is successful, will schedule a hook run"""
2566 2571 # Don't reference tr2 in hook() so we don't hold a reference.
2567 2572 # This reduces memory consumption when there are multiple
2568 2573 # transactions per lock. This can likely go away if issue5045
2569 2574 # fixes the function accumulation.
2570 2575 hookargs = tr2.hookargs
2571 2576
2572 2577 def hookfunc(unused_success):
2573 2578 repo = reporef()
2574 2579 assert repo is not None # help pytype
2575 2580
2576 2581 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2577 2582 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2578 2583 for name, (old, new) in bmchanges:
2579 2584 args = tr.hookargs.copy()
2580 2585 args.update(bookmarks.preparehookargs(name, old, new))
2581 2586 repo.hook(
2582 2587 b'txnclose-bookmark',
2583 2588 throw=False,
2584 2589 **pycompat.strkwargs(args)
2585 2590 )
2586 2591
2587 2592 if hook.hashook(repo.ui, b'txnclose-phase'):
2588 2593 cl = repo.unfiltered().changelog
2589 2594 phasemv = sorted(
2590 2595 tr.changes[b'phases'], key=lambda r: r[0][0]
2591 2596 )
2592 2597 for revs, (old, new) in phasemv:
2593 2598 for rev in revs:
2594 2599 args = tr.hookargs.copy()
2595 2600 node = hex(cl.node(rev))
2596 2601 args.update(phases.preparehookargs(node, old, new))
2597 2602 repo.hook(
2598 2603 b'txnclose-phase',
2599 2604 throw=False,
2600 2605 **pycompat.strkwargs(args)
2601 2606 )
2602 2607
2603 2608 repo.hook(
2604 2609 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2605 2610 )
2606 2611
2607 2612 repo = reporef()
2608 2613 assert repo is not None # help pytype
2609 2614 repo._afterlock(hookfunc)
2610 2615
2611 2616 tr.addfinalize(b'txnclose-hook', txnclosehook)
2612 2617 # Include a leading "-" to make it happen before the transaction summary
2613 2618 # reports registered via scmutil.registersummarycallback() whose names
2614 2619 # are 00-txnreport etc. That way, the caches will be warm when the
2615 2620 # callbacks run.
2616 2621 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2617 2622
2618 2623 def txnaborthook(tr2):
2619 2624 """To be run if transaction is aborted"""
2620 2625 repo = reporef()
2621 2626 assert repo is not None # help pytype
2622 2627 repo.hook(
2623 2628 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2624 2629 )
2625 2630
2626 2631 tr.addabort(b'txnabort-hook', txnaborthook)
2627 2632 # avoid eager cache invalidation. in-memory data should be identical
2628 2633 # to stored data if transaction has no error.
2629 2634 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2630 2635 self._transref = weakref.ref(tr)
2631 2636 scmutil.registersummarycallback(self, tr, desc)
2632 2637 return tr
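# Typical usage (a sketch assuming an existing `repo`): the store lock must
# be held before opening a transaction, and a nested call returns a nested
# transaction sharing the same journal.
#
#   with repo.lock():
#       with repo.transaction(b'my-operation') as tr:
#           pass  # mutate the store; the transaction closes on success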
2633 2638
2634 2639 def _journalfiles(self):
2635 2640 first = (
2636 2641 (self.svfs, b'journal'),
2637 2642 (self.svfs, b'journal.narrowspec'),
2638 2643 (self.vfs, b'journal.narrowspec.dirstate'),
2639 2644 (self.vfs, b'journal.dirstate'),
2640 2645 )
2641 2646 middle = []
2642 2647 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2643 2648 if dirstate_data is not None:
2644 2649 middle.append((self.vfs, dirstate_data))
2645 2650 end = (
2646 2651 (self.vfs, b'journal.branch'),
2647 2652 (self.vfs, b'journal.desc'),
2648 2653 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2649 2654 (self.svfs, b'journal.phaseroots'),
2650 2655 )
2651 2656 return first + tuple(middle) + end
2652 2657
2653 2658 def undofiles(self):
2654 2659 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2655 2660
2656 2661 @unfilteredmethod
2657 2662 def _writejournal(self, desc):
2658 2663 self.dirstate.savebackup(None, b'journal.dirstate')
2659 2664 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2660 2665 narrowspec.savebackup(self, b'journal.narrowspec')
2661 2666 self.vfs.write(
2662 2667 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2663 2668 )
2664 2669 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2665 2670 bookmarksvfs = bookmarks.bookmarksvfs(self)
2666 2671 bookmarksvfs.write(
2667 2672 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2668 2673 )
2669 2674 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2670 2675
2671 2676 def recover(self):
2672 2677 with self.lock():
2673 2678 if self.svfs.exists(b"journal"):
2674 2679 self.ui.status(_(b"rolling back interrupted transaction\n"))
2675 2680 vfsmap = {
2676 2681 b'': self.svfs,
2677 2682 b'plain': self.vfs,
2678 2683 }
2679 2684 transaction.rollback(
2680 2685 self.svfs,
2681 2686 vfsmap,
2682 2687 b"journal",
2683 2688 self.ui.warn,
2684 2689 checkambigfiles=_cachedfiles,
2685 2690 )
2686 2691 self.invalidate()
2687 2692 return True
2688 2693 else:
2689 2694 self.ui.warn(_(b"no interrupted transaction available\n"))
2690 2695 return False
2691 2696
2692 2697 def rollback(self, dryrun=False, force=False):
2693 2698 wlock = lock = dsguard = None
2694 2699 try:
2695 2700 wlock = self.wlock()
2696 2701 lock = self.lock()
2697 2702 if self.svfs.exists(b"undo"):
2698 2703 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2699 2704
2700 2705 return self._rollback(dryrun, force, dsguard)
2701 2706 else:
2702 2707 self.ui.warn(_(b"no rollback information available\n"))
2703 2708 return 1
2704 2709 finally:
2705 2710 release(dsguard, lock, wlock)
2706 2711
2707 2712 @unfilteredmethod # Until we get smarter cache management
2708 2713 def _rollback(self, dryrun, force, dsguard):
2709 2714 ui = self.ui
2710 2715 try:
2711 2716 args = self.vfs.read(b'undo.desc').splitlines()
2712 2717 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2713 2718 if len(args) >= 3:
2714 2719 detail = args[2]
2715 2720 oldtip = oldlen - 1
2716 2721
2717 2722 if detail and ui.verbose:
2718 2723 msg = _(
2719 2724 b'repository tip rolled back to revision %d'
2720 2725 b' (undo %s: %s)\n'
2721 2726 ) % (oldtip, desc, detail)
2722 2727 else:
2723 2728 msg = _(
2724 2729 b'repository tip rolled back to revision %d (undo %s)\n'
2725 2730 ) % (oldtip, desc)
2726 2731 except IOError:
2727 2732 msg = _(b'rolling back unknown transaction\n')
2728 2733 desc = None
2729 2734
2730 2735 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2731 2736 raise error.Abort(
2732 2737 _(
2733 2738 b'rollback of last commit while not checked out '
2734 2739 b'may lose data'
2735 2740 ),
2736 2741 hint=_(b'use -f to force'),
2737 2742 )
2738 2743
2739 2744 ui.status(msg)
2740 2745 if dryrun:
2741 2746 return 0
2742 2747
2743 2748 parents = self.dirstate.parents()
2744 2749 self.destroying()
2745 2750 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2746 2751 transaction.rollback(
2747 2752 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2748 2753 )
2749 2754 bookmarksvfs = bookmarks.bookmarksvfs(self)
2750 2755 if bookmarksvfs.exists(b'undo.bookmarks'):
2751 2756 bookmarksvfs.rename(
2752 2757 b'undo.bookmarks', b'bookmarks', checkambig=True
2753 2758 )
2754 2759 if self.svfs.exists(b'undo.phaseroots'):
2755 2760 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2756 2761 self.invalidate()
2757 2762
2758 2763 has_node = self.changelog.index.has_node
2759 2764 parentgone = any(not has_node(p) for p in parents)
2760 2765 if parentgone:
2761 2766 # prevent dirstateguard from overwriting already restored one
2762 2767 dsguard.close()
2763 2768
2764 2769 narrowspec.restorebackup(self, b'undo.narrowspec')
2765 2770 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2766 2771 self.dirstate.restorebackup(None, b'undo.dirstate')
2767 2772 try:
2768 2773 branch = self.vfs.read(b'undo.branch')
2769 2774 self.dirstate.setbranch(encoding.tolocal(branch))
2770 2775 except IOError:
2771 2776 ui.warn(
2772 2777 _(
2773 2778 b'named branch could not be reset: '
2774 2779 b'current branch is still \'%s\'\n'
2775 2780 )
2776 2781 % self.dirstate.branch()
2777 2782 )
2778 2783
2779 2784 parents = tuple([p.rev() for p in self[None].parents()])
2780 2785 if len(parents) > 1:
2781 2786 ui.status(
2782 2787 _(
2783 2788 b'working directory now based on '
2784 2789 b'revisions %d and %d\n'
2785 2790 )
2786 2791 % parents
2787 2792 )
2788 2793 else:
2789 2794 ui.status(
2790 2795 _(b'working directory now based on revision %d\n') % parents
2791 2796 )
2792 2797 mergestatemod.mergestate.clean(self)
2793 2798
2794 2799 # TODO: if we know which new heads may result from this rollback, pass
2795 2800 # them to destroy(), which will prevent the branchhead cache from being
2796 2801 # invalidated.
2797 2802 self.destroyed()
2798 2803 return 0
2799 2804
2800 2805 def _buildcacheupdater(self, newtransaction):
2801 2806 """called during transaction to build the callback updating cache
2802 2807
2803 2808 Lives on the repository to help extensions that might want to augment
2804 2809 this logic. For this purpose, the created transaction is passed to the
2805 2810 method.
2806 2811 """
2807 2812 # we must avoid cyclic reference between repo and transaction.
2808 2813 reporef = weakref.ref(self)
2809 2814
2810 2815 def updater(tr):
2811 2816 repo = reporef()
2812 2817 assert repo is not None # help pytype
2813 2818 repo.updatecaches(tr)
2814 2819
2815 2820 return updater
2816 2821
2817 2822 @unfilteredmethod
2818 2823 def updatecaches(self, tr=None, full=False, caches=None):
2819 2824 """warm appropriate caches
2820 2825
2821 2826 If this function is called after a transaction closed, the transaction
2822 2827 will be available in the 'tr' argument. This can be used to selectively
2823 2828 update caches relevant to the changes in that transaction.
2824 2829
2825 2830 If 'full' is set, make sure all caches the function knows about have
2826 2831 up-to-date data. Even the ones usually loaded more lazily.
2827 2832
2828 2833 The `full` argument can take a special "post-clone" value. In this case
2829 2834 the cache warming is performed after a clone, and some of the slower
2830 2835 caches might be skipped, namely the `.fnodetags` one. This argument is
2831 2836 5.8-specific as we plan for a cleaner way to deal with this in 5.9.
2832 2837 """
2833 2838 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2834 2839 # During strip, many caches are invalid but
2835 2840 # later call to `destroyed` will refresh them.
2836 2841 return
2837 2842
2838 2843 unfi = self.unfiltered()
2839 2844
2840 2845 if full:
2841 2846 msg = (
2842 2847 "`full` argument for `repo.updatecaches` is deprecated\n"
2843 2848 "(use `caches=repository.CACHE_ALL` instead)"
2844 2849 )
2845 2850 self.ui.deprecwarn(msg, b"5.9")
2846 2851 caches = repository.CACHES_ALL
2847 2852 if full == b"post-clone":
2848 2853 caches = repository.CACHES_POST_CLONE
2850 2855 elif caches is None:
2851 2856 caches = repository.CACHES_DEFAULT
2852 2857
2853 2858 if repository.CACHE_BRANCHMAP_SERVED in caches:
2854 2859 if tr is None or tr.changes[b'origrepolen'] < len(self):
2855 2860 # accessing the 'served' branchmap should refresh all the others,
2856 2861 self.ui.debug(b'updating the branch cache\n')
2857 2862 self.filtered(b'served').branchmap()
2858 2863 self.filtered(b'served.hidden').branchmap()
2859 2864 # flush all possibly delayed write.
2860 2865 self._branchcaches.write_delayed(self)
2861 2866
2862 2867 if repository.CACHE_CHANGELOG_CACHE in caches:
2863 2868 self.changelog.update_caches(transaction=tr)
2864 2869
2865 2870 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2866 2871 self.manifestlog.update_caches(transaction=tr)
2867 2872
2868 2873 if repository.CACHE_REV_BRANCH in caches:
2869 2874 rbc = unfi.revbranchcache()
2870 2875 for r in unfi.changelog:
2871 2876 rbc.branchinfo(r)
2872 2877 rbc.write()
2873 2878
2874 2879 if repository.CACHE_FULL_MANIFEST in caches:
2875 2880 # ensure the working copy parents are in the manifestfulltextcache
2876 2881 for ctx in self[b'.'].parents():
2877 2882 ctx.manifest() # accessing the manifest is enough
2878 2883
2879 2884 if repository.CACHE_FILE_NODE_TAGS in caches:
2880 2885 # accessing fnode cache warms the cache
2881 2886 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2882 2887
2883 2888 if repository.CACHE_TAGS_DEFAULT in caches:
2884 2889 # accessing tags warm the cache
2885 2890 self.tags()
2886 2891 if repository.CACHE_TAGS_SERVED in caches:
2887 2892 self.filtered(b'served').tags()
2888 2893
2889 2894 if repository.CACHE_BRANCHMAP_ALL in caches:
2890 2895 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2891 2896 # so we're forcing a write to cause these caches to be warmed up
2892 2897 # even if they haven't explicitly been requested yet (if they've
2893 2898 # never been used by hg, they won't ever have been written, even if
2894 2899 # they're a subset of another kind of cache that *has* been used).
2895 2900 for filt in repoview.filtertable.keys():
2896 2901 filtered = self.filtered(filt)
2897 2902 filtered.branchmap().write(filtered)
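# Usage sketch (constants come from the `repository` interface module):
#
#   repo.updatecaches(caches=repository.CACHES_ALL)  # warm everything
#   repo.updatecaches(tr)  # selective update after a transaction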
2898 2903
2899 2904 def invalidatecaches(self):
2901 2906 if '_tagscache' in vars(self):
2902 2907 # can't use delattr on proxy
2903 2908 del self.__dict__['_tagscache']
2904 2909
2905 2910 self._branchcaches.clear()
2906 2911 self.invalidatevolatilesets()
2907 2912 self._sparsesignaturecache.clear()
2908 2913
2909 2914 def invalidatevolatilesets(self):
2910 2915 self.filteredrevcache.clear()
2911 2916 obsolete.clearobscaches(self)
2912 2917 self._quick_access_changeid_invalidate()
2913 2918
2914 2919 def invalidatedirstate(self):
2915 2920 """Invalidates the dirstate, causing the next call to dirstate
2916 2921 to check if it was modified since the last time it was read,
2917 2922 rereading it if it has.
2918 2923
2919 2924 This is different from dirstate.invalidate() in that it doesn't always
2920 2925 reread the dirstate. Use dirstate.invalidate() if you want to
2921 2926 explicitly read the dirstate again (i.e. restoring it to a previous
2922 2927 known good state)."""
2923 2928 if hasunfilteredcache(self, 'dirstate'):
2924 2929 for k in self.dirstate._filecache:
2925 2930 try:
2926 2931 delattr(self.dirstate, k)
2927 2932 except AttributeError:
2928 2933 pass
2929 2934 delattr(self.unfiltered(), 'dirstate')
2930 2935
2931 2936 def invalidate(self, clearfilecache=False):
2932 2937 """Invalidates both store and non-store parts other than dirstate
2933 2938
2934 2939 If a transaction is running, invalidation of store is omitted,
2935 2940 because discarding in-memory changes might cause inconsistency
2936 2941 (e.g. incomplete fncache causes unintentional failure, but
2937 2942 redundant one doesn't).
2938 2943 """
2939 2944 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2940 2945 for k in list(self._filecache.keys()):
2941 2946 # dirstate is invalidated separately in invalidatedirstate()
2942 2947 if k == b'dirstate':
2943 2948 continue
2944 2949 if (
2945 2950 k == b'changelog'
2946 2951 and self.currenttransaction()
2947 2952 and self.changelog._delayed
2948 2953 ):
2949 2954 # The changelog object may store unwritten revisions. We don't
2950 2955 # want to lose them.
2951 2956 # TODO: Solve the problem instead of working around it.
2952 2957 continue
2953 2958
2954 2959 if clearfilecache:
2955 2960 del self._filecache[k]
2956 2961 try:
2957 2962 delattr(unfiltered, k)
2958 2963 except AttributeError:
2959 2964 pass
2960 2965 self.invalidatecaches()
2961 2966 if not self.currenttransaction():
2962 2967 # TODO: Changing contents of store outside transaction
2963 2968 # causes inconsistency. We should make in-memory store
2964 2969 # changes detectable, and abort if changed.
2965 2970 self.store.invalidatecaches()
2966 2971
2967 2972 def invalidateall(self):
2968 2973 """Fully invalidates both store and non-store parts, causing the
2969 2974 subsequent operation to reread any outside changes."""
2970 2975 # extension should hook this to invalidate its caches
2971 2976 self.invalidate()
2972 2977 self.invalidatedirstate()
2973 2978
2974 2979 @unfilteredmethod
2975 2980 def _refreshfilecachestats(self, tr):
2976 2981 """Reload stats of cached files so that they are flagged as valid"""
2977 2982 for k, ce in self._filecache.items():
2978 2983 k = pycompat.sysstr(k)
2979 2984 if k == 'dirstate' or k not in self.__dict__:
2980 2985 continue
2981 2986 ce.refresh()
2982 2987
2983 2988 def _lock(
2984 2989 self,
2985 2990 vfs,
2986 2991 lockname,
2987 2992 wait,
2988 2993 releasefn,
2989 2994 acquirefn,
2990 2995 desc,
2991 2996 ):
2992 2997 timeout = 0
2993 2998 warntimeout = 0
2994 2999 if wait:
2995 3000 timeout = self.ui.configint(b"ui", b"timeout")
2996 3001 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2997 3002 # internal config: ui.signal-safe-lock
2998 3003 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2999 3004
3000 3005 l = lockmod.trylock(
3001 3006 self.ui,
3002 3007 vfs,
3003 3008 lockname,
3004 3009 timeout,
3005 3010 warntimeout,
3006 3011 releasefn=releasefn,
3007 3012 acquirefn=acquirefn,
3008 3013 desc=desc,
3009 3014 signalsafe=signalsafe,
3010 3015 )
3011 3016 return l
3012 3017
3013 3018 def _afterlock(self, callback):
3014 3019 """add a callback to be run when the repository is fully unlocked
3015 3020
3016 3021 The callback will be executed when the outermost lock is released
3017 3022 (with wlock being higher level than 'lock')."""
3018 3023 for ref in (self._wlockref, self._lockref):
3019 3024 l = ref and ref()
3020 3025 if l and l.held:
3021 3026 l.postrelease.append(callback)
3022 3027 break
3023 3028 else: # no lock has been found.
3024 3029 callback(True)
3025 3030
3026 3031 def lock(self, wait=True):
3027 3032 """Lock the repository store (.hg/store) and return a weak reference
3028 3033 to the lock. Use this before modifying the store (e.g. committing or
3029 3034 stripping). If you are opening a transaction, get a lock as well.
3030 3035
3031 3036 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3032 3037 'wlock' first to avoid a deadlock hazard."""
3033 3038 l = self._currentlock(self._lockref)
3034 3039 if l is not None:
3035 3040 l.lock()
3036 3041 return l
3037 3042
3038 3043 l = self._lock(
3039 3044 vfs=self.svfs,
3040 3045 lockname=b"lock",
3041 3046 wait=wait,
3042 3047 releasefn=None,
3043 3048 acquirefn=self.invalidate,
3044 3049 desc=_(b'repository %s') % self.origroot,
3045 3050 )
3046 3051 self._lockref = weakref.ref(l)
3047 3052 return l
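# Lock-ordering sketch (assuming an existing `repo`): per the docstrings
# above, always take 'wlock' before 'lock' when both are needed.
#
#   with repo.wlock(), repo.lock():
#       pass  # safe to modify both .hg and .hg/store here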
3048 3053
3049 3054 def wlock(self, wait=True):
3050 3055 """Lock the non-store parts of the repository (everything under
3051 3056 .hg except .hg/store) and return a weak reference to the lock.
3052 3057
3053 3058 Use this before modifying files in .hg.
3054 3059
3055 3060 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3056 3061 'wlock' first to avoid a deadlock hazard."""
3057 3062 l = self._wlockref() if self._wlockref else None
3058 3063 if l is not None and l.held:
3059 3064 l.lock()
3060 3065 return l
3061 3066
3062 3067 # We do not need to check for non-waiting lock acquisition. Such
3063 3068 # acquisition would not cause a deadlock, as it would just fail.
3064 3069 if wait and (
3065 3070 self.ui.configbool(b'devel', b'all-warnings')
3066 3071 or self.ui.configbool(b'devel', b'check-locks')
3067 3072 ):
3068 3073 if self._currentlock(self._lockref) is not None:
3069 3074 self.ui.develwarn(b'"wlock" acquired after "lock"')
3070 3075
3071 3076 def unlock():
3072 3077 if self.dirstate.pendingparentchange():
3073 3078 self.dirstate.invalidate()
3074 3079 else:
3075 3080 self.dirstate.write(None)
3076 3081
3077 3082 self._filecache[b'dirstate'].refresh()
3078 3083
3079 3084 l = self._lock(
3080 3085 self.vfs,
3081 3086 b"wlock",
3082 3087 wait,
3083 3088 unlock,
3084 3089 self.invalidatedirstate,
3085 3090 _(b'working directory of %s') % self.origroot,
3086 3091 )
3087 3092 self._wlockref = weakref.ref(l)
3088 3093 return l
3089 3094
3090 3095 def _currentlock(self, lockref):
3091 3096 """Returns the lock if it's held, or None if it's not."""
3092 3097 if lockref is None:
3093 3098 return None
3094 3099 l = lockref()
3095 3100 if l is None or not l.held:
3096 3101 return None
3097 3102 return l
3098 3103
3099 3104 def currentwlock(self):
3100 3105 """Returns the wlock if it's held, or None if it's not."""
3101 3106 return self._currentlock(self._wlockref)
3102 3107
3103 3108 def checkcommitpatterns(self, wctx, match, status, fail):
3104 3109 """check for commit arguments that aren't committable"""
3105 3110 if match.isexact() or match.prefix():
3106 3111 matched = set(status.modified + status.added + status.removed)
3107 3112
3108 3113 for f in match.files():
3109 3114 f = self.dirstate.normalize(f)
3110 3115 if f == b'.' or f in matched or f in wctx.substate:
3111 3116 continue
3112 3117 if f in status.deleted:
3113 3118 fail(f, _(b'file not found!'))
3114 3119 # Is it a directory that exists or used to exist?
3115 3120 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3116 3121 d = f + b'/'
3117 3122 for mf in matched:
3118 3123 if mf.startswith(d):
3119 3124 break
3120 3125 else:
3121 3126 fail(f, _(b"no match under directory!"))
3122 3127 elif f not in self.dirstate:
3123 3128 fail(f, _(b"file not tracked!"))
3124 3129
3125 3130 @unfilteredmethod
3126 3131 def commit(
3127 3132 self,
3128 3133 text=b"",
3129 3134 user=None,
3130 3135 date=None,
3131 3136 match=None,
3132 3137 force=False,
3133 3138 editor=None,
3134 3139 extra=None,
3135 3140 ):
3136 3141 """Add a new revision to current repository.
3137 3142
3138 3143 Revision information is gathered from the working directory,
3139 3144 match can be used to filter the committed files. If editor is
3140 3145 supplied, it is called to get a commit message.
3141 3146 """
3142 3147 if extra is None:
3143 3148 extra = {}
3144 3149
3145 3150 def fail(f, msg):
3146 3151 raise error.InputError(b'%s: %s' % (f, msg))
3147 3152
3148 3153 if not match:
3149 3154 match = matchmod.always()
3150 3155
3151 3156 if not force:
3152 3157 match.bad = fail
3153 3158
3154 3159 # lock() for recent changelog (see issue4368)
3155 3160 with self.wlock(), self.lock():
3156 3161 wctx = self[None]
3157 3162 merge = len(wctx.parents()) > 1
3158 3163
3159 3164 if not force and merge and not match.always():
3160 3165 raise error.Abort(
3161 3166 _(
3162 3167 b'cannot partially commit a merge '
3163 3168 b'(do not specify files or patterns)'
3164 3169 )
3165 3170 )
3166 3171
3167 3172 status = self.status(match=match, clean=force)
3168 3173 if force:
3169 3174 status.modified.extend(
3170 3175 status.clean
3171 3176 ) # mq may commit clean files
3172 3177
3173 3178 # check subrepos
3174 3179 subs, commitsubs, newstate = subrepoutil.precommit(
3175 3180 self.ui, wctx, status, match, force=force
3176 3181 )
3177 3182
3178 3183 # make sure all explicit patterns are matched
3179 3184 if not force:
3180 3185 self.checkcommitpatterns(wctx, match, status, fail)
3181 3186
3182 3187 cctx = context.workingcommitctx(
3183 3188 self, status, text, user, date, extra
3184 3189 )
3185 3190
3186 3191 ms = mergestatemod.mergestate.read(self)
3187 3192 mergeutil.checkunresolved(ms)
3188 3193
3189 3194 # internal config: ui.allowemptycommit
3190 3195 if cctx.isempty() and not self.ui.configbool(
3191 3196 b'ui', b'allowemptycommit'
3192 3197 ):
3193 3198 self.ui.debug(b'nothing to commit, clearing merge state\n')
3194 3199 ms.reset()
3195 3200 return None
3196 3201
3197 3202 if merge and cctx.deleted():
3198 3203 raise error.Abort(_(b"cannot commit merge with missing files"))
3199 3204
3200 3205 if editor:
3201 3206 cctx._text = editor(self, cctx, subs)
3202 3207 edited = text != cctx._text
3203 3208
3204 3209 # Save commit message in case this transaction gets rolled back
3205 3210 # (e.g. by a pretxncommit hook). Leave the content alone on
3206 3211 # the assumption that the user will use the same editor again.
3207 3212 msg_path = self.savecommitmessage(cctx._text)
3208 3213
3209 3214 # commit subs and write new state
3210 3215 if subs:
3211 3216 uipathfn = scmutil.getuipathfn(self)
3212 3217 for s in sorted(commitsubs):
3213 3218 sub = wctx.sub(s)
3214 3219 self.ui.status(
3215 3220 _(b'committing subrepository %s\n')
3216 3221 % uipathfn(subrepoutil.subrelpath(sub))
3217 3222 )
3218 3223 sr = sub.commit(cctx._text, user, date)
3219 3224 newstate[s] = (newstate[s][0], sr)
3220 3225 subrepoutil.writestate(self, newstate)
3221 3226
3222 3227 p1, p2 = self.dirstate.parents()
3223 3228 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3224 3229 try:
3225 3230 self.hook(
3226 3231 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3227 3232 )
3228 3233 with self.transaction(b'commit'):
3229 3234 ret = self.commitctx(cctx, True)
3230 3235 # update bookmarks, dirstate and mergestate
3231 3236 bookmarks.update(self, [p1, p2], ret)
3232 3237 cctx.markcommitted(ret)
3233 3238 ms.reset()
3234 3239 except: # re-raises
3235 3240 if edited:
3236 3241 self.ui.write(
3237 3242 _(b'note: commit message saved in %s\n') % msg_path
3238 3243 )
3239 3244 self.ui.write(
3240 3245 _(
3241 3246 b"note: use 'hg commit --logfile "
3242 3247 b"%s --edit' to reuse it\n"
3243 3248 )
3244 3249 % msg_path
3245 3250 )
3246 3251 raise
3247 3252
3248 3253 def commithook(unused_success):
3249 3254 # hack for command that use a temporary commit (eg: histedit)
3250 3255 # temporary commit got stripped before hook release
3251 3256 if self.changelog.hasnode(ret):
3252 3257 self.hook(
3253 3258 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3254 3259 )
3255 3260
3256 3261 self._afterlock(commithook)
3257 3262 return ret
3258 3263
3259 3264 @unfilteredmethod
3260 3265 def commitctx(self, ctx, error=False, origctx=None):
3261 3266 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3262 3267
3263 3268 @unfilteredmethod
3264 3269 def destroying(self):
3265 3270 """Inform the repository that nodes are about to be destroyed.
3266 3271 Intended for use by strip and rollback, so there's a common
3267 3272 place for anything that has to be done before destroying history.
3268 3273
3269 3274 This is mostly useful for saving state that is in memory and waiting
3270 3275 to be flushed when the current lock is released. Because a call to
3271 3276 destroyed is imminent, the repo will be invalidated, causing those
3272 3277 changes either to stay in memory (waiting for the next unlock) or to
3273 3278 vanish completely.
3274 3279 """
3275 3280 # When using the same lock to commit and strip, the phasecache is left
3276 3281 # dirty after committing. Then when we strip, the repo is invalidated,
3277 3282 # causing those changes to disappear.
3278 3283 if '_phasecache' in vars(self):
3279 3284 self._phasecache.write()
3280 3285
3281 3286 @unfilteredmethod
3282 3287 def destroyed(self):
3283 3288 """Inform the repository that nodes have been destroyed.
3284 3289 Intended for use by strip and rollback, so there's a common
3285 3290 place for anything that has to be done after destroying history.
3286 3291 """
3287 3292 # When one tries to:
3288 3293 # 1) destroy nodes thus calling this method (e.g. strip)
3289 3294 # 2) use phasecache somewhere (e.g. commit)
3290 3295 #
3291 3296 # then 2) will fail because the phasecache contains nodes that were
3292 3297 # removed. We can either remove phasecache from the filecache,
3293 3298 # causing it to reload next time it is accessed, or simply filter
3294 3299 # the removed nodes now and write the updated cache.
3295 3300 self._phasecache.filterunknown(self)
3296 3301 self._phasecache.write()
3297 3302
3298 3303 # refresh all repository caches
3299 3304 self.updatecaches()
3300 3305
3301 3306 # Ensure the persistent tag cache is updated. Doing it now
3302 3307 # means that the tag cache only has to worry about destroyed
3303 3308 # heads immediately after a strip/rollback. That in turn
3304 3309 # guarantees that "cachetip == currenttip" (comparing both rev
3305 3310 # and node) always means no nodes have been added or destroyed.
3306 3311
3307 3312 # XXX this is suboptimal when qrefresh'ing: we strip the current
3308 3313 # head, refresh the tag cache, then immediately add a new head.
3309 3314 # But I think doing it this way is necessary for the "instant
3310 3315 # tag cache retrieval" case to work.
3311 3316 self.invalidate()
3312 3317
3313 3318 def status(
3314 3319 self,
3315 3320 node1=b'.',
3316 3321 node2=None,
3317 3322 match=None,
3318 3323 ignored=False,
3319 3324 clean=False,
3320 3325 unknown=False,
3321 3326 listsubrepos=False,
3322 3327 ):
3323 3328 '''a convenience method that calls node1.status(node2)'''
3324 3329 return self[node1].status(
3325 3330 node2, match, ignored, clean, unknown, listsubrepos
3326 3331 )
3327 3332
3328 3333 def addpostdsstatus(self, ps):
3329 3334 """Add a callback to run within the wlock, at the point at which status
3330 3335 fixups happen.
3331 3336
3332 3337 On status completion, callback(wctx, status) will be called with the
3333 3338 wlock held, unless the dirstate has changed from underneath or the wlock
3334 3339 couldn't be grabbed.
3335 3340
3336 3341 Callbacks should not capture and use a cached copy of the dirstate --
3337 3342 it might change in the meanwhile. Instead, they should access the
3338 3343 dirstate via wctx.repo().dirstate.
3339 3344
3340 3345 This list is emptied out after each status run -- extensions should
3341 3346 make sure they add to this list each time dirstate.status is called.
3342 3347 Extensions should also make sure they don't call this for statuses
3343 3348 that don't involve the dirstate.
3344 3349 """
3345 3350
3346 3351 # The list is located here for uniqueness reasons -- it is actually
3347 3352 # managed by the workingctx, but that isn't unique per-repo.
3348 3353 self._postdsstatus.append(ps)
3349 3354
3350 3355 def postdsstatus(self):
3351 3356 """Used by workingctx to get the list of post-dirstate-status hooks."""
3352 3357 return self._postdsstatus
3353 3358
3354 3359 def clearpostdsstatus(self):
3355 3360 """Used by workingctx to clear post-dirstate-status hooks."""
3356 3361 del self._postdsstatus[:]
3357 3362
3358 3363 def heads(self, start=None):
3359 3364 if start is None:
3360 3365 cl = self.changelog
3361 3366 headrevs = reversed(cl.headrevs())
3362 3367 return [cl.node(rev) for rev in headrevs]
3363 3368
3364 3369 heads = self.changelog.heads(start)
3365 3370 # sort the output in rev descending order
3366 3371 return sorted(heads, key=self.changelog.rev, reverse=True)
3367 3372
3368 3373 def branchheads(self, branch=None, start=None, closed=False):
3369 3374 """return a (possibly filtered) list of heads for the given branch
3370 3375
3371 3376 Heads are returned in topological order, from newest to oldest.
3372 3377 If branch is None, use the dirstate branch.
3373 3378 If start is not None, return only heads reachable from start.
3374 3379 If closed is True, return heads that are marked as closed as well.
3375 3380 """
3376 3381 if branch is None:
3377 3382 branch = self[None].branch()
3378 3383 branches = self.branchmap()
3379 3384 if not branches.hasbranch(branch):
3380 3385 return []
3381 3386 # the cache returns heads ordered lowest to highest
3382 3387 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3383 3388 if start is not None:
3384 3389 # filter out the heads that cannot be reached from startrev
3385 3390 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3386 3391 bheads = [h for h in bheads if h in fbheads]
3387 3392 return bheads
3388 3393
3389 3394 def branches(self, nodes):
3390 3395 if not nodes:
3391 3396 nodes = [self.changelog.tip()]
3392 3397 b = []
3393 3398 for n in nodes:
3394 3399 t = n
3395 3400 while True:
3396 3401 p = self.changelog.parents(n)
3397 3402 if p[1] != self.nullid or p[0] == self.nullid:
3398 3403 b.append((t, n, p[0], p[1]))
3399 3404 break
3400 3405 n = p[0]
3401 3406 return b
3402 3407
3403 3408 def between(self, pairs):
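# For each (top, bottom) pair, walk the first-parent chain from top
# towards bottom and record the nodes found at exponentially growing
# distances (1, 2, 4, 8, ...); the legacy wire protocol uses these
# samples to bisect the range between the two nodes.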
3404 3409 r = []
3405 3410
3406 3411 for top, bottom in pairs:
3407 3412 n, l, i = top, [], 0
3408 3413 f = 1
3409 3414
3410 3415 while n != bottom and n != self.nullid:
3411 3416 p = self.changelog.parents(n)[0]
3412 3417 if i == f:
3413 3418 l.append(n)
3414 3419 f = f * 2
3415 3420 n = p
3416 3421 i += 1
3417 3422
3418 3423 r.append(l)
3419 3424
3420 3425 return r
3421 3426
3422 3427 def checkpush(self, pushop):
3423 3428 """Extensions can override this function if additional checks have
3424 3429 to be performed before pushing, or call it if they override push
3425 3430 command.
3426 3431 """
3427 3432
3428 3433 @unfilteredpropertycache
3429 3434 def prepushoutgoinghooks(self):
3430 3435 """Return util.hooks consists of a pushop with repo, remote, outgoing
3431 3436 methods, which are called before pushing changesets.
3432 3437 """
3433 3438 return util.hooks()
3434 3439
3435 3440 def pushkey(self, namespace, key, old, new):
3436 3441 try:
3437 3442 tr = self.currenttransaction()
3438 3443 hookargs = {}
3439 3444 if tr is not None:
3440 3445 hookargs.update(tr.hookargs)
3441 3446 hookargs = pycompat.strkwargs(hookargs)
3442 3447 hookargs['namespace'] = namespace
3443 3448 hookargs['key'] = key
3444 3449 hookargs['old'] = old
3445 3450 hookargs['new'] = new
3446 3451 self.hook(b'prepushkey', throw=True, **hookargs)
3447 3452 except error.HookAbort as exc:
3448 3453 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3449 3454 if exc.hint:
3450 3455 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3451 3456 return False
3452 3457 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3453 3458 ret = pushkey.push(self, namespace, key, old, new)
3454 3459
3455 3460 def runhook(unused_success):
3456 3461 self.hook(
3457 3462 b'pushkey',
3458 3463 namespace=namespace,
3459 3464 key=key,
3460 3465 old=old,
3461 3466 new=new,
3462 3467 ret=ret,
3463 3468 )
3464 3469
3465 3470 self._afterlock(runhook)
3466 3471 return ret
3467 3472
3468 3473 def listkeys(self, namespace):
3469 3474 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3470 3475 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3471 3476 values = pushkey.list(self, namespace)
3472 3477 self.hook(b'listkeys', namespace=namespace, values=values)
3473 3478 return values
3474 3479
3475 3480 def debugwireargs(self, one, two, three=None, four=None, five=None):
3476 3481 '''used to test argument passing over the wire'''
3477 3482 return b"%s %s %s %s %s" % (
3478 3483 one,
3479 3484 two,
3480 3485 pycompat.bytestr(three),
3481 3486 pycompat.bytestr(four),
3482 3487 pycompat.bytestr(five),
3483 3488 )
3484 3489
3485 3490 def savecommitmessage(self, text):
3486 3491 fp = self.vfs(b'last-message.txt', b'wb')
3487 3492 try:
3488 3493 fp.write(text)
3489 3494 finally:
3490 3495 fp.close()
3491 3496 return self.pathto(fp.name[len(self.root) + 1 :])
3492 3497
3493 3498 def register_wanted_sidedata(self, category):
3494 3499 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3495 3500 # Only revlogv2 repos can want sidedata.
3496 3501 return
3497 3502 self._wanted_sidedata.add(pycompat.bytestr(category))
3498 3503
3499 3504 def register_sidedata_computer(
3500 3505 self, kind, category, keys, computer, flags, replace=False
3501 3506 ):
3502 3507 if kind not in revlogconst.ALL_KINDS:
3503 3508 msg = _(b"unexpected revlog kind '%s'.")
3504 3509 raise error.ProgrammingError(msg % kind)
3505 3510 category = pycompat.bytestr(category)
3506 3511 already_registered = category in self._sidedata_computers.get(kind, [])
3507 3512 if already_registered and not replace:
3508 3513 msg = _(
3509 3514 b"cannot register a sidedata computer twice for category '%s'."
3510 3515 )
3511 3516 raise error.ProgrammingError(msg % category)
3512 3517 if replace and not already_registered:
3513 3518 msg = _(
3514 3519 b"cannot replace a sidedata computer that isn't registered "
3515 3520 b"for category '%s'."
3516 3521 )
3517 3522 raise error.ProgrammingError(msg % category)
3518 3523 self._sidedata_computers.setdefault(kind, {})
3519 3524 self._sidedata_computers[kind][category] = (keys, computer, flags)
3520 3525
3521 3526
3522 3527 # used to avoid circular references so destructors work
3523 3528 def aftertrans(files):
3524 3529 renamefiles = [tuple(t) for t in files]
3525 3530
3526 3531 def a():
3527 3532 for vfs, src, dest in renamefiles:
3528 3533 # if src and dest refer to the same file, vfs.rename is a no-op,
3529 3534 # leaving both src and dest on disk. delete dest to make sure
3530 3535 # the rename couldn't be such a no-op.
3531 3536 vfs.tryunlink(dest)
3532 3537 try:
3533 3538 vfs.rename(src, dest)
3534 3539 except FileNotFoundError: # journal file does not yet exist
3535 3540 pass
3536 3541
3537 3542 return a
3538 3543
3539 3544
3540 3545 def undoname(fn: bytes) -> bytes:
3541 3546 base, name = os.path.split(fn)
3542 3547 assert name.startswith(b'journal')
3543 3548 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3544 3549
3545 3550
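# Hedged usage sketch (hypothetical POSIX path, not part of this module):
# `undoname` maps a transaction journal file onto its "undo" counterpart.
def _undoname_example():
    assert undoname(b'/repo/.hg/journal.dirstate') == b'/repo/.hg/undo.dirstate'
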
3546 3551 def instance(ui, path: bytes, create, intents=None, createopts=None):
3547 3552
3548 3553 # prevent cyclic import localrepo -> upgrade -> localrepo
3549 3554 from . import upgrade
3550 3555
3551 3556 localpath = urlutil.urllocalpath(path)
3552 3557 if create:
3553 3558 createrepository(ui, localpath, createopts=createopts)
3554 3559
3555 3560 def repo_maker():
3556 3561 return makelocalrepository(ui, localpath, intents=intents)
3557 3562
3558 3563 repo = repo_maker()
3559 3564 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3560 3565 return repo
3561 3566
3562 3567
3563 3568 def islocal(path: bytes) -> bool:
3564 3569 return True
3565 3570
3566 3571
3567 3572 def defaultcreateopts(ui, createopts=None):
3568 3573 """Populate the default creation options for a repository.
3569 3574
3570 3575 A dictionary of explicitly requested creation options can be passed
3571 3576 in. Missing keys will be populated.
3572 3577 """
3573 3578 createopts = dict(createopts or {})
3574 3579
3575 3580 if b'backend' not in createopts:
3576 3581 # experimental config: storage.new-repo-backend
3577 3582 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3578 3583
3579 3584 return createopts
3580 3585
3581 3586
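# Minimal usage sketch (hypothetical): callers may pass a partial options
# dict; missing keys such as b'backend' are filled in from configuration.
def _defaultcreateopts_example(ui):
    opts = defaultcreateopts(ui, createopts={b'narrowfiles': True})
    return opts[b'backend']  # populated from storage.new-repo-backend
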
3582 3587 def clone_requirements(ui, createopts, srcrepo):
3583 3588 """clone the requirements of a local repo for a local clone
3584 3589
3585 3590 The store requirements are unchanged while the working copy requirements
3586 3591 depend on the configuration.
3587 3592 """
3588 3593 target_requirements = set()
3589 3594 if not srcrepo.requirements:
3590 3595 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3591 3596 # with it.
3592 3597 return target_requirements
3593 3598 createopts = defaultcreateopts(ui, createopts=createopts)
3594 3599 for r in newreporequirements(ui, createopts):
3595 3600 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3596 3601 target_requirements.add(r)
3597 3602
3598 3603 for r in srcrepo.requirements:
3599 3604 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3600 3605 target_requirements.add(r)
3601 3606 return target_requirements
3602 3607
3603 3608
3604 3609 def newreporequirements(ui, createopts):
3605 3610 """Determine the set of requirements for a new local repository.
3606 3611
3607 3612 Extensions can wrap this function to specify custom requirements for
3608 3613 new repositories.
3609 3614 """
3610 3615
3611 3616 if b'backend' not in createopts:
3612 3617 raise error.ProgrammingError(
3613 3618 b'backend key not present in createopts; '
3614 3619 b'was defaultcreateopts() called?'
3615 3620 )
3616 3621
3617 3622 if createopts[b'backend'] != b'revlogv1':
3618 3623 raise error.Abort(
3619 3624 _(
3620 3625 b'unable to determine repository requirements for '
3621 3626 b'storage backend: %s'
3622 3627 )
3623 3628 % createopts[b'backend']
3624 3629 )
3625 3630
3626 3631 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3627 3632 if ui.configbool(b'format', b'usestore'):
3628 3633 requirements.add(requirementsmod.STORE_REQUIREMENT)
3629 3634 if ui.configbool(b'format', b'usefncache'):
3630 3635 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3631 3636 if ui.configbool(b'format', b'dotencode'):
3632 3637 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3633 3638
3634 3639 compengines = ui.configlist(b'format', b'revlog-compression')
3635 3640 for compengine in compengines:
3636 3641 if compengine in util.compengines:
3637 3642 engine = util.compengines[compengine]
3638 3643 if engine.available() and engine.revlogheader():
3639 3644 break
3640 3645 else:
3641 3646 raise error.Abort(
3642 3647 _(
3643 3648 b'compression engines %s defined by '
3644 3649 b'format.revlog-compression not available'
3645 3650 )
3646 3651 % b', '.join(b'"%s"' % e for e in compengines),
3647 3652 hint=_(
3648 3653 b'run "hg debuginstall" to list available '
3649 3654 b'compression engines'
3650 3655 ),
3651 3656 )
3652 3657
3653 3658 # zlib is the historical default and doesn't need an explicit requirement.
3654 3659 if compengine == b'zstd':
3655 3660 requirements.add(b'revlog-compression-zstd')
3656 3661 elif compengine != b'zlib':
3657 3662 requirements.add(b'exp-compression-%s' % compengine)
3658 3663
3659 3664 if scmutil.gdinitconfig(ui):
3660 3665 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3661 3666 if ui.configbool(b'format', b'sparse-revlog'):
3662 3667 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3663 3668
3664 3669 # experimental config: format.use-dirstate-v2
3665 3670 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3666 3671 if ui.configbool(b'format', b'use-dirstate-v2'):
3667 3672 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3668 3673
3669 3674 # experimental config: format.exp-use-copies-side-data-changeset
3670 3675 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3671 3676 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3672 3677 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3673 3678 if ui.configbool(b'experimental', b'treemanifest'):
3674 3679 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3675 3680
3676 3681 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3677 3682 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3678 3683 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3679 3684
3680 3685 revlogv2 = ui.config(b'experimental', b'revlogv2')
3681 3686 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3682 3687 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3683 3688 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3684 3689 # experimental config: format.internal-phase
3685 3690 if ui.configbool(b'format', b'use-internal-phase'):
3686 3691 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3687 3692
3688 3693 # experimental config: format.exp-archived-phase
3689 3694 if ui.configbool(b'format', b'exp-archived-phase'):
3690 3695 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3691 3696
3692 3697 if createopts.get(b'narrowfiles'):
3693 3698 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3694 3699
3695 3700 if createopts.get(b'lfs'):
3696 3701 requirements.add(b'lfs')
3697 3702
3698 3703 if ui.configbool(b'format', b'bookmarks-in-store'):
3699 3704 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3700 3705
3701 3706 if ui.configbool(b'format', b'use-persistent-nodemap'):
3702 3707 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3703 3708
3704 3709 # if share-safe is enabled, let's create the new repository with the new
3705 3710 # requirement
3706 3711 if ui.configbool(b'format', b'use-share-safe'):
3707 3712 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3708 3713
3709 3714 # if we are creating a share-repoΒΉ we have to handle requirements
3710 3715 # differently.
3711 3716 #
3712 3717 # [1] (i.e. reusing the store from another repository, just having a
3713 3718 # working copy)
3714 3719 if b'sharedrepo' in createopts:
3715 3720 source_requirements = set(createopts[b'sharedrepo'].requirements)
3716 3721
3717 3722 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3718 3723 # share to an old school repository, we have to copy the
3719 3724 # requirements and hope for the best.
3720 3725 requirements = source_requirements
3721 3726 else:
3722 3727 # We have control on the working copy only, so "copy" the non
3723 3728 # working copy part over, ignoring previous logic.
3724 3729 to_drop = set()
3725 3730 for req in requirements:
3726 3731 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3727 3732 continue
3728 3733 if req in source_requirements:
3729 3734 continue
3730 3735 to_drop.add(req)
3731 3736 requirements -= to_drop
3732 3737 requirements |= source_requirements
3733 3738
3734 3739 if createopts.get(b'sharedrelative'):
3735 3740 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3736 3741 else:
3737 3742 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3738 3743
3739 3744 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3740 3745 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3741 3746 msg = _(b"ignoring unknown tracked key version: %d\n")
3742 3747 hint = _(
3743 3748 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3744 3749 )
3745 3750 if version != 1:
3746 3751 ui.warn(msg % version, hint=hint)
3747 3752 else:
3748 3753 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3749 3754
3750 3755 return requirements
3751 3756
3752 3757
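# Hedged sketch (assumed names) of the extension-wrapping pattern the
# docstring above mentions; b'exp-myext' is a made-up requirement and the
# snippet would live in a separate extension module:
#
#     from mercurial import extensions, localrepo
#
#     def _wrapped(orig, ui, createopts):
#         reqs = orig(ui, createopts)
#         reqs.add(b'exp-myext')  # custom requirement for this extension
#         return reqs
#
#     def extsetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements', _wrapped)
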
3753 3758 def checkrequirementscompat(ui, requirements):
3754 3759 """Checks compatibility of repository requirements enabled and disabled.
3755 3760
3756 3761 Returns a set of requirements which need to be dropped because dependent
3757 3762 requirements are not enabled. Also warns users about it."""
3758 3763
3759 3764 dropped = set()
3760 3765
3761 3766 if requirementsmod.STORE_REQUIREMENT not in requirements:
3762 3767 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3763 3768 ui.warn(
3764 3769 _(
3765 3770 b'ignoring enabled \'format.bookmarks-in-store\' config '
3766 3771 b'because it is incompatible with disabled '
3767 3772 b'\'format.usestore\' config\n'
3768 3773 )
3769 3774 )
3770 3775 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3771 3776
3772 3777 if (
3773 3778 requirementsmod.SHARED_REQUIREMENT in requirements
3774 3779 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3775 3780 ):
3776 3781 raise error.Abort(
3777 3782 _(
3778 3783 b"cannot create shared repository as source was created"
3779 3784 b" with 'format.usestore' config disabled"
3780 3785 )
3781 3786 )
3782 3787
3783 3788 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3784 3789 if ui.hasconfig(b'format', b'use-share-safe'):
3785 3790 msg = _(
3786 3791 b"ignoring enabled 'format.use-share-safe' config because "
3787 3792 b"it is incompatible with disabled 'format.usestore'"
3788 3793 b" config\n"
3789 3794 )
3790 3795 ui.warn(msg)
3791 3796 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3792 3797
3793 3798 return dropped
3794 3799
3795 3800
3796 3801 def filterknowncreateopts(ui, createopts):
3797 3802 """Filters a dict of repo creation options against options that are known.
3798 3803
3799 3804 Receives a dict of repo creation options and returns a dict of those
3800 3805 options that we don't know how to handle.
3801 3806
3802 3807 This function is called as part of repository creation. If the
3803 3808 returned dict contains any items, repository creation will not
3804 3809 be allowed, as it means there was a request to create a repository
3805 3810 with options not recognized by loaded code.
3806 3811
3807 3812 Extensions can wrap this function to filter out creation options
3808 3813 they know how to handle.
3809 3814 """
3810 3815 known = {
3811 3816 b'backend',
3812 3817 b'lfs',
3813 3818 b'narrowfiles',
3814 3819 b'sharedrepo',
3815 3820 b'sharedrelative',
3816 3821 b'shareditems',
3817 3822 b'shallowfilestore',
3818 3823 }
3819 3824
3820 3825 return {k: v for k, v in createopts.items() if k not in known}
3821 3826
3822 3827
3823 3828 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3824 3829 """Create a new repository in a vfs.
3825 3830
3826 3831 ``path`` path to the new repo's working directory.
3827 3832 ``createopts`` options for the new repository.
3828 3833 ``requirements`` predefined set of requirements.
3829 3834 (incompatible with ``createopts``)
3830 3835
3831 3836 The following keys for ``createopts`` are recognized:
3832 3837
3833 3838 backend
3834 3839 The storage backend to use.
3835 3840 lfs
3836 3841 Repository will be created with ``lfs`` requirement. The lfs extension
3837 3842 will automatically be loaded when the repository is accessed.
3838 3843 narrowfiles
3839 3844 Set up repository to support narrow file storage.
3840 3845 sharedrepo
3841 3846 Repository object from which storage should be shared.
3842 3847 sharedrelative
3843 3848 Boolean indicating if the path to the shared repo should be
3844 3849 stored as relative. By default, the pointer to the "parent" repo
3845 3850 is stored as an absolute path.
3846 3851 shareditems
3847 3852 Set of items to share to the new repository (in addition to storage).
3848 3853 shallowfilestore
3849 3854 Indicates that storage for files should be shallow (not all ancestor
3850 3855 revisions are known).
3851 3856 """
3852 3857
3853 3858 if requirements is not None:
3854 3859 if createopts is not None:
3855 3860 msg = b'cannot specify both createopts and requirements'
3856 3861 raise error.ProgrammingError(msg)
3857 3862 createopts = {}
3858 3863 else:
3859 3864 createopts = defaultcreateopts(ui, createopts=createopts)
3860 3865
3861 3866 unknownopts = filterknowncreateopts(ui, createopts)
3862 3867
3863 3868 if not isinstance(unknownopts, dict):
3864 3869 raise error.ProgrammingError(
3865 3870 b'filterknowncreateopts() did not return a dict'
3866 3871 )
3867 3872
3868 3873 if unknownopts:
3869 3874 raise error.Abort(
3870 3875 _(
3871 3876 b'unable to create repository because of unknown '
3872 3877 b'creation option: %s'
3873 3878 )
3874 3879 % b', '.join(sorted(unknownopts)),
3875 3880 hint=_(b'is a required extension not loaded?'),
3876 3881 )
3877 3882
3878 3883 requirements = newreporequirements(ui, createopts=createopts)
3879 3884 requirements -= checkrequirementscompat(ui, requirements)
3880 3885
3881 3886 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3882 3887
3883 3888 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3884 3889 if hgvfs.exists():
3885 3890 raise error.RepoError(_(b'repository %s already exists') % path)
3886 3891
3887 3892 if b'sharedrepo' in createopts:
3888 3893 sharedpath = createopts[b'sharedrepo'].sharedpath
3889 3894
3890 3895 if createopts.get(b'sharedrelative'):
3891 3896 try:
3892 3897 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3893 3898 sharedpath = util.pconvert(sharedpath)
3894 3899 except (IOError, ValueError) as e:
3895 3900 # ValueError is raised on Windows if the drive letters differ
3896 3901 # on each path.
3897 3902 raise error.Abort(
3898 3903 _(b'cannot calculate relative path'),
3899 3904 hint=stringutil.forcebytestr(e),
3900 3905 )
3901 3906
3902 3907 if not wdirvfs.exists():
3903 3908 wdirvfs.makedirs()
3904 3909
3905 3910 hgvfs.makedir(notindexed=True)
3906 3911 if b'sharedrepo' not in createopts:
3907 3912 hgvfs.mkdir(b'cache')
3908 3913 hgvfs.mkdir(b'wcache')
3909 3914
3910 3915 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3911 3916 if has_store and b'sharedrepo' not in createopts:
3912 3917 hgvfs.mkdir(b'store')
3913 3918
3914 3919 # We create an invalid changelog outside the store so very old
3915 3920 # Mercurial versions (which didn't know about the requirements
3916 3921 # file) encounter an error on reading the changelog. This
3917 3922 # effectively locks out old clients and prevents them from
3918 3923 # mucking with a repo in an unknown format.
3919 3924 #
3920 3925 # The revlog header has version 65535, which won't be recognized by
3921 3926 # such old clients.
3922 3927 hgvfs.append(
3923 3928 b'00changelog.i',
3924 3929 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3925 3930 b'layout',
3926 3931 )
3927 3932
3928 3933 # Filter the requirements into working copy and store ones
3929 3934 wcreq, storereq = scmutil.filterrequirements(requirements)
3930 3935 # write working copy ones
3931 3936 scmutil.writerequires(hgvfs, wcreq)
3932 3937 # If there are store requirements and the current repository
3933 3938 # is not a shared one, write stored requirements
3934 3939 # For new shared repository, we don't need to write the store
3935 3940 # requirements as they are already present in store requires
3936 3941 if storereq and b'sharedrepo' not in createopts:
3937 3942 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3938 3943 scmutil.writerequires(storevfs, storereq)
3939 3944
3940 3945 # Write out file telling readers where to find the shared store.
3941 3946 if b'sharedrepo' in createopts:
3942 3947 hgvfs.write(b'sharedpath', sharedpath)
3943 3948
3944 3949 if createopts.get(b'shareditems'):
3945 3950 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3946 3951 hgvfs.write(b'shared', shared)
3947 3952
3948 3953
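# Hedged usage sketch (hypothetical path): creating a narrow repository
# programmatically instead of through `hg init`.
def _createrepository_example(ui):
    createrepository(ui, b'/tmp/example-repo', createopts={b'narrowfiles': True})
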
3949 3954 def poisonrepository(repo):
3950 3955 """Poison a repository instance so it can no longer be used."""
3951 3956 # Perform any cleanup on the instance.
3952 3957 repo.close()
3953 3958
3954 3959 # Our strategy is to replace the type of the object with one that
3955 3960 # has all attribute lookups result in error.
3956 3961 #
3957 3962 # But we have to allow the close() method because some constructors
3958 3963 # of repos call close() on repo references.
3959 3964 class poisonedrepository:
3960 3965 def __getattribute__(self, item):
3961 3966 if item == 'close':
3962 3967 return object.__getattribute__(self, item)
3963 3968
3964 3969 raise error.ProgrammingError(
3965 3970 b'repo instances should not be used after unshare'
3966 3971 )
3967 3972
3968 3973 def close(self):
3969 3974 pass
3970 3975
3971 3976 # We may have a repoview, which intercepts __setattr__. So be sure
3972 3977 # we operate at the lowest level possible.
3973 3978 object.__setattr__(repo, '__class__', poisonedrepository)
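
# Hedged sketch (illustrative only) of the poisoning contract: close() is
# the sole attribute that keeps working afterwards.
def _poison_example(repo):
    poisonrepository(repo)
    repo.close()  # still permitted
    try:
        repo.changelog  # any other attribute access now raises
    except error.ProgrammingError:
        pass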
@@ -1,3354 +1,3358 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 # coding: utf8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Storage back-end for Mercurial.
10 10
11 11 This provides efficient delta storage with O(1) retrieve and append
12 12 and O(changes) merge between branches.
13 13 """
14 14
15 15
16 16 import binascii
17 17 import collections
18 18 import contextlib
19 19 import io
20 20 import os
21 21 import struct
22 22 import zlib
23 23
24 24 # import stuff from node for others to import from revlog
25 25 from .node import (
26 26 bin,
27 27 hex,
28 28 nullrev,
29 29 sha1nodeconstants,
30 30 short,
31 31 wdirrev,
32 32 )
33 33 from .i18n import _
34 34 from .pycompat import getattr
35 35 from .revlogutils.constants import (
36 36 ALL_KINDS,
37 37 CHANGELOGV2,
38 38 COMP_MODE_DEFAULT,
39 39 COMP_MODE_INLINE,
40 40 COMP_MODE_PLAIN,
41 41 ENTRY_RANK,
42 42 FEATURES_BY_VERSION,
43 43 FLAG_GENERALDELTA,
44 44 FLAG_INLINE_DATA,
45 45 INDEX_HEADER,
46 46 KIND_CHANGELOG,
47 47 KIND_FILELOG,
48 48 RANK_UNKNOWN,
49 49 REVLOGV0,
50 50 REVLOGV1,
51 51 REVLOGV1_FLAGS,
52 52 REVLOGV2,
53 53 REVLOGV2_FLAGS,
54 54 REVLOG_DEFAULT_FLAGS,
55 55 REVLOG_DEFAULT_FORMAT,
56 56 REVLOG_DEFAULT_VERSION,
57 57 SUPPORTED_FLAGS,
58 58 )
59 59 from .revlogutils.flagutil import (
60 60 REVIDX_DEFAULT_FLAGS,
61 61 REVIDX_ELLIPSIS,
62 62 REVIDX_EXTSTORED,
63 63 REVIDX_FLAGS_ORDER,
64 64 REVIDX_HASCOPIESINFO,
65 65 REVIDX_ISCENSORED,
66 66 REVIDX_RAWTEXT_CHANGING_FLAGS,
67 67 )
68 68 from .thirdparty import attr
69 69 from . import (
70 70 ancestor,
71 71 dagop,
72 72 error,
73 73 mdiff,
74 74 policy,
75 75 pycompat,
76 76 revlogutils,
77 77 templatefilters,
78 78 util,
79 79 )
80 80 from .interfaces import (
81 81 repository,
82 82 util as interfaceutil,
83 83 )
84 84 from .revlogutils import (
85 85 deltas as deltautil,
86 86 docket as docketutil,
87 87 flagutil,
88 88 nodemap as nodemaputil,
89 89 randomaccessfile,
90 90 revlogv0,
91 91 rewrite,
92 92 sidedata as sidedatautil,
93 93 )
94 94 from .utils import (
95 95 storageutil,
96 96 stringutil,
97 97 )
98 98
99 99 # blanked usage of all the names to prevent pyflakes constraints
100 100 # We need these names available in the module for extensions.
101 101
102 102 REVLOGV0
103 103 REVLOGV1
104 104 REVLOGV2
105 105 CHANGELOGV2
106 106 FLAG_INLINE_DATA
107 107 FLAG_GENERALDELTA
108 108 REVLOG_DEFAULT_FLAGS
109 109 REVLOG_DEFAULT_FORMAT
110 110 REVLOG_DEFAULT_VERSION
111 111 REVLOGV1_FLAGS
112 112 REVLOGV2_FLAGS
113 113 REVIDX_ISCENSORED
114 114 REVIDX_ELLIPSIS
115 115 REVIDX_HASCOPIESINFO
116 116 REVIDX_EXTSTORED
117 117 REVIDX_DEFAULT_FLAGS
118 118 REVIDX_FLAGS_ORDER
119 119 REVIDX_RAWTEXT_CHANGING_FLAGS
120 120
121 121 parsers = policy.importmod('parsers')
122 122 rustancestor = policy.importrust('ancestor')
123 123 rustdagop = policy.importrust('dagop')
124 124 rustrevlog = policy.importrust('revlog')
125 125
126 126 # Aliased for performance.
127 127 _zlibdecompress = zlib.decompress
128 128
129 129 # max size of revlog with inline data
130 130 _maxinline = 131072
131 131
132 132 # Flag processors for REVIDX_ELLIPSIS.
133 133 def ellipsisreadprocessor(rl, text):
134 134 return text, False
135 135
136 136
137 137 def ellipsiswriteprocessor(rl, text):
138 138 return text, False
139 139
140 140
141 141 def ellipsisrawprocessor(rl, text):
142 142 return False
143 143
144 144
145 145 ellipsisprocessor = (
146 146 ellipsisreadprocessor,
147 147 ellipsiswriteprocessor,
148 148 ellipsisrawprocessor,
149 149 )
150 150
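# A flag processor is a (read, write, raw) triple: read and write may
# transform the text and return (text, validatehash), while raw only
# reports whether the stored rawtext can be validated as-is. The
# ellipsis processors above are pass-throughs that never validate.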
151 151
152 152 def _verify_revision(rl, skipflags, state, node):
153 153 """Verify the integrity of the given revlog ``node`` while providing a hook
154 154 point for extensions to influence the operation."""
155 155 if skipflags:
156 156 state[b'skipread'].add(node)
157 157 else:
158 158 # Side-effect: read content and verify hash.
159 159 rl.revision(node)
160 160
161 161
162 162 # True if a fast implementation for persistent-nodemap is available
163 163 #
164 164 # We also consider we have a "fast" implementation in "pure" python because
165 165 # people using pure don't really have performance considerations (and a
166 166 # wheelbarrow of other slowness sources)
167 167 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
168 168 parsers, 'BaseIndexObject'
169 169 )
170 170
171 171
172 172 @interfaceutil.implementer(repository.irevisiondelta)
173 173 @attr.s(slots=True)
174 174 class revlogrevisiondelta:
175 175 node = attr.ib()
176 176 p1node = attr.ib()
177 177 p2node = attr.ib()
178 178 basenode = attr.ib()
179 179 flags = attr.ib()
180 180 baserevisionsize = attr.ib()
181 181 revision = attr.ib()
182 182 delta = attr.ib()
183 183 sidedata = attr.ib()
184 184 protocol_flags = attr.ib()
185 185 linknode = attr.ib(default=None)
186 186
187 187
188 188 @interfaceutil.implementer(repository.iverifyproblem)
189 189 @attr.s(frozen=True)
190 190 class revlogproblem:
191 191 warning = attr.ib(default=None)
192 192 error = attr.ib(default=None)
193 193 node = attr.ib(default=None)
194 194
195 195
196 196 def parse_index_v1(data, inline):
197 197 # call the C implementation to parse the index data
198 198 index, cache = parsers.parse_index2(data, inline)
199 199 return index, cache
200 200
201 201
202 202 def parse_index_v2(data, inline):
203 203 # call the C implementation to parse the index data
204 204 index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
205 205 return index, cache
206 206
207 207
208 208 def parse_index_cl_v2(data, inline):
209 209 # call the C implementation to parse the index data
210 210 index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
211 211 return index, cache
212 212
213 213
214 214 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
215 215
216 216 def parse_index_v1_nodemap(data, inline):
217 217 index, cache = parsers.parse_index_devel_nodemap(data, inline)
218 218 return index, cache
219 219
220 220
221 221 else:
222 222 parse_index_v1_nodemap = None
223 223
224 224
225 225 def parse_index_v1_mixed(data, inline):
226 226 index, cache = parse_index_v1(data, inline)
227 227 return rustrevlog.MixedIndex(index), cache
228 228
229 229
230 230 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
231 231 # signed integer)
232 232 _maxentrysize = 0x7FFFFFFF
233 233
234 234 FILE_TOO_SHORT_MSG = _(
235 235 b'cannot read from revlog %s;'
236 236 b' expected %d bytes from offset %d, data size is %d'
237 237 )
238 238
239 239 hexdigits = b'0123456789abcdefABCDEF'
240 240
241 241
242 242 class revlog:
243 243 """
244 244 the underlying revision storage object
245 245
246 246 A revlog consists of two parts, an index and the revision data.
247 247
248 248 The index is a file with a fixed record size containing
249 249 information on each revision, including its nodeid (hash), the
250 250 nodeids of its parents, the position and offset of its data within
251 251 the data file, and the revision it's based on. Finally, each entry
252 252 contains a linkrev entry that can serve as a pointer to external
253 253 data.
254 254
255 255 The revision data itself is a linear collection of data chunks.
256 256 Each chunk represents a revision and is usually represented as a
257 257 delta against the previous chunk. To bound lookup time, runs of
258 258 deltas are limited to about 2 times the length of the original
259 259 version data. This makes retrieval of a version proportional to
260 260 its size, or O(1) relative to the number of revisions.
261 261
262 262 Both pieces of the revlog are written to in an append-only
263 263 fashion, which means we never need to rewrite a file to insert or
264 264 remove data, and can use some simple techniques to avoid the need
265 265 for locking while reading.
266 266
267 267 If checkambig, indexfile is opened with checkambig=True at
268 268 writing, to avoid file stat ambiguity.
269 269
270 270 If mmaplargeindex is True, and an mmapindexthreshold is set, the
271 271 index will be mmapped rather than read if it is larger than the
272 272 configured threshold.
273 273
274 274 If censorable is True, the revlog can have censored revisions.
275 275
276 276 If `upperboundcomp` is not None, this is the expected maximal gain from
277 277 compression for the data content.
278 278
279 279 `concurrencychecker` is an optional function that receives 3 arguments: a
280 280 file handle, a filename, and an expected position. It should check whether
281 281 the current position in the file handle is valid, and log/warn/fail (by
282 282 raising).
283 283
284 284 See mercurial/revlogutils/constants.py for details about the content of an
285 285 index entry.
286 286 """
287 287
288 288 _flagserrorclass = error.RevlogError
289 289
290 290 def __init__(
291 291 self,
292 292 opener,
293 293 target,
294 294 radix,
295 295 postfix=None, # only exist for `tmpcensored` now
296 296 checkambig=False,
297 297 mmaplargeindex=False,
298 298 censorable=False,
299 299 upperboundcomp=None,
300 300 persistentnodemap=False,
301 301 concurrencychecker=None,
302 302 trypending=False,
303 303 canonical_parent_order=True,
304 304 ):
305 305 """
306 306 create a revlog object
307 307
308 308 opener is a function that abstracts the file opening operation
309 309 and can be used to implement COW semantics or the like.
310 310
311 311 `target`: a (KIND, ID) tuple that identify the content stored in
312 312 this revlog. It help the rest of the code to understand what the revlog
313 313 is about without having to resort to heuristic and index filename
314 314 analysis. Note: that this must be reliably be set by normal code, but
315 315 that test, debug, or performance measurement code might not set this to
316 316 accurate value.
317 317 """
318 318 self.upperboundcomp = upperboundcomp
319 319
320 320 self.radix = radix
321 321
322 322 self._docket_file = None
323 323 self._indexfile = None
324 324 self._datafile = None
325 325 self._sidedatafile = None
326 326 self._nodemap_file = None
327 327 self.postfix = postfix
328 328 self._trypending = trypending
329 329 self.opener = opener
330 330 if persistentnodemap:
331 331 self._nodemap_file = nodemaputil.get_nodemap_file(self)
332 332
333 333 assert target[0] in ALL_KINDS
334 334 assert len(target) == 2
335 335 self.target = target
336 336 # When True, indexfile is opened with checkambig=True at writing, to
337 337 # avoid file stat ambiguity.
338 338 self._checkambig = checkambig
339 339 self._mmaplargeindex = mmaplargeindex
340 340 self._censorable = censorable
341 341 # 3-tuple of (node, rev, text) for a raw revision.
342 342 self._revisioncache = None
343 343 # Maps rev to chain base rev.
344 344 self._chainbasecache = util.lrucachedict(100)
345 345 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
346 346 self._chunkcache = (0, b'')
347 347 # How much data to read and cache into the raw revlog data cache.
348 348 self._chunkcachesize = 65536
349 349 self._maxchainlen = None
350 350 self._deltabothparents = True
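# how many delta base candidates to test at the same time during the
# delta search; 0 leaves the candidate groups unchunked (the historical
# behaviour), per the delta-parent-search.candidate-group-chunk-size
# option read in _init_opts() below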
351 self._candidate_group_chunk_size = 0
351 352 self._debug_delta = False
352 353 self.index = None
353 354 self._docket = None
354 355 self._nodemap_docket = None
355 356 # Mapping of partial identifiers to full nodes.
356 357 self._pcache = {}
357 358 # Mapping of revision integer to full node.
358 359 self._compengine = b'zlib'
359 360 self._compengineopts = {}
360 361 self._maxdeltachainspan = -1
361 362 self._withsparseread = False
362 363 self._sparserevlog = False
363 364 self.hassidedata = False
364 365 self._srdensitythreshold = 0.50
365 366 self._srmingapsize = 262144
366 367
367 368 # Make copy of flag processors so each revlog instance can support
368 369 # custom flags.
369 370 self._flagprocessors = dict(flagutil.flagprocessors)
370 371
371 372 # 3-tuple of file handles being used for active writing.
372 373 self._writinghandles = None
373 374 # prevent nesting of addgroup
374 375 self._adding_group = None
375 376
376 377 self._loadindex()
377 378
378 379 self._concurrencychecker = concurrencychecker
379 380
380 381 # parent order is supposed to be semantically irrelevant, so we
381 382 # normally resort parents to ensure that the first parent is non-null,
382 383 # if there is a non-null parent at all.
383 384 # filelog abuses the parent order as flag to mark some instances of
384 385 # meta-encoded files, so allow it to disable this behavior.
385 386 self.canonical_parent_order = canonical_parent_order
386 387
387 388 def _init_opts(self):
388 389 """process options (from above/config) to setup associated default revlog mode
389 390
390 391 These values might be affected when actually reading on disk information.
391 392
392 393 The relevant values are returned for use in _loadindex().
393 394
394 395 * newversionflags:
395 396 version header to use if we need to create a new revlog
396 397
397 398 * mmapindexthreshold:
398 399 minimal index size for start to use mmap
399 400
400 401 * force_nodemap:
401 402 force the usage of a "development" version of the nodemap code
402 403 """
403 404 mmapindexthreshold = None
404 405 opts = self.opener.options
405 406
406 407 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
407 408 new_header = CHANGELOGV2
408 409 elif b'revlogv2' in opts:
409 410 new_header = REVLOGV2
410 411 elif b'revlogv1' in opts:
411 412 new_header = REVLOGV1 | FLAG_INLINE_DATA
412 413 if b'generaldelta' in opts:
413 414 new_header |= FLAG_GENERALDELTA
414 415 elif b'revlogv0' in self.opener.options:
415 416 new_header = REVLOGV0
416 417 else:
417 418 new_header = REVLOG_DEFAULT_VERSION
418 419
419 420 if b'chunkcachesize' in opts:
420 421 self._chunkcachesize = opts[b'chunkcachesize']
421 422 if b'maxchainlen' in opts:
422 423 self._maxchainlen = opts[b'maxchainlen']
423 424 if b'deltabothparents' in opts:
424 425 self._deltabothparents = opts[b'deltabothparents']
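# optional cap on how many delta base candidates are tested at the same
# time, read from delta-parent-search.candidate-group-chunk-size (a
# falsy value keeps the unchunked default set in __init__)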
426 dps_cgds = opts.get(b'delta-parent-search.candidate-group-chunk-size')
427 if dps_cgds:
428 self._candidate_group_chunk_size = dps_cgds
425 429 self._lazydelta = bool(opts.get(b'lazydelta', True))
426 430 self._lazydeltabase = False
427 431 if self._lazydelta:
428 432 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
429 433 if b'debug-delta' in opts:
430 434 self._debug_delta = opts[b'debug-delta']
431 435 if b'compengine' in opts:
432 436 self._compengine = opts[b'compengine']
433 437 if b'zlib.level' in opts:
434 438 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
435 439 if b'zstd.level' in opts:
436 440 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
437 441 if b'maxdeltachainspan' in opts:
438 442 self._maxdeltachainspan = opts[b'maxdeltachainspan']
439 443 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
440 444 mmapindexthreshold = opts[b'mmapindexthreshold']
441 445 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
442 446 withsparseread = bool(opts.get(b'with-sparse-read', False))
443 447 # sparse-revlog forces sparse-read
444 448 self._withsparseread = self._sparserevlog or withsparseread
445 449 if b'sparse-read-density-threshold' in opts:
446 450 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
447 451 if b'sparse-read-min-gap-size' in opts:
448 452 self._srmingapsize = opts[b'sparse-read-min-gap-size']
449 453 if opts.get(b'enableellipsis'):
450 454 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
451 455
452 456 # revlog v0 doesn't have flag processors
453 457 for flag, processor in opts.get(b'flagprocessors', {}).items():
454 458 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
455 459
456 460 if self._chunkcachesize <= 0:
457 461 raise error.RevlogError(
458 462 _(b'revlog chunk cache size %r is not greater than 0')
459 463 % self._chunkcachesize
460 464 )
461 465 elif self._chunkcachesize & (self._chunkcachesize - 1):
462 466 raise error.RevlogError(
463 467 _(b'revlog chunk cache size %r is not a power of 2')
464 468 % self._chunkcachesize
465 469 )
466 470 force_nodemap = opts.get(b'devel-force-nodemap', False)
467 471 return new_header, mmapindexthreshold, force_nodemap
468 472
469 473 def _get_data(self, filepath, mmap_threshold, size=None):
470 474 """return a file content with or without mmap
471 475
472 476 If the file is missing return the empty string"""
473 477 try:
474 478 with self.opener(filepath) as fp:
475 479 if mmap_threshold is not None:
476 480 file_size = self.opener.fstat(fp).st_size
477 481 if file_size >= mmap_threshold:
478 482 if size is not None:
479 483 # avoid potential mmap crash
480 484 size = min(file_size, size)
481 485 # TODO: should .close() to release resources without
482 486 # relying on Python GC
483 487 if size is None:
484 488 return util.buffer(util.mmapread(fp))
485 489 else:
486 490 return util.buffer(util.mmapread(fp, size))
487 491 if size is None:
488 492 return fp.read()
489 493 else:
490 494 return fp.read(size)
491 495 except FileNotFoundError:
492 496 return b''
493 497
494 498 def _loadindex(self, docket=None):
495 499
496 500 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
497 501
498 502 if self.postfix is not None:
499 503 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
500 504 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
501 505 entry_point = b'%s.i.a' % self.radix
502 506 else:
503 507 entry_point = b'%s.i' % self.radix
504 508
505 509 if docket is not None:
506 510 self._docket = docket
507 511 self._docket_file = entry_point
508 512 else:
509 513 self._initempty = True
510 514 entry_data = self._get_data(entry_point, mmapindexthreshold)
511 515 if len(entry_data) > 0:
512 516 header = INDEX_HEADER.unpack(entry_data[:4])[0]
513 517 self._initempty = False
514 518 else:
515 519 header = new_header
516 520
517 521 self._format_flags = header & ~0xFFFF
518 522 self._format_version = header & 0xFFFF
519 523
520 524 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
521 525 if supported_flags is None:
522 526 msg = _(b'unknown version (%d) in revlog %s')
523 527 msg %= (self._format_version, self.display_id)
524 528 raise error.RevlogError(msg)
525 529 elif self._format_flags & ~supported_flags:
526 530 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
527 531 display_flag = self._format_flags >> 16
528 532 msg %= (display_flag, self._format_version, self.display_id)
529 533 raise error.RevlogError(msg)
530 534
531 535 features = FEATURES_BY_VERSION[self._format_version]
532 536 self._inline = features[b'inline'](self._format_flags)
533 537 self._generaldelta = features[b'generaldelta'](self._format_flags)
534 538 self.hassidedata = features[b'sidedata']
535 539
536 540 if not features[b'docket']:
537 541 self._indexfile = entry_point
538 542 index_data = entry_data
539 543 else:
540 544 self._docket_file = entry_point
541 545 if self._initempty:
542 546 self._docket = docketutil.default_docket(self, header)
543 547 else:
544 548 self._docket = docketutil.parse_docket(
545 549 self, entry_data, use_pending=self._trypending
546 550 )
547 551
548 552 if self._docket is not None:
549 553 self._indexfile = self._docket.index_filepath()
550 554 index_data = b''
551 555 index_size = self._docket.index_end
552 556 if index_size > 0:
553 557 index_data = self._get_data(
554 558 self._indexfile, mmapindexthreshold, size=index_size
555 559 )
556 560 if len(index_data) < index_size:
557 561 msg = _(b'too few index data for %s: got %d, expected %d')
558 562 msg %= (self.display_id, len(index_data), index_size)
559 563 raise error.RevlogError(msg)
560 564
561 565 self._inline = False
562 566 # generaldelta implied by version 2 revlogs.
563 567 self._generaldelta = True
564 568 # the logic for persistent nodemap will be dealt with within the
565 569 # main docket, so disable it for now.
566 570 self._nodemap_file = None
567 571
568 572 if self._docket is not None:
569 573 self._datafile = self._docket.data_filepath()
570 574 self._sidedatafile = self._docket.sidedata_filepath()
571 575 elif self.postfix is None:
572 576 self._datafile = b'%s.d' % self.radix
573 577 else:
574 578 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
575 579
576 580 self.nodeconstants = sha1nodeconstants
577 581 self.nullid = self.nodeconstants.nullid
578 582
579 583 # sparse-revlog can't be on without general-delta (issue6056)
580 584 if not self._generaldelta:
581 585 self._sparserevlog = False
582 586
583 587 self._storedeltachains = True
584 588
585 589 devel_nodemap = (
586 590 self._nodemap_file
587 591 and force_nodemap
588 592 and parse_index_v1_nodemap is not None
589 593 )
590 594
591 595 use_rust_index = False
592 596 if rustrevlog is not None:
593 597 if self._nodemap_file is not None:
594 598 use_rust_index = True
595 599 else:
596 600 use_rust_index = self.opener.options.get(b'rust.index')
597 601
598 602 self._parse_index = parse_index_v1
599 603 if self._format_version == REVLOGV0:
600 604 self._parse_index = revlogv0.parse_index_v0
601 605 elif self._format_version == REVLOGV2:
602 606 self._parse_index = parse_index_v2
603 607 elif self._format_version == CHANGELOGV2:
604 608 self._parse_index = parse_index_cl_v2
605 609 elif devel_nodemap:
606 610 self._parse_index = parse_index_v1_nodemap
607 611 elif use_rust_index:
608 612 self._parse_index = parse_index_v1_mixed
609 613 try:
610 614 d = self._parse_index(index_data, self._inline)
611 615 index, chunkcache = d
612 616 use_nodemap = (
613 617 not self._inline
614 618 and self._nodemap_file is not None
615 619 and util.safehasattr(index, 'update_nodemap_data')
616 620 )
617 621 if use_nodemap:
618 622 nodemap_data = nodemaputil.persisted_data(self)
619 623 if nodemap_data is not None:
620 624 docket = nodemap_data[0]
621 625 if (
622 626 len(d[0]) > docket.tip_rev
623 627 and d[0][docket.tip_rev][7] == docket.tip_node
624 628 ):
625 629 # no changelog tampering
626 630 self._nodemap_docket = docket
627 631 index.update_nodemap_data(*nodemap_data)
628 632 except (ValueError, IndexError):
629 633 raise error.RevlogError(
630 634 _(b"index %s is corrupted") % self.display_id
631 635 )
632 636 self.index = index
633 637 self._segmentfile = randomaccessfile.randomaccessfile(
634 638 self.opener,
635 639 (self._indexfile if self._inline else self._datafile),
636 640 self._chunkcachesize,
637 641 chunkcache,
638 642 )
639 643 self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
640 644 self.opener,
641 645 self._sidedatafile,
642 646 self._chunkcachesize,
643 647 )
644 648 # revnum -> (chain-length, sum-delta-length)
645 649 self._chaininfocache = util.lrucachedict(500)
646 650 # revlog header -> revlog compressor
647 651 self._decompressors = {}
648 652
649 653 @util.propertycache
650 654 def revlog_kind(self):
651 655 return self.target[0]
652 656
653 657 @util.propertycache
654 658 def display_id(self):
655 659 """The public facing "ID" of the revlog that we use in message"""
656 660 if self.revlog_kind == KIND_FILELOG:
657 661 # Reference the file without the "data/" prefix, so it is familiar
658 662 # to the user.
659 663 return self.target[1]
660 664 else:
661 665 return self.radix
662 666
663 667 def _get_decompressor(self, t):
664 668 try:
665 669 compressor = self._decompressors[t]
666 670 except KeyError:
667 671 try:
668 672 engine = util.compengines.forrevlogheader(t)
669 673 compressor = engine.revlogcompressor(self._compengineopts)
670 674 self._decompressors[t] = compressor
671 675 except KeyError:
672 676 raise error.RevlogError(
673 677 _(b'unknown compression type %s') % binascii.hexlify(t)
674 678 )
675 679 return compressor
676 680
677 681 @util.propertycache
678 682 def _compressor(self):
679 683 engine = util.compengines[self._compengine]
680 684 return engine.revlogcompressor(self._compengineopts)
681 685
682 686 @util.propertycache
683 687 def _decompressor(self):
684 688 """the default decompressor"""
685 689 if self._docket is None:
686 690 return None
687 691 t = self._docket.default_compression_header
688 692 c = self._get_decompressor(t)
689 693 return c.decompress
690 694
691 695 def _indexfp(self):
692 696 """file object for the revlog's index file"""
693 697 return self.opener(self._indexfile, mode=b"r")
694 698
695 699 def __index_write_fp(self):
696 700 # You should not use this directly and use `_writing` instead
697 701 try:
698 702 f = self.opener(
699 703 self._indexfile, mode=b"r+", checkambig=self._checkambig
700 704 )
701 705 if self._docket is None:
702 706 f.seek(0, os.SEEK_END)
703 707 else:
704 708 f.seek(self._docket.index_end, os.SEEK_SET)
705 709 return f
706 710 except FileNotFoundError:
707 711 return self.opener(
708 712 self._indexfile, mode=b"w+", checkambig=self._checkambig
709 713 )
710 714
711 715 def __index_new_fp(self):
712 716 # You should not use this unless you are upgrading from inline revlog
713 717 return self.opener(
714 718 self._indexfile,
715 719 mode=b"w",
716 720 checkambig=self._checkambig,
717 721 atomictemp=True,
718 722 )
719 723
720 724 def _datafp(self, mode=b'r'):
721 725 """file object for the revlog's data file"""
722 726 return self.opener(self._datafile, mode=mode)
723 727
724 728 @contextlib.contextmanager
725 729 def _sidedatareadfp(self):
726 730 """file object suitable to read sidedata"""
727 731 if self._writinghandles:
728 732 yield self._writinghandles[2]
729 733 else:
730 734 with self.opener(self._sidedatafile) as fp:
731 735 yield fp
732 736
733 737 def tiprev(self):
734 738 return len(self.index) - 1
735 739
736 740 def tip(self):
737 741 return self.node(self.tiprev())
738 742
739 743 def __contains__(self, rev):
740 744 return 0 <= rev < len(self)
741 745
742 746 def __len__(self):
743 747 return len(self.index)
744 748
745 749 def __iter__(self):
746 750 return iter(range(len(self)))
747 751
748 752 def revs(self, start=0, stop=None):
749 753 """iterate over all rev in this revlog (from start to stop)"""
750 754 return storageutil.iterrevs(len(self), start=start, stop=stop)
751 755
752 756 def hasnode(self, node):
753 757 try:
754 758 self.rev(node)
755 759 return True
756 760 except KeyError:
757 761 return False
758 762
759 763 def candelta(self, baserev, rev):
760 764 """whether two revisions (baserev, rev) can be delta-ed or not"""
761 765 # Disable delta if either rev requires a content-changing flag
762 766 # processor (ex. LFS). This is because such a flag processor can alter
763 767 # the rawtext content that the delta will be based on, and two clients
764 768 # could have the same revlog node with different flags (i.e. different
765 769 # rawtext contents), making the delta incompatible.
766 770 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
767 771 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
768 772 ):
769 773 return False
770 774 return True
771 775
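The check above is a plain bitmask test against the set of rawtext-changing flags. A minimal standalone sketch of the same idea, using hypothetical flag values (the real constants are defined by the revlog flag-processing code, not here):

# Hypothetical flag values, for illustration only.
REVIDX_EXTSTORED = 1 << 13
REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_EXTSTORED

def can_delta(base_flags, rev_flags):
    # A delta is unsafe if either revision's rawtext may be rewritten by
    # a flag processor (e.g. LFS): two clients can then hold the same
    # node with different stored bytes, so deltas cannot be shared.
    changing = REVIDX_RAWTEXT_CHANGING_FLAGS
    return not ((base_flags & changing) or (rev_flags & changing))

assert can_delta(0, 0)
assert not can_delta(0, REVIDX_EXTSTORED)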
772 776 def update_caches(self, transaction):
773 777 if self._nodemap_file is not None:
774 778 if transaction is None:
775 779 nodemaputil.update_persistent_nodemap(self)
776 780 else:
777 781 nodemaputil.setup_persistent_nodemap(transaction, self)
778 782
779 783 def clearcaches(self):
780 784 self._revisioncache = None
781 785 self._chainbasecache.clear()
782 786 self._segmentfile.clear_cache()
783 787 self._segmentfile_sidedata.clear_cache()
784 788 self._pcache = {}
785 789 self._nodemap_docket = None
786 790 self.index.clearcaches()
787 791 # The Python code is the one responsible for validating the docket, so
788 792 # we end up having to refresh it here.
789 793 use_nodemap = (
790 794 not self._inline
791 795 and self._nodemap_file is not None
792 796 and util.safehasattr(self.index, 'update_nodemap_data')
793 797 )
794 798 if use_nodemap:
795 799 nodemap_data = nodemaputil.persisted_data(self)
796 800 if nodemap_data is not None:
797 801 self._nodemap_docket = nodemap_data[0]
798 802 self.index.update_nodemap_data(*nodemap_data)
799 803
800 804 def rev(self, node):
801 805 try:
802 806 return self.index.rev(node)
803 807 except TypeError:
804 808 raise
805 809 except error.RevlogError:
806 810 # parsers.c radix tree lookup failed
807 811 if (
808 812 node == self.nodeconstants.wdirid
809 813 or node in self.nodeconstants.wdirfilenodeids
810 814 ):
811 815 raise error.WdirUnsupported
812 816 raise error.LookupError(node, self.display_id, _(b'no node'))
813 817
814 818 # Accessors for index entries.
815 819
816 820 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
817 821 # are flags.
818 822 def start(self, rev):
819 823 return int(self.index[rev][0] >> 16)
820 824
821 825 def sidedata_cut_off(self, rev):
822 826 sd_cut_off = self.index[rev][8]
823 827 if sd_cut_off != 0:
824 828 return sd_cut_off
825 829 # This is some annoying dance, because entries without sidedata
826 830 # currently use 0 as their offset (instead of previous-offset +
827 831 # previous-size).
828 832 #
829 833 # We should reconsider this "no sidedata → 0 as sidedata_offset" policy.
830 834 # In the meantime, we need this.
831 835 while 0 <= rev:
832 836 e = self.index[rev]
833 837 if e[9] != 0:
834 838 return e[8] + e[9]
835 839 rev -= 1
836 840 return 0
837 841
838 842 def flags(self, rev):
839 843 return self.index[rev][0] & 0xFFFF
840 844
841 845 def length(self, rev):
842 846 return self.index[rev][1]
843 847
844 848 def sidedata_length(self, rev):
845 849 if not self.hassidedata:
846 850 return 0
847 851 return self.index[rev][9]
848 852
849 853 def rawsize(self, rev):
850 854 """return the length of the uncompressed text for a given revision"""
851 855 l = self.index[rev][2]
852 856 if l >= 0:
853 857 return l
854 858
855 859 t = self.rawdata(rev)
856 860 return len(t)
857 861
858 862 def size(self, rev):
859 863 """length of non-raw text (processed by a "read" flag processor)"""
860 864 # fast path: if no "read" flag processor could change the content,
861 865 # size is rawsize. note: ELLIPSIS is known to not change the content.
862 866 flags = self.flags(rev)
863 867 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
864 868 return self.rawsize(rev)
865 869
866 870 return len(self.revision(rev))
867 871
868 872 def fast_rank(self, rev):
869 873 """Return the rank of a revision if already known, or None otherwise.
870 874
871 875 The rank of a revision is the size of the sub-graph it defines as a
872 876 head. Equivalently, the rank of a revision `r` is the size of the set
873 877 `ancestors(r)`, `r` included.
874 878
875 879 This method returns the rank retrieved from the revlog in constant
876 880 time. It makes no attempt at computing unknown values for versions of
877 881 the revlog which do not persist the rank.
878 882 """
879 883 rank = self.index[rev][ENTRY_RANK]
880 884 if self._format_version != CHANGELOGV2 or rank == RANK_UNKNOWN:
881 885 return None
882 886 if rev == nullrev:
883 887 return 0 # convention
884 888 return rank
885 889
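As a concrete illustration of the definition above, here is a pure-Python sketch that computes the rank of every revision of a toy DAG by counting `ancestors(r)` with `r` included (independent of the revlog storage, which merely persists the value):

def ranks(parents):
    # parents[r] is a tuple of parent revisions, with -1 for null
    res = []
    for r in range(len(parents)):
        seen = set()
        stack = [r]
        while stack:
            cur = stack.pop()
            if cur != -1 and cur not in seen:
                seen.add(cur)
                stack.extend(parents[cur])
        res.append(len(seen))
    return res

# a linear chain 0 <- 1 <- 2, plus 3 merging 1 and 2
print(ranks([(-1, -1), (0, -1), (1, -1), (1, 2)]))  # [1, 2, 3, 4]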
886 890 def chainbase(self, rev):
887 891 base = self._chainbasecache.get(rev)
888 892 if base is not None:
889 893 return base
890 894
891 895 index = self.index
892 896 iterrev = rev
893 897 base = index[iterrev][3]
894 898 while base != iterrev:
895 899 iterrev = base
896 900 base = index[iterrev][3]
897 901
898 902 self._chainbasecache[rev] = base
899 903 return base
900 904
901 905 def linkrev(self, rev):
902 906 return self.index[rev][4]
903 907
904 908 def parentrevs(self, rev):
905 909 try:
906 910 entry = self.index[rev]
907 911 except IndexError:
908 912 if rev == wdirrev:
909 913 raise error.WdirUnsupported
910 914 raise
911 915
912 916 if self.canonical_parent_order and entry[5] == nullrev:
913 917 return entry[6], entry[5]
914 918 else:
915 919 return entry[5], entry[6]
916 920
917 921 # fast parentrevs(rev) where rev isn't filtered
918 922 _uncheckedparentrevs = parentrevs
919 923
920 924 def node(self, rev):
921 925 try:
922 926 return self.index[rev][7]
923 927 except IndexError:
924 928 if rev == wdirrev:
925 929 raise error.WdirUnsupported
926 930 raise
927 931
928 932 # Derived from index values.
929 933
930 934 def end(self, rev):
931 935 return self.start(rev) + self.length(rev)
932 936
933 937 def parents(self, node):
934 938 i = self.index
935 939 d = i[self.rev(node)]
936 940 # inline node() to avoid function call overhead
937 941 if self.canonical_parent_order and d[5] == self.nullid:
938 942 return i[d[6]][7], i[d[5]][7]
939 943 else:
940 944 return i[d[5]][7], i[d[6]][7]
941 945
942 946 def chainlen(self, rev):
943 947 return self._chaininfo(rev)[0]
944 948
945 949 def _chaininfo(self, rev):
946 950 chaininfocache = self._chaininfocache
947 951 if rev in chaininfocache:
948 952 return chaininfocache[rev]
949 953 index = self.index
950 954 generaldelta = self._generaldelta
951 955 iterrev = rev
952 956 e = index[iterrev]
953 957 clen = 0
954 958 compresseddeltalen = 0
955 959 while iterrev != e[3]:
956 960 clen += 1
957 961 compresseddeltalen += e[1]
958 962 if generaldelta:
959 963 iterrev = e[3]
960 964 else:
961 965 iterrev -= 1
962 966 if iterrev in chaininfocache:
963 967 t = chaininfocache[iterrev]
964 968 clen += t[0]
965 969 compresseddeltalen += t[1]
966 970 break
967 971 e = index[iterrev]
968 972 else:
969 973 # Add text length of base since decompressing that also takes
970 974 # work. For cache hits the length is already included.
971 975 compresseddeltalen += e[1]
972 976 r = (clen, compresseddeltalen)
973 977 chaininfocache[rev] = r
974 978 return r
975 979
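A minimal sketch of the walk above over a toy index, assuming general delta and index entries reduced to `(complen, base)` pairs (`base == rev` marks a full text), without the cache:

def chain_info(index, rev):
    # returns (delta chain length, total compressed length to decompress)
    clen = 0
    total = 0
    base = index[rev][1]
    while rev != base:
        clen += 1
        total += index[rev][0]
        rev = base
        base = index[rev][1]
    # add the base text length, since decompressing it also takes work
    total += index[rev][0]
    return clen, total

# rev 0 is a 100-byte full text; revs 1 and 2 delta against their parent
toy = [(100, 0), (10, 0), (8, 1)]
print(chain_info(toy, 2))  # (2, 118)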
976 980 def _deltachain(self, rev, stoprev=None):
977 981 """Obtain the delta chain for a revision.
978 982
979 983 ``stoprev`` specifies a revision to stop at. If not specified, we
980 984 stop at the base of the chain.
981 985
982 986 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
983 987 revs in ascending order and ``stopped`` is a bool indicating whether
984 988 ``stoprev`` was hit.
985 989 """
986 990 # Try C implementation.
987 991 try:
988 992 return self.index.deltachain(rev, stoprev, self._generaldelta)
989 993 except AttributeError:
990 994 pass
991 995
992 996 chain = []
993 997
994 998 # Alias to prevent attribute lookup in tight loop.
995 999 index = self.index
996 1000 generaldelta = self._generaldelta
997 1001
998 1002 iterrev = rev
999 1003 e = index[iterrev]
1000 1004 while iterrev != e[3] and iterrev != stoprev:
1001 1005 chain.append(iterrev)
1002 1006 if generaldelta:
1003 1007 iterrev = e[3]
1004 1008 else:
1005 1009 iterrev -= 1
1006 1010 e = index[iterrev]
1007 1011
1008 1012 if iterrev == stoprev:
1009 1013 stopped = True
1010 1014 else:
1011 1015 chain.append(iterrev)
1012 1016 stopped = False
1013 1017
1014 1018 chain.reverse()
1015 1019 return chain, stopped
1016 1020
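For illustration, a standalone sketch of the pure-Python fallback for the general-delta case, reduced to an array of delta bases, showing the `(chain, stopped)` contract from the docstring:

def delta_chain(bases, rev, stoprev=None):
    # bases[r] is the delta base of r; bases[r] == r marks a full text
    chain = []
    while bases[rev] != rev and rev != stoprev:
        chain.append(rev)
        rev = bases[rev]
    stopped = rev == stoprev
    if not stopped:
        chain.append(rev)
    chain.reverse()
    return chain, stopped

bases = [0, 0, 1, 2]
print(delta_chain(bases, 3))             # ([0, 1, 2, 3], False)
print(delta_chain(bases, 3, stoprev=1))  # ([2, 3], True)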
1017 1021 def ancestors(self, revs, stoprev=0, inclusive=False):
1018 1022 """Generate the ancestors of 'revs' in reverse revision order.
1019 1023 Does not generate revs lower than stoprev.
1020 1024
1021 1025 See the documentation for ancestor.lazyancestors for more details."""
1022 1026
1023 1027 # first, make sure start revisions aren't filtered
1024 1028 revs = list(revs)
1025 1029 checkrev = self.node
1026 1030 for r in revs:
1027 1031 checkrev(r)
1028 1032 # and we're sure ancestors aren't filtered as well
1029 1033
1030 1034 if rustancestor is not None and self.index.rust_ext_compat:
1031 1035 lazyancestors = rustancestor.LazyAncestors
1032 1036 arg = self.index
1033 1037 else:
1034 1038 lazyancestors = ancestor.lazyancestors
1035 1039 arg = self._uncheckedparentrevs
1036 1040 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1037 1041
1038 1042 def descendants(self, revs):
1039 1043 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1040 1044
1041 1045 def findcommonmissing(self, common=None, heads=None):
1042 1046 """Return a tuple of the ancestors of common and the ancestors of heads
1043 1047 that are not ancestors of common. In revset terminology, we return the
1044 1048 tuple:
1045 1049
1046 1050 ::common, (::heads) - (::common)
1047 1051
1048 1052 The list is sorted by revision number, meaning it is
1049 1053 topologically sorted.
1050 1054
1051 1055 'heads' and 'common' are both lists of node IDs. If heads is
1052 1056 not supplied, uses all of the revlog's heads. If common is not
1053 1057 supplied, uses nullid."""
1054 1058 if common is None:
1055 1059 common = [self.nullid]
1056 1060 if heads is None:
1057 1061 heads = self.heads()
1058 1062
1059 1063 common = [self.rev(n) for n in common]
1060 1064 heads = [self.rev(n) for n in heads]
1061 1065
1062 1066 # we want the ancestors, but inclusive
1063 1067 class lazyset:
1064 1068 def __init__(self, lazyvalues):
1065 1069 self.addedvalues = set()
1066 1070 self.lazyvalues = lazyvalues
1067 1071
1068 1072 def __contains__(self, value):
1069 1073 return value in self.addedvalues or value in self.lazyvalues
1070 1074
1071 1075 def __iter__(self):
1072 1076 added = self.addedvalues
1073 1077 for r in added:
1074 1078 yield r
1075 1079 for r in self.lazyvalues:
1076 1080 if r not in added:
1077 1081 yield r
1078 1082
1079 1083 def add(self, value):
1080 1084 self.addedvalues.add(value)
1081 1085
1082 1086 def update(self, values):
1083 1087 self.addedvalues.update(values)
1084 1088
1085 1089 has = lazyset(self.ancestors(common))
1086 1090 has.add(nullrev)
1087 1091 has.update(common)
1088 1092
1089 1093 # take all ancestors from heads that aren't in has
1090 1094 missing = set()
1091 1095 visit = collections.deque(r for r in heads if r not in has)
1092 1096 while visit:
1093 1097 r = visit.popleft()
1094 1098 if r in missing:
1095 1099 continue
1096 1100 else:
1097 1101 missing.add(r)
1098 1102 for p in self.parentrevs(r):
1099 1103 if p not in has:
1100 1104 visit.append(p)
1101 1105 missing = list(missing)
1102 1106 missing.sort()
1103 1107 return has, [self.node(miss) for miss in missing]
1104 1108
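A standalone sketch of the second half of the computation above: build the inclusive ancestor set of `common`, then walk ancestors of `heads` while pruning anything already in that set (toy DAG, revision numbers only):

import collections

def missing_revs(parents, common, heads):
    # inclusive ancestor set of ``common``
    has = set()
    stack = list(common)
    while stack:
        r = stack.pop()
        if r != -1 and r not in has:
            has.add(r)
            stack.extend(parents[r])
    # ancestors of ``heads`` that are not in ``has``
    missing = set()
    visit = collections.deque(r for r in heads if r not in has)
    while visit:
        r = visit.popleft()
        if r in missing:
            continue
        missing.add(r)
        for p in parents[r]:
            if p != -1 and p not in has:
                visit.append(p)
    return sorted(missing)

parents = [(-1, -1), (0, -1), (1, -1), (2, -1), (1, -1)]
print(missing_revs(parents, common=[4], heads=[3]))  # [2, 3]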
1105 1109 def incrementalmissingrevs(self, common=None):
1106 1110 """Return an object that can be used to incrementally compute the
1107 1111 revision numbers of the ancestors of arbitrary sets that are not
1108 1112 ancestors of common. This is an ancestor.incrementalmissingancestors
1109 1113 object.
1110 1114
1111 1115 'common' is a list of revision numbers. If common is not supplied, uses
1112 1116 nullrev.
1113 1117 """
1114 1118 if common is None:
1115 1119 common = [nullrev]
1116 1120
1117 1121 if rustancestor is not None and self.index.rust_ext_compat:
1118 1122 return rustancestor.MissingAncestors(self.index, common)
1119 1123 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1120 1124
1121 1125 def findmissingrevs(self, common=None, heads=None):
1122 1126 """Return the revision numbers of the ancestors of heads that
1123 1127 are not ancestors of common.
1124 1128
1125 1129 More specifically, return a list of revision numbers corresponding to
1126 1130 nodes N such that every N satisfies the following constraints:
1127 1131
1128 1132 1. N is an ancestor of some node in 'heads'
1129 1133 2. N is not an ancestor of any node in 'common'
1130 1134
1131 1135 The list is sorted by revision number, meaning it is
1132 1136 topologically sorted.
1133 1137
1134 1138 'heads' and 'common' are both lists of revision numbers. If heads is
1135 1139 not supplied, uses all of the revlog's heads. If common is not
1136 1140 supplied, uses nullrev."""
1137 1141 if common is None:
1138 1142 common = [nullrev]
1139 1143 if heads is None:
1140 1144 heads = self.headrevs()
1141 1145
1142 1146 inc = self.incrementalmissingrevs(common=common)
1143 1147 return inc.missingancestors(heads)
1144 1148
1145 1149 def findmissing(self, common=None, heads=None):
1146 1150 """Return the ancestors of heads that are not ancestors of common.
1147 1151
1148 1152 More specifically, return a list of nodes N such that every N
1149 1153 satisfies the following constraints:
1150 1154
1151 1155 1. N is an ancestor of some node in 'heads'
1152 1156 2. N is not an ancestor of any node in 'common'
1153 1157
1154 1158 The list is sorted by revision number, meaning it is
1155 1159 topologically sorted.
1156 1160
1157 1161 'heads' and 'common' are both lists of node IDs. If heads is
1158 1162 not supplied, uses all of the revlog's heads. If common is not
1159 1163 supplied, uses nullid."""
1160 1164 if common is None:
1161 1165 common = [self.nullid]
1162 1166 if heads is None:
1163 1167 heads = self.heads()
1164 1168
1165 1169 common = [self.rev(n) for n in common]
1166 1170 heads = [self.rev(n) for n in heads]
1167 1171
1168 1172 inc = self.incrementalmissingrevs(common=common)
1169 1173 return [self.node(r) for r in inc.missingancestors(heads)]
1170 1174
1171 1175 def nodesbetween(self, roots=None, heads=None):
1172 1176 """Return a topological path from 'roots' to 'heads'.
1173 1177
1174 1178 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1175 1179 topologically sorted list of all nodes N that satisfy both of
1176 1180 these constraints:
1177 1181
1178 1182 1. N is a descendant of some node in 'roots'
1179 1183 2. N is an ancestor of some node in 'heads'
1180 1184
1181 1185 Every node is considered to be both a descendant and an ancestor
1182 1186 of itself, so every reachable node in 'roots' and 'heads' will be
1183 1187 included in 'nodes'.
1184 1188
1185 1189 'outroots' is the list of reachable nodes in 'roots', i.e., the
1186 1190 subset of 'roots' that is returned in 'nodes'. Likewise,
1187 1191 'outheads' is the subset of 'heads' that is also in 'nodes'.
1188 1192
1189 1193 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1190 1194 unspecified, uses nullid as the only root. If 'heads' is
1191 1195 unspecified, uses list of all of the revlog's heads."""
1192 1196 nonodes = ([], [], [])
1193 1197 if roots is not None:
1194 1198 roots = list(roots)
1195 1199 if not roots:
1196 1200 return nonodes
1197 1201 lowestrev = min([self.rev(n) for n in roots])
1198 1202 else:
1199 1203 roots = [self.nullid] # Everybody's a descendant of nullid
1200 1204 lowestrev = nullrev
1201 1205 if (lowestrev == nullrev) and (heads is None):
1202 1206 # We want _all_ the nodes!
1203 1207 return (
1204 1208 [self.node(r) for r in self],
1205 1209 [self.nullid],
1206 1210 list(self.heads()),
1207 1211 )
1208 1212 if heads is None:
1209 1213 # All nodes are ancestors, so the latest ancestor is the last
1210 1214 # node.
1211 1215 highestrev = len(self) - 1
1212 1216 # Set ancestors to None to signal that every node is an ancestor.
1213 1217 ancestors = None
1214 1218 # Set heads to an empty dictionary for later discovery of heads
1215 1219 heads = {}
1216 1220 else:
1217 1221 heads = list(heads)
1218 1222 if not heads:
1219 1223 return nonodes
1220 1224 ancestors = set()
1221 1225 # Turn heads into a dictionary so we can remove 'fake' heads.
1222 1226 # Also, later we will be using it to filter out the heads we can't
1223 1227 # find from roots.
1224 1228 heads = dict.fromkeys(heads, False)
1225 1229 # Start at the top and keep marking parents until we're done.
1226 1230 nodestotag = set(heads)
1227 1231 # Remember where the top was so we can use it as a limit later.
1228 1232 highestrev = max([self.rev(n) for n in nodestotag])
1229 1233 while nodestotag:
1230 1234 # grab a node to tag
1231 1235 n = nodestotag.pop()
1232 1236 # Never tag nullid
1233 1237 if n == self.nullid:
1234 1238 continue
1235 1239 # A node's revision number represents its place in a
1236 1240 # topologically sorted list of nodes.
1237 1241 r = self.rev(n)
1238 1242 if r >= lowestrev:
1239 1243 if n not in ancestors:
1240 1244 # If we are possibly a descendant of one of the roots
1241 1245 # and we haven't already been marked as an ancestor
1242 1246 ancestors.add(n) # Mark as ancestor
1243 1247 # Add non-nullid parents to list of nodes to tag.
1244 1248 nodestotag.update(
1245 1249 [p for p in self.parents(n) if p != self.nullid]
1246 1250 )
1247 1251 elif n in heads: # We've seen it before; is it a fake head?
1248 1252 # So it is, real heads should not be the ancestors of
1249 1253 # any other heads.
1250 1254 heads.pop(n)
1251 1255 if not ancestors:
1252 1256 return nonodes
1253 1257 # Now that we have our set of ancestors, we want to remove any
1254 1258 # roots that are not ancestors.
1255 1259
1256 1260 # If one of the roots was nullid, everything is included anyway.
1257 1261 if lowestrev > nullrev:
1258 1262 # But, since we weren't, let's recompute the lowest rev to not
1259 1263 # include roots that aren't ancestors.
1260 1264
1261 1265 # Filter out roots that aren't ancestors of heads
1262 1266 roots = [root for root in roots if root in ancestors]
1263 1267 # Recompute the lowest revision
1264 1268 if roots:
1265 1269 lowestrev = min([self.rev(root) for root in roots])
1266 1270 else:
1267 1271 # No more roots? Return empty list
1268 1272 return nonodes
1269 1273 else:
1270 1274 # We are descending from nullid, and don't need to care about
1271 1275 # any other roots.
1272 1276 lowestrev = nullrev
1273 1277 roots = [self.nullid]
1274 1278 # Transform our roots list into a set.
1275 1279 descendants = set(roots)
1276 1280 # Also, keep the original roots so we can filter out roots that aren't
1277 1281 # 'real' roots (i.e. are descended from other roots).
1278 1282 roots = descendants.copy()
1279 1283 # Our topologically sorted list of output nodes.
1280 1284 orderedout = []
1281 1285 # Don't start at nullid since we don't want nullid in our output list,
1282 1286 # and if nullid shows up in descendants, empty parents will look like
1283 1287 # they're descendants.
1284 1288 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1285 1289 n = self.node(r)
1286 1290 isdescendant = False
1287 1291 if lowestrev == nullrev: # Everybody is a descendant of nullid
1288 1292 isdescendant = True
1289 1293 elif n in descendants:
1290 1294 # n is already a descendant
1291 1295 isdescendant = True
1292 1296 # This check only needs to be done here because all the roots
1293 1297 # will start being marked as descendants before the loop.
1294 1298 if n in roots:
1295 1299 # If n was a root, check if it's a 'real' root.
1296 1300 p = tuple(self.parents(n))
1297 1301 # If any of its parents are descendants, it's not a root.
1298 1302 if (p[0] in descendants) or (p[1] in descendants):
1299 1303 roots.remove(n)
1300 1304 else:
1301 1305 p = tuple(self.parents(n))
1302 1306 # A node is a descendant if either of its parents is a
1303 1307 # descendant. (We seeded the descendants set with the roots
1304 1308 # up there, remember?)
1305 1309 if (p[0] in descendants) or (p[1] in descendants):
1306 1310 descendants.add(n)
1307 1311 isdescendant = True
1308 1312 if isdescendant and ((ancestors is None) or (n in ancestors)):
1309 1313 # Only include nodes that are both descendants and ancestors.
1310 1314 orderedout.append(n)
1311 1315 if (ancestors is not None) and (n in heads):
1312 1316 # We're trying to figure out which heads are reachable
1313 1317 # from roots.
1314 1318 # Mark this head as having been reached
1315 1319 heads[n] = True
1316 1320 elif ancestors is None:
1317 1321 # Otherwise, we're trying to discover the heads.
1318 1322 # Assume this is a head because if it isn't, the next step
1319 1323 # will eventually remove it.
1320 1324 heads[n] = True
1321 1325 # But, obviously its parents aren't.
1322 1326 for p in self.parents(n):
1323 1327 heads.pop(p, None)
1324 1328 heads = [head for head, flag in heads.items() if flag]
1325 1329 roots = list(roots)
1326 1330 assert orderedout
1327 1331 assert roots
1328 1332 assert heads
1329 1333 return (orderedout, roots, heads)
1330 1334
1331 1335 def headrevs(self, revs=None):
1332 1336 if revs is None:
1333 1337 try:
1334 1338 return self.index.headrevs()
1335 1339 except AttributeError:
1336 1340 return self._headrevs()
1337 1341 if rustdagop is not None and self.index.rust_ext_compat:
1338 1342 return rustdagop.headrevs(self.index, revs)
1339 1343 return dagop.headrevs(revs, self._uncheckedparentrevs)
1340 1344
1341 1345 def computephases(self, roots):
1342 1346 return self.index.computephasesmapsets(roots)
1343 1347
1344 1348 def _headrevs(self):
1345 1349 count = len(self)
1346 1350 if not count:
1347 1351 return [nullrev]
1348 1352 # we won't iter over filtered rev so nobody is a head at start
1349 1353 ishead = [0] * (count + 1)
1350 1354 index = self.index
1351 1355 for r in self:
1352 1356 ishead[r] = 1 # I may be a head
1353 1357 e = index[r]
1354 1358 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1355 1359 return [r for r, val in enumerate(ishead) if val]
1356 1360
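The pure-Python fallback above works in a single linear pass: every revision starts as a candidate head and is cleared as soon as it shows up as somebody's parent. A minimal standalone version of that pass:

def head_revs(parents):
    # parents[r] is a (p1, p2) pair of revision numbers, -1 for null
    ishead = [True] * len(parents)
    for p1, p2 in parents:
        for p in (p1, p2):
            if p != -1:
                ishead[p] = False  # a parent cannot be a head
    return [r for r, flag in enumerate(ishead) if flag]

# 0 <- 1 <- 2, with a second branch 0 <- 3
print(head_revs([(-1, -1), (0, -1), (1, -1), (0, -1)]))  # [2, 3]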
1357 1361 def heads(self, start=None, stop=None):
1358 1362 """return the list of all nodes that have no children
1359 1363
1360 1364 if start is specified, only heads that are descendants of
1361 1365 start will be returned
1362 1366 if stop is specified, it will consider all the revs from stop
1363 1367 as if they had no children
1364 1368 """
1365 1369 if start is None and stop is None:
1366 1370 if not len(self):
1367 1371 return [self.nullid]
1368 1372 return [self.node(r) for r in self.headrevs()]
1369 1373
1370 1374 if start is None:
1371 1375 start = nullrev
1372 1376 else:
1373 1377 start = self.rev(start)
1374 1378
1375 1379 stoprevs = {self.rev(n) for n in stop or []}
1376 1380
1377 1381 revs = dagop.headrevssubset(
1378 1382 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1379 1383 )
1380 1384
1381 1385 return [self.node(rev) for rev in revs]
1382 1386
1383 1387 def children(self, node):
1384 1388 """find the children of a given node"""
1385 1389 c = []
1386 1390 p = self.rev(node)
1387 1391 for r in self.revs(start=p + 1):
1388 1392 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1389 1393 if prevs:
1390 1394 for pr in prevs:
1391 1395 if pr == p:
1392 1396 c.append(self.node(r))
1393 1397 elif p == nullrev:
1394 1398 c.append(self.node(r))
1395 1399 return c
1396 1400
1397 1401 def commonancestorsheads(self, a, b):
1398 1402 """calculate all the heads of the common ancestors of nodes a and b"""
1399 1403 a, b = self.rev(a), self.rev(b)
1400 1404 ancs = self._commonancestorsheads(a, b)
1401 1405 return pycompat.maplist(self.node, ancs)
1402 1406
1403 1407 def _commonancestorsheads(self, *revs):
1404 1408 """calculate all the heads of the common ancestors of revs"""
1405 1409 try:
1406 1410 ancs = self.index.commonancestorsheads(*revs)
1407 1411 except (AttributeError, OverflowError): # C implementation failed
1408 1412 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1409 1413 return ancs
1410 1414
1411 1415 def isancestor(self, a, b):
1412 1416 """return True if node a is an ancestor of node b
1413 1417
1414 1418 A revision is considered an ancestor of itself."""
1415 1419 a, b = self.rev(a), self.rev(b)
1416 1420 return self.isancestorrev(a, b)
1417 1421
1418 1422 def isancestorrev(self, a, b):
1419 1423 """return True if revision a is an ancestor of revision b
1420 1424
1421 1425 A revision is considered an ancestor of itself.
1422 1426
1423 1427 The implementation of this is trivial but the use of
1424 1428 reachableroots is not."""
1425 1429 if a == nullrev:
1426 1430 return True
1427 1431 elif a == b:
1428 1432 return True
1429 1433 elif a > b:
1430 1434 return False
1431 1435 return bool(self.reachableroots(a, [b], [a], includepath=False))
1432 1436
1433 1437 def reachableroots(self, minroot, heads, roots, includepath=False):
1434 1438 """return (heads(::(<roots> and <roots>::<heads>)))
1435 1439
1436 1440 If includepath is True, return (<roots>::<heads>)."""
1437 1441 try:
1438 1442 return self.index.reachableroots2(
1439 1443 minroot, heads, roots, includepath
1440 1444 )
1441 1445 except AttributeError:
1442 1446 return dagop._reachablerootspure(
1443 1447 self.parentrevs, minroot, roots, heads, includepath
1444 1448 )
1445 1449
1446 1450 def ancestor(self, a, b):
1447 1451 """calculate the "best" common ancestor of nodes a and b"""
1448 1452
1449 1453 a, b = self.rev(a), self.rev(b)
1450 1454 try:
1451 1455 ancs = self.index.ancestors(a, b)
1452 1456 except (AttributeError, OverflowError):
1453 1457 ancs = ancestor.ancestors(self.parentrevs, a, b)
1454 1458 if ancs:
1455 1459 # choose a consistent winner when there's a tie
1456 1460 return min(map(self.node, ancs))
1457 1461 return self.nullid
1458 1462
1459 1463 def _match(self, id):
1460 1464 if isinstance(id, int):
1461 1465 # rev
1462 1466 return self.node(id)
1463 1467 if len(id) == self.nodeconstants.nodelen:
1464 1468 # possibly a binary node
1465 1469 # odds of a binary node being all hex in ASCII are 1 in 10**25
1466 1470 try:
1467 1471 node = id
1468 1472 self.rev(node) # quick search the index
1469 1473 return node
1470 1474 except error.LookupError:
1471 1475 pass # may be partial hex id
1472 1476 try:
1473 1477 # str(rev)
1474 1478 rev = int(id)
1475 1479 if b"%d" % rev != id:
1476 1480 raise ValueError
1477 1481 if rev < 0:
1478 1482 rev = len(self) + rev
1479 1483 if rev < 0 or rev >= len(self):
1480 1484 raise ValueError
1481 1485 return self.node(rev)
1482 1486 except (ValueError, OverflowError):
1483 1487 pass
1484 1488 if len(id) == 2 * self.nodeconstants.nodelen:
1485 1489 try:
1486 1490 # a full hex nodeid?
1487 1491 node = bin(id)
1488 1492 self.rev(node)
1489 1493 return node
1490 1494 except (binascii.Error, error.LookupError):
1491 1495 pass
1492 1496
1493 1497 def _partialmatch(self, id):
1494 1498 # we don't care about wdirfilenodeids as they should always be full hashes
1495 1499 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1496 1500 ambiguous = False
1497 1501 try:
1498 1502 partial = self.index.partialmatch(id)
1499 1503 if partial and self.hasnode(partial):
1500 1504 if maybewdir:
1501 1505 # single 'ff...' match in radix tree, ambiguous with wdir
1502 1506 ambiguous = True
1503 1507 else:
1504 1508 return partial
1505 1509 elif maybewdir:
1506 1510 # no 'ff...' match in radix tree, wdir identified
1507 1511 raise error.WdirUnsupported
1508 1512 else:
1509 1513 return None
1510 1514 except error.RevlogError:
1511 1515 # parsers.c radix tree lookup gave multiple matches
1512 1516 # fast path: for unfiltered changelog, radix tree is accurate
1513 1517 if not getattr(self, 'filteredrevs', None):
1514 1518 ambiguous = True
1515 1519 # fall through to slow path that filters hidden revisions
1516 1520 except (AttributeError, ValueError):
1517 1521 # we are pure python, or key is not hex
1518 1522 pass
1519 1523 if ambiguous:
1520 1524 raise error.AmbiguousPrefixLookupError(
1521 1525 id, self.display_id, _(b'ambiguous identifier')
1522 1526 )
1523 1527
1524 1528 if id in self._pcache:
1525 1529 return self._pcache[id]
1526 1530
1527 1531 if len(id) <= 40:
1528 1532 # hex(node)[:...]
1529 1533 l = len(id) // 2 * 2 # grab an even number of digits
1530 1534 try:
1531 1535 # we're dropping the last digit, so let's check that it's hex,
1532 1536 # to avoid the expensive computation below if it's not
1533 1537 if len(id) % 2 > 0:
1534 1538 if not (id[-1] in hexdigits):
1535 1539 return None
1536 1540 prefix = bin(id[:l])
1537 1541 except binascii.Error:
1538 1542 pass
1539 1543 else:
1540 1544 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1541 1545 nl = [
1542 1546 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1543 1547 ]
1544 1548 if self.nodeconstants.nullhex.startswith(id):
1545 1549 nl.append(self.nullid)
1546 1550 if len(nl) > 0:
1547 1551 if len(nl) == 1 and not maybewdir:
1548 1552 self._pcache[id] = nl[0]
1549 1553 return nl[0]
1550 1554 raise error.AmbiguousPrefixLookupError(
1551 1555 id, self.display_id, _(b'ambiguous identifier')
1552 1556 )
1553 1557 if maybewdir:
1554 1558 raise error.WdirUnsupported
1555 1559 return None
1556 1560
1557 1561 def lookup(self, id):
1558 1562 """locate a node based on:
1559 1563 - revision number or str(revision number)
1560 1564 - nodeid or subset of hex nodeid
1561 1565 """
1562 1566 n = self._match(id)
1563 1567 if n is not None:
1564 1568 return n
1565 1569 n = self._partialmatch(id)
1566 1570 if n:
1567 1571 return n
1568 1572
1569 1573 raise error.LookupError(id, self.display_id, _(b'no match found'))
1570 1574
1571 1575 def shortest(self, node, minlength=1):
1572 1576 """Find the shortest unambiguous prefix that matches node."""
1573 1577
1574 1578 def isvalid(prefix):
1575 1579 try:
1576 1580 matchednode = self._partialmatch(prefix)
1577 1581 except error.AmbiguousPrefixLookupError:
1578 1582 return False
1579 1583 except error.WdirUnsupported:
1580 1584 # single 'ff...' match
1581 1585 return True
1582 1586 if matchednode is None:
1583 1587 raise error.LookupError(node, self.display_id, _(b'no node'))
1584 1588 return True
1585 1589
1586 1590 def maybewdir(prefix):
1587 1591 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1588 1592
1589 1593 hexnode = hex(node)
1590 1594
1591 1595 def disambiguate(hexnode, minlength):
1592 1596 """Disambiguate against wdirid."""
1593 1597 for length in range(minlength, len(hexnode) + 1):
1594 1598 prefix = hexnode[:length]
1595 1599 if not maybewdir(prefix):
1596 1600 return prefix
1597 1601
1598 1602 if not getattr(self, 'filteredrevs', None):
1599 1603 try:
1600 1604 length = max(self.index.shortest(node), minlength)
1601 1605 return disambiguate(hexnode, length)
1602 1606 except error.RevlogError:
1603 1607 if node != self.nodeconstants.wdirid:
1604 1608 raise error.LookupError(
1605 1609 node, self.display_id, _(b'no node')
1606 1610 )
1607 1611 except AttributeError:
1608 1612 # Fall through to pure code
1609 1613 pass
1610 1614
1611 1615 if node == self.nodeconstants.wdirid:
1612 1616 for length in range(minlength, len(hexnode) + 1):
1613 1617 prefix = hexnode[:length]
1614 1618 if isvalid(prefix):
1615 1619 return prefix
1616 1620
1617 1621 for length in range(minlength, len(hexnode) + 1):
1618 1622 prefix = hexnode[:length]
1619 1623 if isvalid(prefix):
1620 1624 return disambiguate(hexnode, length)
1621 1625
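Stripped of the wdir and hidden-revision concerns, the core of `shortest` is just "grow the prefix until it matches exactly one known hash". A minimal sketch over a plain list of hex node IDs:

def shortest_prefix(hexnode, all_hexnodes, minlength=1):
    # smallest prefix of ``hexnode`` that no other known node shares
    for length in range(minlength, len(hexnode) + 1):
        prefix = hexnode[:length]
        if [n for n in all_hexnodes if n.startswith(prefix)] == [hexnode]:
            return prefix
    return hexnode

nodes = ['1f0e3d', '1f4a2b', '2c9b11']
print(shortest_prefix('1f4a2b', nodes))  # '1f4'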
1622 1626 def cmp(self, node, text):
1623 1627 """compare text with a given file revision
1624 1628
1625 1629 returns True if text is different from what is stored.
1626 1630 """
1627 1631 p1, p2 = self.parents(node)
1628 1632 return storageutil.hashrevisionsha1(text, p1, p2) != node
1629 1633
1630 1634 def _getsegmentforrevs(self, startrev, endrev, df=None):
1631 1635 """Obtain a segment of raw data corresponding to a range of revisions.
1632 1636
1633 1637 Accepts the start and end revisions and an optional already-open
1634 1638 file handle to be used for reading. If the file handle is used, its
1635 1639 seek position will not be preserved.
1636 1640
1637 1641 Requests for data may be satisfied by a cache.
1638 1642
1639 1643 Returns a 2-tuple of (offset, data) for the requested range of
1640 1644 revisions. Offset is the integer offset from the beginning of the
1641 1645 revlog and data is a str or buffer of the raw byte data.
1642 1646
1643 1647 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1644 1648 to determine where each revision's data begins and ends.
1645 1649 """
1646 1650 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1647 1651 # (functions are expensive).
1648 1652 index = self.index
1649 1653 istart = index[startrev]
1650 1654 start = int(istart[0] >> 16)
1651 1655 if startrev == endrev:
1652 1656 end = start + istart[1]
1653 1657 else:
1654 1658 iend = index[endrev]
1655 1659 end = int(iend[0] >> 16) + iend[1]
1656 1660
1657 1661 if self._inline:
1658 1662 start += (startrev + 1) * self.index.entry_size
1659 1663 end += (endrev + 1) * self.index.entry_size
1660 1664 length = end - start
1661 1665
1662 1666 return start, self._segmentfile.read_chunk(start, length, df)
1663 1667
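For inline revlogs the data chunks are interleaved with the index entries in a single file, so a logical data offset must be shifted by one index-entry size per preceding revision, plus the entry of the revision itself. A small sketch of that address arithmetic (the 64-byte entry size matches the classic revlog-v1 layout, but treat it as an assumption here):

ENTRY_SIZE = 64  # assumed index entry size

def inline_physical_range(start, end, startrev, endrev):
    # mirror of the inline adjustment above: revision ``r``'s data sits
    # after ``r + 1`` index entries in the combined index/data file
    return (
        start + (startrev + 1) * ENTRY_SIZE,
        end + (endrev + 1) * ENTRY_SIZE,
    )

# rev 0 holds bytes [0, 100) of logical data, rev 1 holds [100, 140)
print(inline_physical_range(0, 100, 0, 0))    # (64, 164)
print(inline_physical_range(100, 140, 1, 1))  # (228, 268)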
1664 1668 def _chunk(self, rev, df=None):
1665 1669 """Obtain a single decompressed chunk for a revision.
1666 1670
1667 1671 Accepts an integer revision and an optional already-open file handle
1668 1672 to be used for reading. If used, the seek position of the file will not
1669 1673 be preserved.
1670 1674
1671 1675 Returns a str holding uncompressed data for the requested revision.
1672 1676 """
1673 1677 compression_mode = self.index[rev][10]
1674 1678 data = self._getsegmentforrevs(rev, rev, df=df)[1]
1675 1679 if compression_mode == COMP_MODE_PLAIN:
1676 1680 return data
1677 1681 elif compression_mode == COMP_MODE_DEFAULT:
1678 1682 return self._decompressor(data)
1679 1683 elif compression_mode == COMP_MODE_INLINE:
1680 1684 return self.decompress(data)
1681 1685 else:
1682 1686 msg = b'unknown compression mode %d'
1683 1687 msg %= compression_mode
1684 1688 raise error.RevlogError(msg)
1685 1689
1686 1690 def _chunks(self, revs, df=None, targetsize=None):
1687 1691 """Obtain decompressed chunks for the specified revisions.
1688 1692
1689 1693 Accepts an iterable of numeric revisions that are assumed to be in
1690 1694 ascending order. Also accepts an optional already-open file handle
1691 1695 to be used for reading. If used, the seek position of the file will
1692 1696 not be preserved.
1693 1697
1694 1698 This function is similar to calling ``self._chunk()`` multiple times,
1695 1699 but is faster.
1696 1700
1697 1701 Returns a list with decompressed data for each requested revision.
1698 1702 """
1699 1703 if not revs:
1700 1704 return []
1701 1705 start = self.start
1702 1706 length = self.length
1703 1707 inline = self._inline
1704 1708 iosize = self.index.entry_size
1705 1709 buffer = util.buffer
1706 1710
1707 1711 l = []
1708 1712 ladd = l.append
1709 1713
1710 1714 if not self._withsparseread:
1711 1715 slicedchunks = (revs,)
1712 1716 else:
1713 1717 slicedchunks = deltautil.slicechunk(
1714 1718 self, revs, targetsize=targetsize
1715 1719 )
1716 1720
1717 1721 for revschunk in slicedchunks:
1718 1722 firstrev = revschunk[0]
1719 1723 # Skip trailing revisions with empty diff
1720 1724 for lastrev in revschunk[::-1]:
1721 1725 if length(lastrev) != 0:
1722 1726 break
1723 1727
1724 1728 try:
1725 1729 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1726 1730 except OverflowError:
1727 1731 # issue4215 - we can't cache a run of chunks greater than
1728 1732 # 2G on Windows
1729 1733 return [self._chunk(rev, df=df) for rev in revschunk]
1730 1734
1731 1735 decomp = self.decompress
1732 1736 # self._decompressor might be None, but will not be used in that case
1733 1737 def_decomp = self._decompressor
1734 1738 for rev in revschunk:
1735 1739 chunkstart = start(rev)
1736 1740 if inline:
1737 1741 chunkstart += (rev + 1) * iosize
1738 1742 chunklength = length(rev)
1739 1743 comp_mode = self.index[rev][10]
1740 1744 c = buffer(data, chunkstart - offset, chunklength)
1741 1745 if comp_mode == COMP_MODE_PLAIN:
1742 1746 ladd(c)
1743 1747 elif comp_mode == COMP_MODE_INLINE:
1744 1748 ladd(decomp(c))
1745 1749 elif comp_mode == COMP_MODE_DEFAULT:
1746 1750 ladd(def_decomp(c))
1747 1751 else:
1748 1752 msg = b'unknown compression mode %d'
1749 1753 msg %= comp_mode
1750 1754 raise error.RevlogError(msg)
1751 1755
1752 1756 return l
1753 1757
1754 1758 def deltaparent(self, rev):
1755 1759 """return deltaparent of the given revision"""
1756 1760 base = self.index[rev][3]
1757 1761 if base == rev:
1758 1762 return nullrev
1759 1763 elif self._generaldelta:
1760 1764 return base
1761 1765 else:
1762 1766 return rev - 1
1763 1767
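A minimal sketch of the two layouts handled above: with general delta, the base field of the index entry is the delta parent; without it, every delta implicitly chains against the previous revision:

def delta_parent(bases, rev, generaldelta):
    # bases[rev] mirrors entry[3] above; base == rev marks a full text
    base = bases[rev]
    if base == rev:
        return -1  # nullrev: full snapshot, no delta parent
    return base if generaldelta else rev - 1

bases = [0, 0, 1, 1]
print([delta_parent(bases, r, True) for r in range(4)])   # [-1, 0, 1, 1]
print([delta_parent(bases, r, False) for r in range(4)])  # [-1, 0, 1, 2]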
1764 1768 def issnapshot(self, rev):
1765 1769 """tells whether rev is a snapshot"""
1766 1770 if not self._sparserevlog:
1767 1771 return self.deltaparent(rev) == nullrev
1768 1772 elif util.safehasattr(self.index, b'issnapshot'):
1769 1773 # directly assign the method to cache the testing and access
1770 1774 self.issnapshot = self.index.issnapshot
1771 1775 return self.issnapshot(rev)
1772 1776 if rev == nullrev:
1773 1777 return True
1774 1778 entry = self.index[rev]
1775 1779 base = entry[3]
1776 1780 if base == rev:
1777 1781 return True
1778 1782 if base == nullrev:
1779 1783 return True
1780 1784 p1 = entry[5]
1781 1785 while self.length(p1) == 0:
1782 1786 b = self.deltaparent(p1)
1783 1787 if b == p1:
1784 1788 break
1785 1789 p1 = b
1786 1790 p2 = entry[6]
1787 1791 while self.length(p2) == 0:
1788 1792 b = self.deltaparent(p2)
1789 1793 if b == p2:
1790 1794 break
1791 1795 p2 = b
1792 1796 if base == p1 or base == p2:
1793 1797 return False
1794 1798 return self.issnapshot(base)
1795 1799
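In sparse-revlog terms, a snapshot is a revision whose delta does not apply against one of its parents: a full text (base is the revision itself or null) is a level-0 snapshot, and a delta based on another snapshot that is not a parent is an intermediate snapshot. A toy sketch of that recursion, ignoring the empty-delta skipping done above:

def is_snapshot(bases, parents, rev):
    # bases[rev] mirrors entry[3]; -1 stands for the null revision
    base = bases[rev]
    if base == rev or base == -1:
        return True  # full-text snapshot
    if base in parents[rev]:
        return False  # ordinary delta against a parent
    return is_snapshot(bases, parents, base)

bases = [0, 0, 1, 0]
parents = [(-1, -1), (0, -1), (1, -1), (2, -1)]
# rev 3 deltas against rev 0, which is not its parent but is a snapshot
print([is_snapshot(bases, parents, r) for r in range(4)])
# [True, False, False, True]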
1796 1800 def snapshotdepth(self, rev):
1797 1801 """number of snapshots in the chain before this one"""
1798 1802 if not self.issnapshot(rev):
1799 1803 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1800 1804 return len(self._deltachain(rev)[0]) - 1
1801 1805
1802 1806 def revdiff(self, rev1, rev2):
1803 1807 """return or calculate a delta between two revisions
1804 1808
1805 1809 The delta calculated is in binary form and is intended to be written to
1806 1810 revlog data directly. So this function needs raw revision data.
1807 1811 """
1808 1812 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1809 1813 return bytes(self._chunk(rev2))
1810 1814
1811 1815 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1812 1816
1813 1817 def revision(self, nodeorrev, _df=None):
1814 1818 """return an uncompressed revision of a given node or revision
1815 1819 number.
1816 1820
1817 1821 _df - an existing file handle to read from. (internal-only)
1818 1822 """
1819 1823 return self._revisiondata(nodeorrev, _df)
1820 1824
1821 1825 def sidedata(self, nodeorrev, _df=None):
1822 1826 """a map of extra data related to the changeset but not part of the hash
1823 1827
1824 1828 This function currently returns a dictionary. However, a more advanced
1825 1829 mapping object will likely be used in the future for more
1826 1830 efficient/lazy code.
1827 1831 """
1828 1832 # deal with <nodeorrev> argument type
1829 1833 if isinstance(nodeorrev, int):
1830 1834 rev = nodeorrev
1831 1835 else:
1832 1836 rev = self.rev(nodeorrev)
1833 1837 return self._sidedata(rev)
1834 1838
1835 1839 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1836 1840 # deal with <nodeorrev> argument type
1837 1841 if isinstance(nodeorrev, int):
1838 1842 rev = nodeorrev
1839 1843 node = self.node(rev)
1840 1844 else:
1841 1845 node = nodeorrev
1842 1846 rev = None
1843 1847
1844 1848 # fast path the special `nullid` rev
1845 1849 if node == self.nullid:
1846 1850 return b""
1847 1851
1848 1852 # ``rawtext`` is the text as stored inside the revlog. Might be the
1849 1853 # revision or might need to be processed to retrieve the revision.
1850 1854 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1851 1855
1852 1856 if raw and validated:
1853 1857 # if we don't want to process the raw text and that raw
1854 1858 # text is cached, we can exit early.
1855 1859 return rawtext
1856 1860 if rev is None:
1857 1861 rev = self.rev(node)
1858 1862 # the revlog's flags for this revision
1859 1863 # (usually altering its state or content)
1860 1864 flags = self.flags(rev)
1861 1865
1862 1866 if validated and flags == REVIDX_DEFAULT_FLAGS:
1863 1867 # no extra flags set, no flag processor runs, text = rawtext
1864 1868 return rawtext
1865 1869
1866 1870 if raw:
1867 1871 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1868 1872 text = rawtext
1869 1873 else:
1870 1874 r = flagutil.processflagsread(self, rawtext, flags)
1871 1875 text, validatehash = r
1872 1876 if validatehash:
1873 1877 self.checkhash(text, node, rev=rev)
1874 1878 if not validated:
1875 1879 self._revisioncache = (node, rev, rawtext)
1876 1880
1877 1881 return text
1878 1882
1879 1883 def _rawtext(self, node, rev, _df=None):
1880 1884 """return the possibly unvalidated rawtext for a revision
1881 1885
1882 1886 returns (rev, rawtext, validated)
1883 1887 """
1884 1888
1885 1889 # revision in the cache (could be useful to apply delta)
1886 1890 cachedrev = None
1887 1891 # An intermediate text to apply deltas to
1888 1892 basetext = None
1889 1893
1890 1894 # Check if we have the entry in cache
1891 1895 # The cache entry looks like (node, rev, rawtext)
1892 1896 if self._revisioncache:
1893 1897 if self._revisioncache[0] == node:
1894 1898 return (rev, self._revisioncache[2], True)
1895 1899 cachedrev = self._revisioncache[1]
1896 1900
1897 1901 if rev is None:
1898 1902 rev = self.rev(node)
1899 1903
1900 1904 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1901 1905 if stopped:
1902 1906 basetext = self._revisioncache[2]
1903 1907
1904 1908 # drop cache to save memory; the caller is expected to
1905 1909 # update self._revisioncache after validating the text
1906 1910 self._revisioncache = None
1907 1911
1908 1912 targetsize = None
1909 1913 rawsize = self.index[rev][2]
1910 1914 if 0 <= rawsize:
1911 1915 targetsize = 4 * rawsize
1912 1916
1913 1917 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1914 1918 if basetext is None:
1915 1919 basetext = bytes(bins[0])
1916 1920 bins = bins[1:]
1917 1921
1918 1922 rawtext = mdiff.patches(basetext, bins)
1919 1923 del basetext # let us have a chance to free memory early
1920 1924 return (rev, rawtext, False)
1921 1925
1922 1926 def _sidedata(self, rev):
1923 1927 """Return the sidedata for a given revision number."""
1924 1928 index_entry = self.index[rev]
1925 1929 sidedata_offset = index_entry[8]
1926 1930 sidedata_size = index_entry[9]
1927 1931
1928 1932 if self._inline:
1929 1933 sidedata_offset += self.index.entry_size * (1 + rev)
1930 1934 if sidedata_size == 0:
1931 1935 return {}
1932 1936
1933 1937 if self._docket.sidedata_end < sidedata_offset + sidedata_size:
1934 1938 filename = self._sidedatafile
1935 1939 end = self._docket.sidedata_end
1936 1940 offset = sidedata_offset
1937 1941 length = sidedata_size
1938 1942 m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
1939 1943 raise error.RevlogError(m)
1940 1944
1941 1945 comp_segment = self._segmentfile_sidedata.read_chunk(
1942 1946 sidedata_offset, sidedata_size
1943 1947 )
1944 1948
1945 1949 comp = self.index[rev][11]
1946 1950 if comp == COMP_MODE_PLAIN:
1947 1951 segment = comp_segment
1948 1952 elif comp == COMP_MODE_DEFAULT:
1949 1953 segment = self._decompressor(comp_segment)
1950 1954 elif comp == COMP_MODE_INLINE:
1951 1955 segment = self.decompress(comp_segment)
1952 1956 else:
1953 1957 msg = b'unknown compression mode %d'
1954 1958 msg %= comp
1955 1959 raise error.RevlogError(msg)
1956 1960
1957 1961 sidedata = sidedatautil.deserialize_sidedata(segment)
1958 1962 return sidedata
1959 1963
1960 1964 def rawdata(self, nodeorrev, _df=None):
1961 1965 """return an uncompressed raw data of a given node or revision number.
1962 1966
1963 1967 _df - an existing file handle to read from. (internal-only)
1964 1968 """
1965 1969 return self._revisiondata(nodeorrev, _df, raw=True)
1966 1970
1967 1971 def hash(self, text, p1, p2):
1968 1972 """Compute a node hash.
1969 1973
1970 1974 Available as a function so that subclasses can replace the hash
1971 1975 as needed.
1972 1976 """
1973 1977 return storageutil.hashrevisionsha1(text, p1, p2)
1974 1978
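`storageutil.hashrevisionsha1` implements Mercurial's classic node scheme: the SHA-1 of the two parent nodes, sorted bytewise and concatenated, followed by the text. A self-contained sketch of that scheme (for illustration; the real helper also has fast paths for null parents):

import hashlib

def hash_revision(text, p1, p2):
    # sha1(min(p1, p2) + max(p1, p2) + text)
    a, b = sorted([p1, p2])
    s = hashlib.sha1(a)
    s.update(b)
    s.update(text)
    return s.digest()

nullid = b'\0' * 20
print(hash_revision(b'hello\n', nullid, nullid).hex())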
1975 1979 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1976 1980 """Check node hash integrity.
1977 1981
1978 1982 Available as a function so that subclasses can extend hash mismatch
1979 1983 behaviors as needed.
1980 1984 """
1981 1985 try:
1982 1986 if p1 is None and p2 is None:
1983 1987 p1, p2 = self.parents(node)
1984 1988 if node != self.hash(text, p1, p2):
1985 1989 # Clear the revision cache on hash failure. The revision cache
1986 1990 # only stores the raw revision and clearing the cache does have
1987 1991 # the side-effect that we won't have a cache hit when the raw
1988 1992 # revision data is accessed. But this case should be rare and
1989 1993 # it is extra work to teach the cache about the hash
1990 1994 # verification state.
1991 1995 if self._revisioncache and self._revisioncache[0] == node:
1992 1996 self._revisioncache = None
1993 1997
1994 1998 revornode = rev
1995 1999 if revornode is None:
1996 2000 revornode = templatefilters.short(hex(node))
1997 2001 raise error.RevlogError(
1998 2002 _(b"integrity check failed on %s:%s")
1999 2003 % (self.display_id, pycompat.bytestr(revornode))
2000 2004 )
2001 2005 except error.RevlogError:
2002 2006 if self._censorable and storageutil.iscensoredtext(text):
2003 2007 raise error.CensoredNodeError(self.display_id, node, text)
2004 2008 raise
2005 2009
2006 2010 def _enforceinlinesize(self, tr):
2007 2011 """Check if the revlog is too big for inline and convert if so.
2008 2012
2009 2013 This should be called after revisions are added to the revlog. If the
2010 2014 revlog has grown too large to be an inline revlog, it will convert it
2011 2015 to use multiple index and data files.
2012 2016 """
2013 2017 tiprev = len(self) - 1
2014 2018 total_size = self.start(tiprev) + self.length(tiprev)
2015 2019 if not self._inline or total_size < _maxinline:
2016 2020 return
2017 2021
2018 2022 troffset = tr.findoffset(self._indexfile)
2019 2023 if troffset is None:
2020 2024 raise error.RevlogError(
2021 2025 _(b"%s not found in the transaction") % self._indexfile
2022 2026 )
2023 2027 trindex = None
2024 2028 tr.add(self._datafile, 0)
2025 2029
2026 2030 existing_handles = False
2027 2031 if self._writinghandles is not None:
2028 2032 existing_handles = True
2029 2033 fp = self._writinghandles[0]
2030 2034 fp.flush()
2031 2035 fp.close()
2032 2036 # We can't use the cached file handle after close(). So prevent
2033 2037 # its usage.
2034 2038 self._writinghandles = None
2035 2039 self._segmentfile.writing_handle = None
2036 2040 # No need to deal with the sidedata writing handle as it is only
2037 2041 # relevant for revlog-v2, which is never inline and thus never
2038 2042 # reaches this code
2039 2043
2040 2044 new_dfh = self._datafp(b'w+')
2041 2045 new_dfh.truncate(0) # drop any potentially existing data
2042 2046 try:
2043 2047 with self._indexfp() as read_ifh:
2044 2048 for r in self:
2045 2049 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2046 2050 if (
2047 2051 trindex is None
2048 2052 and troffset
2049 2053 <= self.start(r) + r * self.index.entry_size
2050 2054 ):
2051 2055 trindex = r
2052 2056 new_dfh.flush()
2053 2057
2054 2058 if trindex is None:
2055 2059 trindex = 0
2056 2060
2057 2061 with self.__index_new_fp() as fp:
2058 2062 self._format_flags &= ~FLAG_INLINE_DATA
2059 2063 self._inline = False
2060 2064 for i in self:
2061 2065 e = self.index.entry_binary(i)
2062 2066 if i == 0 and self._docket is None:
2063 2067 header = self._format_flags | self._format_version
2064 2068 header = self.index.pack_header(header)
2065 2069 e = header + e
2066 2070 fp.write(e)
2067 2071 if self._docket is not None:
2068 2072 self._docket.index_end = fp.tell()
2069 2073
2070 2074 # There is a small transactional race here. If the rename of
2071 2075 # the index fails, we should remove the datafile. It is more
2072 2076 # important to ensure that the data file is not truncated
2073 2077 # when the index is replaced as otherwise data is lost.
2074 2078 tr.replace(self._datafile, self.start(trindex))
2075 2079
2076 2080 # the temp file replaces the real index when we exit the context
2077 2081 # manager
2078 2082
2079 2083 tr.replace(self._indexfile, trindex * self.index.entry_size)
2080 2084 nodemaputil.setup_persistent_nodemap(tr, self)
2081 2085 self._segmentfile = randomaccessfile.randomaccessfile(
2082 2086 self.opener,
2083 2087 self._datafile,
2084 2088 self._chunkcachesize,
2085 2089 )
2086 2090
2087 2091 if existing_handles:
2088 2092 # switched from inline to conventional; reopen the index
2089 2093 ifh = self.__index_write_fp()
2090 2094 self._writinghandles = (ifh, new_dfh, None)
2091 2095 self._segmentfile.writing_handle = new_dfh
2092 2096 new_dfh = None
2093 2097 # No need to deal with the sidedata writing handle as it is only
2094 2098 # relevant for revlog-v2, which is never inline and thus never
2095 2099 # reaches this code
2096 2100 finally:
2097 2101 if new_dfh is not None:
2098 2102 new_dfh.close()
2099 2103
2100 2104 def _nodeduplicatecallback(self, transaction, node):
2101 2105 """called when trying to add a node already stored."""
2102 2106
2103 2107 @contextlib.contextmanager
2104 2108 def reading(self):
2105 2109 """Context manager that keeps data and sidedata files open for reading"""
2106 2110 with self._segmentfile.reading():
2107 2111 with self._segmentfile_sidedata.reading():
2108 2112 yield
2109 2113
2110 2114 @contextlib.contextmanager
2111 2115 def _writing(self, transaction):
2112 2116 if self._trypending:
2113 2117 msg = b'trying to write in a `trypending` revlog: %s'
2114 2118 msg %= self.display_id
2115 2119 raise error.ProgrammingError(msg)
2116 2120 if self._writinghandles is not None:
2117 2121 yield
2118 2122 else:
2119 2123 ifh = dfh = sdfh = None
2120 2124 try:
2121 2125 r = len(self)
2122 2126 # opening the data file.
2123 2127 dsize = 0
2124 2128 if r:
2125 2129 dsize = self.end(r - 1)
2126 2130 dfh = None
2127 2131 if not self._inline:
2128 2132 try:
2129 2133 dfh = self._datafp(b"r+")
2130 2134 if self._docket is None:
2131 2135 dfh.seek(0, os.SEEK_END)
2132 2136 else:
2133 2137 dfh.seek(self._docket.data_end, os.SEEK_SET)
2134 2138 except FileNotFoundError:
2135 2139 dfh = self._datafp(b"w+")
2136 2140 transaction.add(self._datafile, dsize)
2137 2141 if self._sidedatafile is not None:
2138 2142 # revlog-v2 does not use inline storage; help Pytype
2139 2143 assert dfh is not None
2140 2144 try:
2141 2145 sdfh = self.opener(self._sidedatafile, mode=b"r+")
2142 2146 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2143 2147 except FileNotFoundError:
2144 2148 sdfh = self.opener(self._sidedatafile, mode=b"w+")
2145 2149 transaction.add(
2146 2150 self._sidedatafile, self._docket.sidedata_end
2147 2151 )
2148 2152
2149 2153 # opening the index file.
2150 2154 isize = r * self.index.entry_size
2151 2155 ifh = self.__index_write_fp()
2152 2156 if self._inline:
2153 2157 transaction.add(self._indexfile, dsize + isize)
2154 2158 else:
2155 2159 transaction.add(self._indexfile, isize)
2156 2160 # expose all file handles for writing.
2157 2161 self._writinghandles = (ifh, dfh, sdfh)
2158 2162 self._segmentfile.writing_handle = ifh if self._inline else dfh
2159 2163 self._segmentfile_sidedata.writing_handle = sdfh
2160 2164 yield
2161 2165 if self._docket is not None:
2162 2166 self._write_docket(transaction)
2163 2167 finally:
2164 2168 self._writinghandles = None
2165 2169 self._segmentfile.writing_handle = None
2166 2170 self._segmentfile_sidedata.writing_handle = None
2167 2171 if dfh is not None:
2168 2172 dfh.close()
2169 2173 if sdfh is not None:
2170 2174 sdfh.close()
2171 2175 # closing the index file last to avoid exposing references to
2172 2176 # potentially unflushed data content.
2173 2177 if ifh is not None:
2174 2178 ifh.close()
2175 2179
2176 2180 def _write_docket(self, transaction):
2177 2181 """write the current docket on disk
2178 2182
2179 2183 Exists as a method to help the changelog implement transaction logic
2180 2184
2181 2185 We could also imagine using the same transaction logic for all revlogs
2182 2186 since dockets are cheap.
2183 2187 self._docket.write(transaction)
2184 2188
2185 2189 def addrevision(
2186 2190 self,
2187 2191 text,
2188 2192 transaction,
2189 2193 link,
2190 2194 p1,
2191 2195 p2,
2192 2196 cachedelta=None,
2193 2197 node=None,
2194 2198 flags=REVIDX_DEFAULT_FLAGS,
2195 2199 deltacomputer=None,
2196 2200 sidedata=None,
2197 2201 ):
2198 2202 """add a revision to the log
2199 2203
2200 2204 text - the revision data to add
2201 2205 transaction - the transaction object used for rollback
2202 2206 link - the linkrev data to add
2203 2207 p1, p2 - the parent nodeids of the revision
2204 2208 cachedelta - an optional precomputed delta
2205 2209 node - nodeid of revision; typically node is not specified, and it is
2206 2210 computed by default as hash(text, p1, p2); however, subclasses might
2207 2211 use a different hashing method (and override checkhash() in that case)
2208 2212 flags - the known flags to set on the revision
2209 2213 deltacomputer - an optional deltacomputer instance shared between
2210 2214 multiple calls
2211 2215 """
2212 2216 if link == nullrev:
2213 2217 raise error.RevlogError(
2214 2218 _(b"attempted to add linkrev -1 to %s") % self.display_id
2215 2219 )
2216 2220
2217 2221 if sidedata is None:
2218 2222 sidedata = {}
2219 2223 elif sidedata and not self.hassidedata:
2220 2224 raise error.ProgrammingError(
2221 2225 _(b"trying to add sidedata to a revlog that doesn't support it")
2222 2226 )
2223 2227
2224 2228 if flags:
2225 2229 node = node or self.hash(text, p1, p2)
2226 2230
2227 2231 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2228 2232
2229 2233 # If the flag processor modifies the revision data, ignore any provided
2230 2234 # cachedelta.
2231 2235 if rawtext != text:
2232 2236 cachedelta = None
2233 2237
2234 2238 if len(rawtext) > _maxentrysize:
2235 2239 raise error.RevlogError(
2236 2240 _(
2237 2241 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2238 2242 )
2239 2243 % (self.display_id, len(rawtext))
2240 2244 )
2241 2245
2242 2246 node = node or self.hash(rawtext, p1, p2)
2243 2247 rev = self.index.get_rev(node)
2244 2248 if rev is not None:
2245 2249 return rev
2246 2250
2247 2251 if validatehash:
2248 2252 self.checkhash(rawtext, node, p1=p1, p2=p2)
2249 2253
2250 2254 return self.addrawrevision(
2251 2255 rawtext,
2252 2256 transaction,
2253 2257 link,
2254 2258 p1,
2255 2259 p2,
2256 2260 node,
2257 2261 flags,
2258 2262 cachedelta=cachedelta,
2259 2263 deltacomputer=deltacomputer,
2260 2264 sidedata=sidedata,
2261 2265 )
2262 2266
2263 2267 def addrawrevision(
2264 2268 self,
2265 2269 rawtext,
2266 2270 transaction,
2267 2271 link,
2268 2272 p1,
2269 2273 p2,
2270 2274 node,
2271 2275 flags,
2272 2276 cachedelta=None,
2273 2277 deltacomputer=None,
2274 2278 sidedata=None,
2275 2279 ):
2276 2280 """add a raw revision with known flags, node and parents
2277 2281 useful when reusing a revision not stored in this revlog (ex: received
2278 2282 over the wire, or read from an external bundle).
2279 2283 """
2280 2284 with self._writing(transaction):
2281 2285 return self._addrevision(
2282 2286 node,
2283 2287 rawtext,
2284 2288 transaction,
2285 2289 link,
2286 2290 p1,
2287 2291 p2,
2288 2292 flags,
2289 2293 cachedelta,
2290 2294 deltacomputer=deltacomputer,
2291 2295 sidedata=sidedata,
2292 2296 )
2293 2297
2294 2298 def compress(self, data):
2295 2299 """Generate a possibly-compressed representation of data."""
2296 2300 if not data:
2297 2301 return b'', data
2298 2302
2299 2303 compressed = self._compressor.compress(data)
2300 2304
2301 2305 if compressed:
2302 2306 # The revlog compressor added the header in the returned data.
2303 2307 return b'', compressed
2304 2308
2305 2309 if data[0:1] == b'\0':
2306 2310 return b'', data
2307 2311 return b'u', data
2308 2312
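The `(header, data)` convention above: an empty header means the payload carries its own engine header (zlib output begins with `b'x'`, for example); `b'u'` explicitly marks uncompressed data; and data already starting with a NUL byte can be stored as-is, since `decompress` below treats `b'\0'` as plain. A small round-trip sketch of that convention using zlib directly:

import zlib

def toy_compress(data):
    if not data:
        return b'', data
    compressed = zlib.compress(data)
    if len(compressed) < len(data):
        return b'', compressed  # zlib stream starts with b'x'
    if data[0:1] == b'\0':
        return b'', data
    return b'u', data

def toy_decompress(chunk):
    t = chunk[0:1]
    if t == b'x':
        return zlib.decompress(chunk)
    if t == b'\0':
        return chunk
    if t == b'u':
        return bytes(chunk[1:])
    raise ValueError('unknown compression header %r' % t)

blob = b'a' * 4096
header, payload = toy_compress(blob)
assert toy_decompress(header + payload) == blob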
2309 2313 def decompress(self, data):
2310 2314 """Decompress a revlog chunk.
2311 2315
2312 2316 The chunk is expected to begin with a header identifying the
2313 2317 format type so it can be routed to an appropriate decompressor.
2314 2318 """
2315 2319 if not data:
2316 2320 return data
2317 2321
2318 2322 # Revlogs are read much more frequently than they are written and many
2319 2323 # chunks only take microseconds to decompress, so performance is
2320 2324 # important here.
2321 2325 #
2322 2326 # We can make a few assumptions about revlogs:
2323 2327 #
2324 2328 # 1) the majority of chunks will be compressed (as opposed to inline
2325 2329 # raw data).
2326 2330 # 2) decompressing *any* data will likely be at least 10x slower than
2327 2331 # returning raw inline data.
2328 2332 # 3) we want to prioritize common and officially supported compression
2329 2333 # engines
2330 2334 #
2331 2335 # It follows that we want to optimize for "decompress compressed data
2332 2336 # when encoded with common and officially supported compression engines"
2333 2337 # case over "raw data" and "data encoded by less common or non-official
2334 2338 # compression engines." That is why we have the inline lookup first
2335 2339 # followed by the compengines lookup.
2336 2340 #
2337 2341 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2338 2342 # compressed chunks. And this matters for changelog and manifest reads.
2339 2343 t = data[0:1]
2340 2344
2341 2345 if t == b'x':
2342 2346 try:
2343 2347 return _zlibdecompress(data)
2344 2348 except zlib.error as e:
2345 2349 raise error.RevlogError(
2346 2350 _(b'revlog decompress error: %s')
2347 2351 % stringutil.forcebytestr(e)
2348 2352 )
2349 2353 # '\0' is more common than 'u' so it goes first.
2350 2354 elif t == b'\0':
2351 2355 return data
2352 2356 elif t == b'u':
2353 2357 return util.buffer(data, 1)
2354 2358
2355 2359 compressor = self._get_decompressor(t)
2356 2360
2357 2361 return compressor.decompress(data)
2358 2362
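
The one-byte header dispatch implemented by compress()/decompress() above can be illustrated with a small standalone sketch. This is not the revlog API itself, only a simplified model of the convention, assuming default zlib settings (whose streams start with b'x'):

    import zlib

    def toy_compress(data):
        comp = zlib.compress(data)
        if comp and len(comp) < len(data):
            return comp          # zlib's own b'x' header doubles as the marker
        if data[0:1] == b'\0':
            return data          # a leading NUL needs no marker
        return b'u' + data       # explicit "stored uncompressed" marker

    def toy_decompress(blob):
        t = blob[0:1]
        if t == b'x':
            return zlib.decompress(blob)
        if t == b'u':
            return blob[1:]
        return blob              # empty or NUL-leading data was stored raw

    payload = b'some revision text ' * 20
    assert toy_decompress(toy_compress(payload)) == payload
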
2359 2363 def _addrevision(
2360 2364 self,
2361 2365 node,
2362 2366 rawtext,
2363 2367 transaction,
2364 2368 link,
2365 2369 p1,
2366 2370 p2,
2367 2371 flags,
2368 2372 cachedelta,
2369 2373 alwayscache=False,
2370 2374 deltacomputer=None,
2371 2375 sidedata=None,
2372 2376 ):
2373 2377 """internal function to add revisions to the log
2374 2378
2375 2379 see addrevision for argument descriptions.
2376 2380
2377 2381 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2378 2382
2379 2383 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2380 2384 be used.
2381 2385
2382 2386 invariants:
2383 2387 - rawtext is optional (can be None); if not set, cachedelta must be set.
2384 2388 if both are set, they must correspond to each other.
2385 2389 """
2386 2390 if node == self.nullid:
2387 2391 raise error.RevlogError(
2388 2392 _(b"%s: attempt to add null revision") % self.display_id
2389 2393 )
2390 2394 if (
2391 2395 node == self.nodeconstants.wdirid
2392 2396 or node in self.nodeconstants.wdirfilenodeids
2393 2397 ):
2394 2398 raise error.RevlogError(
2395 2399 _(b"%s: attempt to add wdir revision") % self.display_id
2396 2400 )
2397 2401 if self._writinghandles is None:
2398 2402 msg = b'adding revision outside `revlog._writing` context'
2399 2403 raise error.ProgrammingError(msg)
2400 2404
2401 2405 if self._inline:
2402 2406 fh = self._writinghandles[0]
2403 2407 else:
2404 2408 fh = self._writinghandles[1]
2405 2409
2406 2410 btext = [rawtext]
2407 2411
2408 2412 curr = len(self)
2409 2413 prev = curr - 1
2410 2414
2411 2415 offset = self._get_data_offset(prev)
2412 2416
2413 2417 if self._concurrencychecker:
2414 2418 ifh, dfh, sdfh = self._writinghandles
2415 2419 # XXX no checking for the sidedata file
2416 2420 if self._inline:
2417 2421 # offset is "as if" it were in the .d file, so we need to add on
2418 2422 # the size of the entry metadata.
2419 2423 self._concurrencychecker(
2420 2424 ifh, self._indexfile, offset + curr * self.index.entry_size
2421 2425 )
2422 2426 else:
2423 2427 # Entries in the .i are a consistent size.
2424 2428 self._concurrencychecker(
2425 2429 ifh, self._indexfile, curr * self.index.entry_size
2426 2430 )
2427 2431 self._concurrencychecker(dfh, self._datafile, offset)
2428 2432
2429 2433 p1r, p2r = self.rev(p1), self.rev(p2)
2430 2434
2431 2435 # full versions are inserted when the needed deltas
2432 2436 # become comparable to the uncompressed text
2433 2437 if rawtext is None:
2434 2438 # we need the rawtext size before it was changed by flag processors,
2435 2439 # which is the non-raw size. use revlog explicitly to avoid filelog's
2436 2440 # extra logic that might remove metadata size.
2437 2441 textlen = mdiff.patchedsize(
2438 2442 revlog.size(self, cachedelta[0]), cachedelta[1]
2439 2443 )
2440 2444 else:
2441 2445 textlen = len(rawtext)
2442 2446
2443 2447 if deltacomputer is None:
2444 2448 write_debug = None
2445 2449 if self._debug_delta:
2446 2450 write_debug = transaction._report
2447 2451 deltacomputer = deltautil.deltacomputer(
2448 2452 self, write_debug=write_debug
2449 2453 )
2450 2454
2451 2455 revinfo = revlogutils.revisioninfo(
2452 2456 node,
2453 2457 p1,
2454 2458 p2,
2455 2459 btext,
2456 2460 textlen,
2457 2461 cachedelta,
2458 2462 flags,
2459 2463 )
2460 2464
2461 2465 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2462 2466
2463 2467 compression_mode = COMP_MODE_INLINE
2464 2468 if self._docket is not None:
2465 2469 default_comp = self._docket.default_compression_header
2466 2470 r = deltautil.delta_compression(default_comp, deltainfo)
2467 2471 compression_mode, deltainfo = r
2468 2472
2469 2473 sidedata_compression_mode = COMP_MODE_INLINE
2470 2474 if sidedata and self.hassidedata:
2471 2475 sidedata_compression_mode = COMP_MODE_PLAIN
2472 2476 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2473 2477 sidedata_offset = self._docket.sidedata_end
2474 2478 h, comp_sidedata = self.compress(serialized_sidedata)
2475 2479 if (
2476 2480 h != b'u'
2477 2481 and comp_sidedata[0:1] != b'\0'
2478 2482 and len(comp_sidedata) < len(serialized_sidedata)
2479 2483 ):
2480 2484 assert not h
2481 2485 if (
2482 2486 comp_sidedata[0:1]
2483 2487 == self._docket.default_compression_header
2484 2488 ):
2485 2489 sidedata_compression_mode = COMP_MODE_DEFAULT
2486 2490 serialized_sidedata = comp_sidedata
2487 2491 else:
2488 2492 sidedata_compression_mode = COMP_MODE_INLINE
2489 2493 serialized_sidedata = comp_sidedata
2490 2494 else:
2491 2495 serialized_sidedata = b""
2492 2496 # Don't store the offset if the sidedata is empty; that way we can
2493 2497 # easily detect empty sidedata, and it will be no different from
2494 2498 # sidedata we add manually.
2495 2499 sidedata_offset = 0
2496 2500
2497 2501 rank = RANK_UNKNOWN
2498 2502 if self._format_version == CHANGELOGV2:
2499 2503 if (p1r, p2r) == (nullrev, nullrev):
2500 2504 rank = 1
2501 2505 elif p1r != nullrev and p2r == nullrev:
2502 2506 rank = 1 + self.fast_rank(p1r)
2503 2507 elif p1r == nullrev and p2r != nullrev:
2504 2508 rank = 1 + self.fast_rank(p2r)
2505 2509 else: # merge node
2506 2510 if rustdagop is not None and self.index.rust_ext_compat:
2507 2511 rank = rustdagop.rank(self.index, p1r, p2r)
2508 2512 else:
2509 2513 pmin, pmax = sorted((p1r, p2r))
2510 2514 rank = 1 + self.fast_rank(pmax)
2511 2515 rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))
2512 2516
2513 2517 e = revlogutils.entry(
2514 2518 flags=flags,
2515 2519 data_offset=offset,
2516 2520 data_compressed_length=deltainfo.deltalen,
2517 2521 data_uncompressed_length=textlen,
2518 2522 data_compression_mode=compression_mode,
2519 2523 data_delta_base=deltainfo.base,
2520 2524 link_rev=link,
2521 2525 parent_rev_1=p1r,
2522 2526 parent_rev_2=p2r,
2523 2527 node_id=node,
2524 2528 sidedata_offset=sidedata_offset,
2525 2529 sidedata_compressed_length=len(serialized_sidedata),
2526 2530 sidedata_compression_mode=sidedata_compression_mode,
2527 2531 rank=rank,
2528 2532 )
2529 2533
2530 2534 self.index.append(e)
2531 2535 entry = self.index.entry_binary(curr)
2532 2536 if curr == 0 and self._docket is None:
2533 2537 header = self._format_flags | self._format_version
2534 2538 header = self.index.pack_header(header)
2535 2539 entry = header + entry
2536 2540 self._writeentry(
2537 2541 transaction,
2538 2542 entry,
2539 2543 deltainfo.data,
2540 2544 link,
2541 2545 offset,
2542 2546 serialized_sidedata,
2543 2547 sidedata_offset,
2544 2548 )
2545 2549
2546 2550 rawtext = btext[0]
2547 2551
2548 2552 if alwayscache and rawtext is None:
2549 2553 rawtext = deltacomputer.buildtext(revinfo, fh)
2550 2554
2551 2555 if type(rawtext) == bytes: # only accept immutable objects
2552 2556 self._revisioncache = (node, curr, rawtext)
2553 2557 self._chainbasecache[curr] = deltainfo.chainbase
2554 2558 return curr
2555 2559
2556 2560 def _get_data_offset(self, prev):
2557 2561 """Returns the current offset in the (in-transaction) data file.
2558 2562 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
2559 2563 file to store that information: since sidedata can be rewritten to the
2560 2564 end of the data file within a transaction, you can have cases where, for
2561 2565 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2562 2566 to `n - 1`'s sidedata being written after `n`'s data.
2563 2567
2564 2568 TODO cache this in a docket file before getting out of experimental."""
2565 2569 if self._docket is None:
2566 2570 return self.end(prev)
2567 2571 else:
2568 2572 return self._docket.data_end
2569 2573
2570 2574 def _writeentry(
2571 2575 self, transaction, entry, data, link, offset, sidedata, sidedata_offset
2572 2576 ):
2573 2577 # Files opened in a+ mode have inconsistent behavior on various
2574 2578 # platforms. Windows requires that a file positioning call be made
2575 2579 # when the file handle transitions between reads and writes. See
2576 2580 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2577 2581 # platforms, Python or the platform itself can be buggy. Some versions
2578 2582 # of Solaris have been observed to not append at the end of the file
2579 2583 # if the file was seeked to before the end. See issue4943 for more.
2580 2584 #
2581 2585 # We work around this issue by inserting a seek() before writing.
2582 2586 # Note: This is likely not necessary on Python 3. However, because
2583 2587 # the file handle is reused for reads and may be seeked there, we need
2584 2588 # to be careful before changing this.
2585 2589 if self._writinghandles is None:
2586 2590 msg = b'adding revision outside `revlog._writing` context'
2587 2591 raise error.ProgrammingError(msg)
2588 2592 ifh, dfh, sdfh = self._writinghandles
2589 2593 if self._docket is None:
2590 2594 ifh.seek(0, os.SEEK_END)
2591 2595 else:
2592 2596 ifh.seek(self._docket.index_end, os.SEEK_SET)
2593 2597 if dfh:
2594 2598 if self._docket is None:
2595 2599 dfh.seek(0, os.SEEK_END)
2596 2600 else:
2597 2601 dfh.seek(self._docket.data_end, os.SEEK_SET)
2598 2602 if sdfh:
2599 2603 sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2600 2604
2601 2605 curr = len(self) - 1
2602 2606 if not self._inline:
2603 2607 transaction.add(self._datafile, offset)
2604 2608 if self._sidedatafile:
2605 2609 transaction.add(self._sidedatafile, sidedata_offset)
2606 2610 transaction.add(self._indexfile, curr * len(entry))
2607 2611 if data[0]:
2608 2612 dfh.write(data[0])
2609 2613 dfh.write(data[1])
2610 2614 if sidedata:
2611 2615 sdfh.write(sidedata)
2612 2616 ifh.write(entry)
2613 2617 else:
2614 2618 offset += curr * self.index.entry_size
2615 2619 transaction.add(self._indexfile, offset)
2616 2620 ifh.write(entry)
2617 2621 ifh.write(data[0])
2618 2622 ifh.write(data[1])
2619 2623 assert not sidedata
2620 2624 self._enforceinlinesize(transaction)
2621 2625 if self._docket is not None:
2622 2626 # revlog-v2 always has 3 writing handles, help Pytype
2623 2627 wh1 = self._writinghandles[0]
2624 2628 wh2 = self._writinghandles[1]
2625 2629 wh3 = self._writinghandles[2]
2626 2630 assert wh1 is not None
2627 2631 assert wh2 is not None
2628 2632 assert wh3 is not None
2629 2633 self._docket.index_end = wh1.tell()
2630 2634 self._docket.data_end = wh2.tell()
2631 2635 self._docket.sidedata_end = wh3.tell()
2632 2636
2633 2637 nodemaputil.setup_persistent_nodemap(transaction, self)
2634 2638
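
A hedged illustration of the seek-before-write workaround described at the top of _writeentry above: a handle opened in a+ mode may not append reliably on some platforms unless it is positioned explicitly first. The file name and payload below are hypothetical:

    import os

    with open('example.i', 'a+b') as fh:
        fh.seek(0)
        fh.read(16)              # the handle has been used for reading...
        fh.seek(0, os.SEEK_END)  # ...so reposition explicitly before appending
        fh.write(b'new index entry bytes')
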
2635 2639 def addgroup(
2636 2640 self,
2637 2641 deltas,
2638 2642 linkmapper,
2639 2643 transaction,
2640 2644 alwayscache=False,
2641 2645 addrevisioncb=None,
2642 2646 duplicaterevisioncb=None,
2643 2647 debug_info=None,
2644 2648 ):
2645 2649 """
2646 2650 add a delta group
2647 2651
2648 2652 given a set of deltas, add them to the revision log. the
2649 2653 first delta is against its parent, which should be in our
2650 2654 log, the rest are against the previous delta.
2651 2655
2652 2656 If ``addrevisioncb`` is defined, it will be called with this revlog and
2653 2657 the revision number that was added.
2654 2658 """
2655 2659
2656 2660 if self._adding_group:
2657 2661 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2658 2662
2659 2663 self._adding_group = True
2660 2664 empty = True
2661 2665 try:
2662 2666 with self._writing(transaction):
2663 2667 write_debug = None
2664 2668 if self._debug_delta:
2665 2669 write_debug = transaction._report
2666 2670 deltacomputer = deltautil.deltacomputer(
2667 2671 self,
2668 2672 write_debug=write_debug,
2669 2673 debug_info=debug_info,
2670 2674 )
2671 2675 # loop through our set of deltas
2672 2676 for data in deltas:
2673 2677 (
2674 2678 node,
2675 2679 p1,
2676 2680 p2,
2677 2681 linknode,
2678 2682 deltabase,
2679 2683 delta,
2680 2684 flags,
2681 2685 sidedata,
2682 2686 ) = data
2683 2687 link = linkmapper(linknode)
2684 2688 flags = flags or REVIDX_DEFAULT_FLAGS
2685 2689
2686 2690 rev = self.index.get_rev(node)
2687 2691 if rev is not None:
2688 2692 # this can happen if two branches make the same change
2689 2693 self._nodeduplicatecallback(transaction, rev)
2690 2694 if duplicaterevisioncb:
2691 2695 duplicaterevisioncb(self, rev)
2692 2696 empty = False
2693 2697 continue
2694 2698
2695 2699 for p in (p1, p2):
2696 2700 if not self.index.has_node(p):
2697 2701 raise error.LookupError(
2698 2702 p, self.radix, _(b'unknown parent')
2699 2703 )
2700 2704
2701 2705 if not self.index.has_node(deltabase):
2702 2706 raise error.LookupError(
2703 2707 deltabase, self.display_id, _(b'unknown delta base')
2704 2708 )
2705 2709
2706 2710 baserev = self.rev(deltabase)
2707 2711
2708 2712 if baserev != nullrev and self.iscensored(baserev):
2709 2713 # if base is censored, delta must be full replacement in a
2710 2714 # single patch operation
2711 2715 hlen = struct.calcsize(b">lll")
2712 2716 oldlen = self.rawsize(baserev)
2713 2717 newlen = len(delta) - hlen
2714 2718 if delta[:hlen] != mdiff.replacediffheader(
2715 2719 oldlen, newlen
2716 2720 ):
2717 2721 raise error.CensoredBaseError(
2718 2722 self.display_id, self.node(baserev)
2719 2723 )
2720 2724
2721 2725 if not flags and self._peek_iscensored(baserev, delta):
2722 2726 flags |= REVIDX_ISCENSORED
2723 2727
2724 2728 # We assume consumers of addrevisioncb will want to retrieve
2725 2729 # the added revision, which will require a call to
2726 2730 # revision(). revision() will fast path if there is a cache
2727 2731 # hit. So, we tell _addrevision() to always cache in this case.
2728 2732 # We're only using addgroup() in the context of changegroup
2729 2733 # generation so the revision data can always be handled as raw
2730 2734 # by the flagprocessor.
2731 2735 rev = self._addrevision(
2732 2736 node,
2733 2737 None,
2734 2738 transaction,
2735 2739 link,
2736 2740 p1,
2737 2741 p2,
2738 2742 flags,
2739 2743 (baserev, delta),
2740 2744 alwayscache=alwayscache,
2741 2745 deltacomputer=deltacomputer,
2742 2746 sidedata=sidedata,
2743 2747 )
2744 2748
2745 2749 if addrevisioncb:
2746 2750 addrevisioncb(self, rev)
2747 2751 empty = False
2748 2752 finally:
2749 2753 self._adding_group = False
2750 2754 return not empty
2751 2755
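
A minimal sketch of driving addgroup(); every name below is illustrative rather than part of the API. Each element of the iterable is the 8-tuple unpacked in the loop above:

    def to_deltas(parsed_entries):
        # parsed_entries is assumed to come from a changegroup parser
        for e in parsed_entries:
            yield (e.node, e.p1, e.p2, e.linknode, e.deltabase, e.delta,
                   e.flags, e.sidedata)

    # rl, linkmapper and tr are assumed to exist in the caller's context
    added_any = rl.addgroup(to_deltas(parsed_entries), linkmapper, tr)
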
2752 2756 def iscensored(self, rev):
2753 2757 """Check if a file revision is censored."""
2754 2758 if not self._censorable:
2755 2759 return False
2756 2760
2757 2761 return self.flags(rev) & REVIDX_ISCENSORED
2758 2762
2759 2763 def _peek_iscensored(self, baserev, delta):
2760 2764 """Quickly check if a delta produces a censored revision."""
2761 2765 if not self._censorable:
2762 2766 return False
2763 2767
2764 2768 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2765 2769
2766 2770 def getstrippoint(self, minlink):
2767 2771 """find the minimum rev that must be stripped to strip the linkrev
2768 2772
2769 2773 Returns a tuple containing the minimum rev and a set of all revs that
2770 2774 have linkrevs that will be broken by this strip.
2771 2775 """
2772 2776 return storageutil.resolvestripinfo(
2773 2777 minlink,
2774 2778 len(self) - 1,
2775 2779 self.headrevs(),
2776 2780 self.linkrev,
2777 2781 self.parentrevs,
2778 2782 )
2779 2783
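
For example (illustrative only, assuming a revlog instance ``rl``), a caller about to strip changelog revision 42 and later would ask:

    rev, broken = rl.getstrippoint(42)
    # rev: first revision to truncate at
    # broken: revisions whose linkrevs the strip would break and which
    # therefore need to be saved and re-added by the caller
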
2780 2784 def strip(self, minlink, transaction):
2781 2785 """truncate the revlog on the first revision with a linkrev >= minlink
2782 2786
2783 2787 This function is called when we're stripping revision minlink and
2784 2788 its descendants from the repository.
2785 2789
2786 2790 We have to remove all revisions with linkrev >= minlink, because
2787 2791 the equivalent changelog revisions will be renumbered after the
2788 2792 strip.
2789 2793
2790 2794 So we truncate the revlog on the first of these revisions, and
2791 2795 trust that the caller has saved the revisions that shouldn't be
2792 2796 removed and that it'll re-add them after this truncation.
2793 2797 """
2794 2798 if len(self) == 0:
2795 2799 return
2796 2800
2797 2801 rev, _ = self.getstrippoint(minlink)
2798 2802 if rev == len(self):
2799 2803 return
2800 2804
2801 2805 # first truncate the files on disk
2802 2806 data_end = self.start(rev)
2803 2807 if not self._inline:
2804 2808 transaction.add(self._datafile, data_end)
2805 2809 end = rev * self.index.entry_size
2806 2810 else:
2807 2811 end = data_end + (rev * self.index.entry_size)
2808 2812
2809 2813 if self._sidedatafile:
2810 2814 sidedata_end = self.sidedata_cut_off(rev)
2811 2815 transaction.add(self._sidedatafile, sidedata_end)
2812 2816
2813 2817 transaction.add(self._indexfile, end)
2814 2818 if self._docket is not None:
2815 2819 # XXX we could leverage the docket while stripping. However, it is
2816 2820 # not powerful enough at the time of this comment.
2817 2821 self._docket.index_end = end
2818 2822 self._docket.data_end = data_end
2819 2823 self._docket.sidedata_end = sidedata_end
2820 2824 self._docket.write(transaction, stripping=True)
2821 2825
2822 2826 # then reset internal state in memory to forget those revisions
2823 2827 self._revisioncache = None
2824 2828 self._chaininfocache = util.lrucachedict(500)
2825 2829 self._segmentfile.clear_cache()
2826 2830 self._segmentfile_sidedata.clear_cache()
2827 2831
2828 2832 del self.index[rev:-1]
2829 2833
2830 2834 def checksize(self):
2831 2835 """Check size of index and data files
2832 2836
2833 2837 return a (dd, di) tuple.
2834 2838 - dd: extra bytes for the "data" file
2835 2839 - di: extra bytes for the "index" file
2836 2840
2837 2841 A healthy revlog will return (0, 0).
2838 2842 """
2839 2843 expected = 0
2840 2844 if len(self):
2841 2845 expected = max(0, self.end(len(self) - 1))
2842 2846
2843 2847 try:
2844 2848 with self._datafp() as f:
2845 2849 f.seek(0, io.SEEK_END)
2846 2850 actual = f.tell()
2847 2851 dd = actual - expected
2848 2852 except FileNotFoundError:
2849 2853 dd = 0
2850 2854
2851 2855 try:
2852 2856 f = self.opener(self._indexfile)
2853 2857 f.seek(0, io.SEEK_END)
2854 2858 actual = f.tell()
2855 2859 f.close()
2856 2860 s = self.index.entry_size
2857 2861 i = max(0, actual // s)
2858 2862 di = actual - (i * s)
2859 2863 if self._inline:
2860 2864 databytes = 0
2861 2865 for r in self:
2862 2866 databytes += max(0, self.length(r))
2863 2867 dd = 0
2864 2868 di = actual - len(self) * s - databytes
2865 2869 except FileNotFoundError:
2866 2870 di = 0
2867 2871
2868 2872 return (dd, di)
2869 2873
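
Illustrative use, assuming a revlog instance ``rl``:

    dd, di = rl.checksize()
    healthy = (dd, di) == (0, 0)  # any extra bytes suggest an interrupted write
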
2870 2874 def files(self):
2871 2875 res = [self._indexfile]
2872 2876 if self._docket_file is None:
2873 2877 if not self._inline:
2874 2878 res.append(self._datafile)
2875 2879 else:
2876 2880 res.append(self._docket_file)
2877 2881 res.extend(self._docket.old_index_filepaths(include_empty=False))
2878 2882 if self._docket.data_end:
2879 2883 res.append(self._datafile)
2880 2884 res.extend(self._docket.old_data_filepaths(include_empty=False))
2881 2885 if self._docket.sidedata_end:
2882 2886 res.append(self._sidedatafile)
2883 2887 res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
2884 2888 return res
2885 2889
2886 2890 def emitrevisions(
2887 2891 self,
2888 2892 nodes,
2889 2893 nodesorder=None,
2890 2894 revisiondata=False,
2891 2895 assumehaveparentrevisions=False,
2892 2896 deltamode=repository.CG_DELTAMODE_STD,
2893 2897 sidedata_helpers=None,
2894 2898 debug_info=None,
2895 2899 ):
2896 2900 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2897 2901 raise error.ProgrammingError(
2898 2902 b'unhandled value for nodesorder: %s' % nodesorder
2899 2903 )
2900 2904
2901 2905 if nodesorder is None and not self._generaldelta:
2902 2906 nodesorder = b'storage'
2903 2907
2904 2908 if (
2905 2909 not self._storedeltachains
2906 2910 and deltamode != repository.CG_DELTAMODE_PREV
2907 2911 ):
2908 2912 deltamode = repository.CG_DELTAMODE_FULL
2909 2913
2910 2914 return storageutil.emitrevisions(
2911 2915 self,
2912 2916 nodes,
2913 2917 nodesorder,
2914 2918 revlogrevisiondelta,
2915 2919 deltaparentfn=self.deltaparent,
2916 2920 candeltafn=self.candelta,
2917 2921 rawsizefn=self.rawsize,
2918 2922 revdifffn=self.revdiff,
2919 2923 flagsfn=self.flags,
2920 2924 deltamode=deltamode,
2921 2925 revisiondata=revisiondata,
2922 2926 assumehaveparentrevisions=assumehaveparentrevisions,
2923 2927 sidedata_helpers=sidedata_helpers,
2924 2928 debug_info=debug_info,
2925 2929 )
2926 2930
2927 2931 DELTAREUSEALWAYS = b'always'
2928 2932 DELTAREUSESAMEREVS = b'samerevs'
2929 2933 DELTAREUSENEVER = b'never'
2930 2934
2931 2935 DELTAREUSEFULLADD = b'fulladd'
2932 2936
2933 2937 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2934 2938
2935 2939 def clone(
2936 2940 self,
2937 2941 tr,
2938 2942 destrevlog,
2939 2943 addrevisioncb=None,
2940 2944 deltareuse=DELTAREUSESAMEREVS,
2941 2945 forcedeltabothparents=None,
2942 2946 sidedata_helpers=None,
2943 2947 ):
2944 2948 """Copy this revlog to another, possibly with format changes.
2945 2949
2946 2950 The destination revlog will contain the same revisions and nodes.
2947 2951 However, it may not be bit-for-bit identical due to e.g. delta encoding
2948 2952 differences.
2949 2953
2950 2954 The ``deltareuse`` argument controls how deltas from the existing revlog
2951 2955 are preserved in the destination revlog. The argument can have the
2952 2956 following values:
2953 2957
2954 2958 DELTAREUSEALWAYS
2955 2959 Deltas will always be reused (if possible), even if the destination
2956 2960 revlog would not select the same revisions for the delta. This is the
2957 2961 fastest mode of operation.
2958 2962 DELTAREUSESAMEREVS
2959 2963 Deltas will be reused if the destination revlog would pick the same
2960 2964 revisions for the delta. This mode strikes a balance between speed
2961 2965 and optimization.
2962 2966 DELTAREUSENEVER
2963 2967 Deltas will never be reused. This is the slowest mode of execution.
2964 2968 This mode can be used to recompute deltas (e.g. if the diff/delta
2965 2969 algorithm changes).
2966 2970 DELTAREUSEFULLADD
2967 2971 Revisions will be re-added as if they were new content. This is
2968 2972 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2969 2973 e.g. large file detection and handling.
2970 2974
2971 2975 Delta computation can be slow, so the choice of delta reuse policy can
2972 2976 significantly affect run time.
2973 2977
2974 2978 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2975 2979 two extremes. Deltas will be reused if they are appropriate. But if a
2976 2980 better base revision could be chosen, it will be. This means that if you
2977 2981 are converting a non-generaldelta revlog to a generaldelta revlog,
2978 2982 deltas will be recomputed if the delta's parent isn't a parent of the
2979 2983 revision.
2980 2984
2981 2985 In addition to the delta policy, the ``forcedeltabothparents``
2982 2986 argument controls whether to force computing deltas against both parents
2983 2987 for merges. If unset, the destination revlog's current setting is kept.
2984 2988
2985 2989 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2986 2990 `sidedata_helpers`.
2987 2991 """
2988 2992 if deltareuse not in self.DELTAREUSEALL:
2989 2993 raise ValueError(
2990 2994 _(b'value for deltareuse invalid: %s') % deltareuse
2991 2995 )
2992 2996
2993 2997 if len(destrevlog):
2994 2998 raise ValueError(_(b'destination revlog is not empty'))
2995 2999
2996 3000 if getattr(self, 'filteredrevs', None):
2997 3001 raise ValueError(_(b'source revlog has filtered revisions'))
2998 3002 if getattr(destrevlog, 'filteredrevs', None):
2999 3003 raise ValueError(_(b'destination revlog has filtered revisions'))
3000 3004
3001 3005 # lazydelta and lazydeltabase control whether to reuse a cached delta,
3002 3006 # if possible.
3003 3007 oldlazydelta = destrevlog._lazydelta
3004 3008 oldlazydeltabase = destrevlog._lazydeltabase
3005 3009 oldamd = destrevlog._deltabothparents
3006 3010
3007 3011 try:
3008 3012 if deltareuse == self.DELTAREUSEALWAYS:
3009 3013 destrevlog._lazydeltabase = True
3010 3014 destrevlog._lazydelta = True
3011 3015 elif deltareuse == self.DELTAREUSESAMEREVS:
3012 3016 destrevlog._lazydeltabase = False
3013 3017 destrevlog._lazydelta = True
3014 3018 elif deltareuse == self.DELTAREUSENEVER:
3015 3019 destrevlog._lazydeltabase = False
3016 3020 destrevlog._lazydelta = False
3017 3021
3018 3022 destrevlog._deltabothparents = forcedeltabothparents or oldamd
3019 3023
3020 3024 self._clone(
3021 3025 tr,
3022 3026 destrevlog,
3023 3027 addrevisioncb,
3024 3028 deltareuse,
3025 3029 forcedeltabothparents,
3026 3030 sidedata_helpers,
3027 3031 )
3028 3032
3029 3033 finally:
3030 3034 destrevlog._lazydelta = oldlazydelta
3031 3035 destrevlog._lazydeltabase = oldlazydeltabase
3032 3036 destrevlog._deltabothparents = oldamd
3033 3037
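
For instance (a sketch, assuming an open transaction ``tr``, a source revlog ``src`` and an empty destination ``dest``), forcing every delta to be recomputed during a format conversion could look like:

    src.clone(tr, dest, deltareuse=src.DELTAREUSENEVER)
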
3034 3038 def _clone(
3035 3039 self,
3036 3040 tr,
3037 3041 destrevlog,
3038 3042 addrevisioncb,
3039 3043 deltareuse,
3040 3044 forcedeltabothparents,
3041 3045 sidedata_helpers,
3042 3046 ):
3043 3047 """perform the core duty of `revlog.clone` after parameter processing"""
3044 3048 write_debug = None
3045 3049 if self._debug_delta:
3046 3050 write_debug = tr._report
3047 3051 deltacomputer = deltautil.deltacomputer(
3048 3052 destrevlog,
3049 3053 write_debug=write_debug,
3050 3054 )
3051 3055 index = self.index
3052 3056 for rev in self:
3053 3057 entry = index[rev]
3054 3058
3055 3059 # Some classes override linkrev to take filtered revs into
3056 3060 # account. Use raw entry from index.
3057 3061 flags = entry[0] & 0xFFFF
3058 3062 linkrev = entry[4]
3059 3063 p1 = index[entry[5]][7]
3060 3064 p2 = index[entry[6]][7]
3061 3065 node = entry[7]
3062 3066
3063 3067 # (Possibly) reuse the delta from the revlog if allowed and
3064 3068 # the revlog chunk is a delta.
3065 3069 cachedelta = None
3066 3070 rawtext = None
3067 3071 if deltareuse == self.DELTAREUSEFULLADD:
3068 3072 text = self._revisiondata(rev)
3069 3073 sidedata = self.sidedata(rev)
3070 3074
3071 3075 if sidedata_helpers is not None:
3072 3076 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3073 3077 self, sidedata_helpers, sidedata, rev
3074 3078 )
3075 3079 flags = flags | new_flags[0] & ~new_flags[1]
3076 3080
3077 3081 destrevlog.addrevision(
3078 3082 text,
3079 3083 tr,
3080 3084 linkrev,
3081 3085 p1,
3082 3086 p2,
3083 3087 cachedelta=cachedelta,
3084 3088 node=node,
3085 3089 flags=flags,
3086 3090 deltacomputer=deltacomputer,
3087 3091 sidedata=sidedata,
3088 3092 )
3089 3093 else:
3090 3094 if destrevlog._lazydelta:
3091 3095 dp = self.deltaparent(rev)
3092 3096 if dp != nullrev:
3093 3097 cachedelta = (dp, bytes(self._chunk(rev)))
3094 3098
3095 3099 sidedata = None
3096 3100 if not cachedelta:
3097 3101 rawtext = self._revisiondata(rev)
3098 3102 sidedata = self.sidedata(rev)
3099 3103 if sidedata is None:
3100 3104 sidedata = self.sidedata(rev)
3101 3105
3102 3106 if sidedata_helpers is not None:
3103 3107 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3104 3108 self, sidedata_helpers, sidedata, rev
3105 3109 )
3106 3110 flags = flags | new_flags[0] & ~new_flags[1]
3107 3111
3108 3112 with destrevlog._writing(tr):
3109 3113 destrevlog._addrevision(
3110 3114 node,
3111 3115 rawtext,
3112 3116 tr,
3113 3117 linkrev,
3114 3118 p1,
3115 3119 p2,
3116 3120 flags,
3117 3121 cachedelta,
3118 3122 deltacomputer=deltacomputer,
3119 3123 sidedata=sidedata,
3120 3124 )
3121 3125
3122 3126 if addrevisioncb:
3123 3127 addrevisioncb(self, rev, node)
3124 3128
3125 3129 def censorrevision(self, tr, censornode, tombstone=b''):
3126 3130 if self._format_version == REVLOGV0:
3127 3131 raise error.RevlogError(
3128 3132 _(b'cannot censor with version %d revlogs')
3129 3133 % self._format_version
3130 3134 )
3131 3135 elif self._format_version == REVLOGV1:
3132 3136 rewrite.v1_censor(self, tr, censornode, tombstone)
3133 3137 else:
3134 3138 rewrite.v2_censor(self, tr, censornode, tombstone)
3135 3139
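
Illustrative use (names assumed), replacing the content of a leaked file revision with a tombstone:

    rl.censorrevision(tr, badnode, tombstone=b'removed for legal reasons')
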
3136 3140 def verifyintegrity(self, state):
3137 3141 """Verifies the integrity of the revlog.
3138 3142
3139 3143 Yields ``revlogproblem`` instances describing problems that are
3140 3144 found.
3141 3145 """
3142 3146 dd, di = self.checksize()
3143 3147 if dd:
3144 3148 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3145 3149 if di:
3146 3150 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3147 3151
3148 3152 version = self._format_version
3149 3153
3150 3154 # The verifier tells us what version revlog we should be.
3151 3155 if version != state[b'expectedversion']:
3152 3156 yield revlogproblem(
3153 3157 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3154 3158 % (self.display_id, version, state[b'expectedversion'])
3155 3159 )
3156 3160
3157 3161 state[b'skipread'] = set()
3158 3162 state[b'safe_renamed'] = set()
3159 3163
3160 3164 for rev in self:
3161 3165 node = self.node(rev)
3162 3166
3163 3167 # Verify contents. 4 cases to care about:
3164 3168 #
3165 3169 # common: the most common case
3166 3170 # rename: with a rename
3167 3171 # meta: file content starts with b'\1\n', the metadata
3168 3172 # header defined in filelog.py, but without a rename
3169 3173 # ext: content stored externally
3170 3174 #
3171 3175 # More formally, their differences are shown below:
3172 3176 #
3173 3177 # | common | rename | meta | ext
3174 3178 # -------------------------------------------------------
3175 3179 # flags() | 0 | 0 | 0 | not 0
3176 3180 # renamed() | False | True | False | ?
3177 3181 # rawtext[0:2]=='\1\n'| False | True | True | ?
3178 3182 #
3179 3183 # "rawtext" means the raw text stored in revlog data, which
3180 3184 # could be retrieved by "rawdata(rev)". "text"
3181 3185 # mentioned below is "revision(rev)".
3182 3186 #
3183 3187 # There are 3 different lengths stored physically:
3184 3188 # 1. L1: rawsize, stored in revlog index
3185 3189 # 2. L2: len(rawtext), stored in revlog data
3186 3190 # 3. L3: len(text), stored in revlog data if flags==0, or
3187 3191 # possibly somewhere else if flags!=0
3188 3192 #
3189 3193 # L1 should be equal to L2. L3 could be different from them.
3190 3194 # "text" may or may not affect commit hash depending on flag
3191 3195 # processors (see flagutil.addflagprocessor).
3192 3196 #
3193 3197 # | common | rename | meta | ext
3194 3198 # -------------------------------------------------
3195 3199 # rawsize() | L1 | L1 | L1 | L1
3196 3200 # size() | L1 | L2-LM | L1(*) | L1 (?)
3197 3201 # len(rawtext) | L2 | L2 | L2 | L2
3198 3202 # len(text) | L2 | L2 | L2 | L3
3199 3203 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3200 3204 #
3201 3205 # LM: length of metadata, depending on rawtext
3202 3206 # (*): not ideal, see comment in filelog.size
3203 3207 # (?): could be "- len(meta)" if the resolved content has
3204 3208 # rename metadata
3205 3209 #
3206 3210 # Checks needed to be done:
3207 3211 # 1. length check: L1 == L2, in all cases.
3208 3212 # 2. hash check: depending on flag processor, we may need to
3209 3213 # use either "text" (external), or "rawtext" (in revlog).
3210 3214
3211 3215 try:
3212 3216 skipflags = state.get(b'skipflags', 0)
3213 3217 if skipflags:
3214 3218 skipflags &= self.flags(rev)
3215 3219
3216 3220 _verify_revision(self, skipflags, state, node)
3217 3221
3218 3222 l1 = self.rawsize(rev)
3219 3223 l2 = len(self.rawdata(node))
3220 3224
3221 3225 if l1 != l2:
3222 3226 yield revlogproblem(
3223 3227 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3224 3228 node=node,
3225 3229 )
3226 3230
3227 3231 except error.CensoredNodeError:
3228 3232 if state[b'erroroncensored']:
3229 3233 yield revlogproblem(
3230 3234 error=_(b'censored file data'), node=node
3231 3235 )
3232 3236 state[b'skipread'].add(node)
3233 3237 except Exception as e:
3234 3238 yield revlogproblem(
3235 3239 error=_(b'unpacking %s: %s')
3236 3240 % (short(node), stringutil.forcebytestr(e)),
3237 3241 node=node,
3238 3242 )
3239 3243 state[b'skipread'].add(node)
3240 3244
3241 3245 def storageinfo(
3242 3246 self,
3243 3247 exclusivefiles=False,
3244 3248 sharedfiles=False,
3245 3249 revisionscount=False,
3246 3250 trackedsize=False,
3247 3251 storedsize=False,
3248 3252 ):
3249 3253 d = {}
3250 3254
3251 3255 if exclusivefiles:
3252 3256 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3253 3257 if not self._inline:
3254 3258 d[b'exclusivefiles'].append((self.opener, self._datafile))
3255 3259
3256 3260 if sharedfiles:
3257 3261 d[b'sharedfiles'] = []
3258 3262
3259 3263 if revisionscount:
3260 3264 d[b'revisionscount'] = len(self)
3261 3265
3262 3266 if trackedsize:
3263 3267 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3264 3268
3265 3269 if storedsize:
3266 3270 d[b'storedsize'] = sum(
3267 3271 self.opener.stat(path).st_size for path in self.files()
3268 3272 )
3269 3273
3270 3274 return d
3271 3275
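
Callers typically request only the statistics they need, since some (like ``storedsize``) require extra I/O; an illustrative call, assuming a revlog ``rl``:

    info = rl.storageinfo(revisionscount=True, trackedsize=True)
    # e.g. {b'revisionscount': 12, b'trackedsize': 40960}
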
3272 3276 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3273 3277 if not self.hassidedata:
3274 3278 return
3275 3279 # revlog formats with sidedata support do not support inline data
3276 3280 assert not self._inline
3277 3281 if not helpers[1] and not helpers[2]:
3278 3282 # Nothing to generate or remove
3279 3283 return
3280 3284
3281 3285 new_entries = []
3282 3286 # append the new sidedata
3283 3287 with self._writing(transaction):
3284 3288 ifh, dfh, sdfh = self._writinghandles
3285 3289 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
3286 3290
3287 3291 current_offset = sdfh.tell()
3288 3292 for rev in range(startrev, endrev + 1):
3289 3293 entry = self.index[rev]
3290 3294 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3291 3295 store=self,
3292 3296 sidedata_helpers=helpers,
3293 3297 sidedata={},
3294 3298 rev=rev,
3295 3299 )
3296 3300
3297 3301 serialized_sidedata = sidedatautil.serialize_sidedata(
3298 3302 new_sidedata
3299 3303 )
3300 3304
3301 3305 sidedata_compression_mode = COMP_MODE_INLINE
3302 3306 if serialized_sidedata and self.hassidedata:
3303 3307 sidedata_compression_mode = COMP_MODE_PLAIN
3304 3308 h, comp_sidedata = self.compress(serialized_sidedata)
3305 3309 if (
3306 3310 h != b'u'
3307 3311 and comp_sidedata[0:1] != b'\0'
3308 3312 and len(comp_sidedata) < len(serialized_sidedata)
3309 3313 ):
3310 3314 assert not h
3311 3315 if (
3312 3316 comp_sidedata[0:1]
3313 3317 == self._docket.default_compression_header
3314 3318 ):
3315 3319 sidedata_compression_mode = COMP_MODE_DEFAULT
3316 3320 serialized_sidedata = comp_sidedata
3317 3321 else:
3318 3322 sidedata_compression_mode = COMP_MODE_INLINE
3319 3323 serialized_sidedata = comp_sidedata
3320 3324 if entry[8] != 0 or entry[9] != 0:
3321 3325 # rewriting entries that already have sidedata is not
3322 3326 # supported yet, because it introduces garbage data in the
3323 3327 # revlog.
3324 3328 msg = b"rewriting existing sidedata is not supported yet"
3325 3329 raise error.Abort(msg)
3326 3330
3327 3331 # Apply (potential) flags to add and to remove after running
3328 3332 # the sidedata helpers
3329 3333 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3330 3334 entry_update = (
3331 3335 current_offset,
3332 3336 len(serialized_sidedata),
3333 3337 new_offset_flags,
3334 3338 sidedata_compression_mode,
3335 3339 )
3336 3340
3337 3341 # the sidedata computation might have moved the file cursors around
3338 3342 sdfh.seek(current_offset, os.SEEK_SET)
3339 3343 sdfh.write(serialized_sidedata)
3340 3344 new_entries.append(entry_update)
3341 3345 current_offset += len(serialized_sidedata)
3342 3346 self._docket.sidedata_end = sdfh.tell()
3343 3347
3344 3348 # rewrite the new index entries
3345 3349 ifh.seek(startrev * self.index.entry_size)
3346 3350 for i, e in enumerate(new_entries):
3347 3351 rev = startrev + i
3348 3352 self.index.replace_sidedata_info(rev, *e)
3349 3353 packed = self.index.entry_binary(rev)
3350 3354 if rev == 0 and self._docket is None:
3351 3355 header = self._format_flags | self._format_version
3352 3356 header = self.index.pack_header(header)
3353 3357 packed = header + packed
3354 3358 ifh.write(packed)
@@ -1,1407 +1,1427 b''
1 1 # revlogdeltas.py - Logic around delta computation for revlog
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2018 Octobus <contact@octobus.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8 """Helper class to compute deltas stored inside revlogs"""
9 9
10 10
11 11 import collections
12 12 import struct
13 13
14 14 # import stuff from node for others to import from revlog
15 15 from ..node import nullrev
16 16 from ..i18n import _
17 17 from ..pycompat import getattr
18 18
19 19 from .constants import (
20 20 COMP_MODE_DEFAULT,
21 21 COMP_MODE_INLINE,
22 22 COMP_MODE_PLAIN,
23 23 KIND_CHANGELOG,
24 24 KIND_FILELOG,
25 25 KIND_MANIFESTLOG,
26 26 REVIDX_ISCENSORED,
27 27 REVIDX_RAWTEXT_CHANGING_FLAGS,
28 28 )
29 29
30 30 from ..thirdparty import attr
31 31
32 32 from .. import (
33 33 error,
34 34 mdiff,
35 35 util,
36 36 )
37 37
38 38 from . import flagutil
39 39
40 40 # maximum <delta-chain-data>/<revision-text-length> ratio
41 41 LIMIT_DELTA2TEXT = 2
42 42
43 43
44 44 class _testrevlog:
45 45 """minimalist fake revlog to use in doctests"""
46 46
47 47 def __init__(self, data, density=0.5, mingap=0, snapshot=()):
48 48 """data is an list of revision payload boundaries"""
49 49 self._data = data
50 50 self._srdensitythreshold = density
51 51 self._srmingapsize = mingap
52 52 self._snapshot = set(snapshot)
53 53 self.index = None
54 54
55 55 def start(self, rev):
56 56 if rev == nullrev:
57 57 return 0
58 58 if rev == 0:
59 59 return 0
60 60 return self._data[rev - 1]
61 61
62 62 def end(self, rev):
63 63 if rev == nullrev:
64 64 return 0
65 65 return self._data[rev]
66 66
67 67 def length(self, rev):
68 68 return self.end(rev) - self.start(rev)
69 69
70 70 def __len__(self):
71 71 return len(self._data)
72 72
73 73 def issnapshot(self, rev):
74 74 if rev == nullrev:
75 75 return True
76 76 return rev in self._snapshot
77 77
78 78
79 79 def slicechunk(revlog, revs, targetsize=None):
80 80 """slice revs to reduce the amount of unrelated data to be read from disk.
81 81
82 82 ``revs`` is sliced into groups that should be read in one go.
83 83 Assume that revs are sorted.
84 84
85 85 The initial chunk is sliced until the overall density (payload/chunks-span
86 86 ratio) is above `revlog._srdensitythreshold`. No gap smaller than
87 87 `revlog._srmingapsize` is skipped.
88 88
89 89 If `targetsize` is set, no chunk larger than `targetsize` will be yielded.
90 90 For consistency with other slicing choices, this limit won't go lower than
91 91 `revlog._srmingapsize`.
92 92
93 93 If individual revision chunks are larger than this limit, they will still
94 94 be yielded individually.
95 95
96 96 >>> data = [
97 97 ... 5, #00 (5)
98 98 ... 10, #01 (5)
99 99 ... 12, #02 (2)
100 100 ... 12, #03 (empty)
101 101 ... 27, #04 (15)
102 102 ... 31, #05 (4)
103 103 ... 31, #06 (empty)
104 104 ... 42, #07 (11)
105 105 ... 47, #08 (5)
106 106 ... 47, #09 (empty)
107 107 ... 48, #10 (1)
108 108 ... 51, #11 (3)
109 109 ... 74, #12 (23)
110 110 ... 85, #13 (11)
111 111 ... 86, #14 (1)
112 112 ... 91, #15 (5)
113 113 ... ]
114 114 >>> revlog = _testrevlog(data, snapshot=range(16))
115 115
116 116 >>> list(slicechunk(revlog, list(range(16))))
117 117 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
118 118 >>> list(slicechunk(revlog, [0, 15]))
119 119 [[0], [15]]
120 120 >>> list(slicechunk(revlog, [0, 11, 15]))
121 121 [[0], [11], [15]]
122 122 >>> list(slicechunk(revlog, [0, 11, 13, 15]))
123 123 [[0], [11, 13, 15]]
124 124 >>> list(slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
125 125 [[1, 2], [5, 8, 10, 11], [14]]
126 126
127 127 Slicing with a maximum chunk size
128 128 >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
129 129 [[0], [11], [13], [15]]
130 130 >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
131 131 [[0], [11], [13, 15]]
132 132
133 133 Slicing involving nullrev
134 134 >>> list(slicechunk(revlog, [-1, 0, 11, 13, 15], targetsize=20))
135 135 [[-1, 0], [11], [13, 15]]
136 136 >>> list(slicechunk(revlog, [-1, 13, 15], targetsize=5))
137 137 [[-1], [13], [15]]
138 138 """
139 139 if targetsize is not None:
140 140 targetsize = max(targetsize, revlog._srmingapsize)
141 141 # targetsize should not be specified when evaluating delta candidates:
142 142 # * targetsize is used to ensure we stay within specification when reading,
143 143 densityslicing = getattr(revlog.index, 'slicechunktodensity', None)
144 144 if densityslicing is None:
145 145 densityslicing = lambda x, y, z: _slicechunktodensity(revlog, x, y, z)
146 146 for chunk in densityslicing(
147 147 revs, revlog._srdensitythreshold, revlog._srmingapsize
148 148 ):
149 149 for subchunk in _slicechunktosize(revlog, chunk, targetsize):
150 150 yield subchunk
151 151
152 152
153 153 def _slicechunktosize(revlog, revs, targetsize=None):
154 154 """slice revs to match the target size
155 155
156 156 This is intended to be used on chunks that density slicing selected but
157 157 that are still too large compared to the read guarantee of revlog. This
158 158 might happen when the "minimal gap size" interrupted the slicing, or when
159 159 chains are built in a way that creates large blocks next to each other.
160 160
161 161 >>> data = [
162 162 ... 3, #0 (3)
163 163 ... 5, #1 (2)
164 164 ... 6, #2 (1)
165 165 ... 8, #3 (2)
166 166 ... 8, #4 (empty)
167 167 ... 11, #5 (3)
168 168 ... 12, #6 (1)
169 169 ... 13, #7 (1)
170 170 ... 14, #8 (1)
171 171 ... ]
172 172
173 173 == All snapshots cases ==
174 174 >>> revlog = _testrevlog(data, snapshot=range(9))
175 175
176 176 Cases where chunk is already small enough
177 177 >>> list(_slicechunktosize(revlog, [0], 3))
178 178 [[0]]
179 179 >>> list(_slicechunktosize(revlog, [6, 7], 3))
180 180 [[6, 7]]
181 181 >>> list(_slicechunktosize(revlog, [0], None))
182 182 [[0]]
183 183 >>> list(_slicechunktosize(revlog, [6, 7], None))
184 184 [[6, 7]]
185 185
186 186 cases where we need actual slicing
187 187 >>> list(_slicechunktosize(revlog, [0, 1], 3))
188 188 [[0], [1]]
189 189 >>> list(_slicechunktosize(revlog, [1, 3], 3))
190 190 [[1], [3]]
191 191 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
192 192 [[1, 2], [3]]
193 193 >>> list(_slicechunktosize(revlog, [3, 5], 3))
194 194 [[3], [5]]
195 195 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
196 196 [[3], [5]]
197 197 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
198 198 [[5], [6, 7, 8]]
199 199 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
200 200 [[0], [1, 2], [3], [5], [6, 7, 8]]
201 201
202 202 Case with too large individual chunk (must return valid chunk)
203 203 >>> list(_slicechunktosize(revlog, [0, 1], 2))
204 204 [[0], [1]]
205 205 >>> list(_slicechunktosize(revlog, [1, 3], 1))
206 206 [[1], [3]]
207 207 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
208 208 [[3], [5]]
209 209
210 210 == No Snapshot cases ==
211 211 >>> revlog = _testrevlog(data)
212 212
213 213 Cases where chunk is already small enough
214 214 >>> list(_slicechunktosize(revlog, [0], 3))
215 215 [[0]]
216 216 >>> list(_slicechunktosize(revlog, [6, 7], 3))
217 217 [[6, 7]]
218 218 >>> list(_slicechunktosize(revlog, [0], None))
219 219 [[0]]
220 220 >>> list(_slicechunktosize(revlog, [6, 7], None))
221 221 [[6, 7]]
222 222
223 223 cases where we need actual slicing
224 224 >>> list(_slicechunktosize(revlog, [0, 1], 3))
225 225 [[0], [1]]
226 226 >>> list(_slicechunktosize(revlog, [1, 3], 3))
227 227 [[1], [3]]
228 228 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
229 229 [[1], [2, 3]]
230 230 >>> list(_slicechunktosize(revlog, [3, 5], 3))
231 231 [[3], [5]]
232 232 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
233 233 [[3], [4, 5]]
234 234 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
235 235 [[5], [6, 7, 8]]
236 236 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
237 237 [[0], [1, 2], [3], [5], [6, 7, 8]]
238 238
239 239 Case with too large individual chunk (must return valid chunk)
240 240 >>> list(_slicechunktosize(revlog, [0, 1], 2))
241 241 [[0], [1]]
242 242 >>> list(_slicechunktosize(revlog, [1, 3], 1))
243 243 [[1], [3]]
244 244 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
245 245 [[3], [5]]
246 246
247 247 == mixed case ==
248 248 >>> revlog = _testrevlog(data, snapshot=[0, 1, 2])
249 249 >>> list(_slicechunktosize(revlog, list(range(9)), 5))
250 250 [[0, 1], [2], [3, 4, 5], [6, 7, 8]]
251 251 """
252 252 assert targetsize is None or 0 <= targetsize
253 253 startdata = revlog.start(revs[0])
254 254 enddata = revlog.end(revs[-1])
255 255 fullspan = enddata - startdata
256 256 if targetsize is None or fullspan <= targetsize:
257 257 yield revs
258 258 return
259 259
260 260 startrevidx = 0
261 261 endrevidx = 1
262 262 iterrevs = enumerate(revs)
263 263 next(iterrevs) # skip first rev.
264 264 # first step: get snapshots out of the way
265 265 for idx, r in iterrevs:
266 266 span = revlog.end(r) - startdata
267 267 snapshot = revlog.issnapshot(r)
268 268 if span <= targetsize and snapshot:
269 269 endrevidx = idx + 1
270 270 else:
271 271 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
272 272 if chunk:
273 273 yield chunk
274 274 startrevidx = idx
275 275 startdata = revlog.start(r)
276 276 endrevidx = idx + 1
277 277 if not snapshot:
278 278 break
279 279
280 280 # for the others, we use binary slicing to quickly converge toward valid
281 281 # chunks (otherwise, we might end up looking for start/end of many
282 282 # revisions). This logic is not looking for the perfect slicing point, it
283 283 # focuses on quickly converging toward valid chunks.
284 284 nbitem = len(revs)
285 285 while (enddata - startdata) > targetsize:
286 286 endrevidx = nbitem
287 287 if nbitem - startrevidx <= 1:
288 288 break # protect against individual chunk larger than limit
289 289 localenddata = revlog.end(revs[endrevidx - 1])
290 290 span = localenddata - startdata
291 291 while span > targetsize:
292 292 if endrevidx - startrevidx <= 1:
293 293 break # protect against individual chunk larger than limit
294 294 endrevidx -= (endrevidx - startrevidx) // 2
295 295 localenddata = revlog.end(revs[endrevidx - 1])
296 296 span = localenddata - startdata
297 297 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
298 298 if chunk:
299 299 yield chunk
300 300 startrevidx = endrevidx
301 301 startdata = revlog.start(revs[startrevidx])
302 302
303 303 chunk = _trimchunk(revlog, revs, startrevidx)
304 304 if chunk:
305 305 yield chunk
306 306
307 307
308 308 def _slicechunktodensity(revlog, revs, targetdensity=0.5, mingapsize=0):
309 309 """slice revs to reduce the amount of unrelated data to be read from disk.
310 310
311 311 ``revs`` is sliced into groups that should be read in one go.
312 312 Assume that revs are sorted.
313 313
314 314 The initial chunk is sliced until the overall density (payload/chunks-span
315 315 ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
316 316 skipped.
317 317
318 318 >>> revlog = _testrevlog([
319 319 ... 5, #00 (5)
320 320 ... 10, #01 (5)
321 321 ... 12, #02 (2)
322 322 ... 12, #03 (empty)
323 323 ... 27, #04 (15)
324 324 ... 31, #05 (4)
325 325 ... 31, #06 (empty)
326 326 ... 42, #07 (11)
327 327 ... 47, #08 (5)
328 328 ... 47, #09 (empty)
329 329 ... 48, #10 (1)
330 330 ... 51, #11 (3)
331 331 ... 74, #12 (23)
332 332 ... 85, #13 (11)
333 333 ... 86, #14 (1)
334 334 ... 91, #15 (5)
335 335 ... ])
336 336
337 337 >>> list(_slicechunktodensity(revlog, list(range(16))))
338 338 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
339 339 >>> list(_slicechunktodensity(revlog, [0, 15]))
340 340 [[0], [15]]
341 341 >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
342 342 [[0], [11], [15]]
343 343 >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
344 344 [[0], [11, 13, 15]]
345 345 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
346 346 [[1, 2], [5, 8, 10, 11], [14]]
347 347 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
348 348 ... mingapsize=20))
349 349 [[1, 2, 3, 5, 8, 10, 11], [14]]
350 350 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
351 351 ... targetdensity=0.95))
352 352 [[1, 2], [5], [8, 10, 11], [14]]
353 353 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
354 354 ... targetdensity=0.95, mingapsize=12))
355 355 [[1, 2], [5, 8, 10, 11], [14]]
356 356 """
357 357 start = revlog.start
358 358 length = revlog.length
359 359
360 360 if len(revs) <= 1:
361 361 yield revs
362 362 return
363 363
364 364 deltachainspan = segmentspan(revlog, revs)
365 365
366 366 if deltachainspan < mingapsize:
367 367 yield revs
368 368 return
369 369
370 370 readdata = deltachainspan
371 371 chainpayload = sum(length(r) for r in revs)
372 372
373 373 if deltachainspan:
374 374 density = chainpayload / float(deltachainspan)
375 375 else:
376 376 density = 1.0
377 377
378 378 if density >= targetdensity:
379 379 yield revs
380 380 return
381 381
382 382 # Collect the gaps; they are sorted below so the largest can be popped first
383 383 gaps = []
384 384 prevend = None
385 385 for i, rev in enumerate(revs):
386 386 revstart = start(rev)
387 387 revlen = length(rev)
388 388
389 389 # Skip empty revisions to form larger holes
390 390 if revlen == 0:
391 391 continue
392 392
393 393 if prevend is not None:
394 394 gapsize = revstart - prevend
395 395 # only consider holes that are large enough
396 396 if gapsize > mingapsize:
397 397 gaps.append((gapsize, i))
398 398
399 399 prevend = revstart + revlen
400 400 # sort the gaps so we can pop them from largest to smallest
401 401 gaps.sort()
402 402
403 403 # Collect the indices of the largest holes until the density is acceptable
404 404 selected = []
405 405 while gaps and density < targetdensity:
406 406 gapsize, gapidx = gaps.pop()
407 407
408 408 selected.append(gapidx)
409 409
410 410 # the gaps were sorted by increasing size, so pop() above returns
411 411 # the largest remaining gap
412 412 readdata -= gapsize
413 413 if readdata > 0:
414 414 density = chainpayload / float(readdata)
415 415 else:
416 416 density = 1.0
417 417 selected.sort()
418 418
419 419 # Cut the revs at collected indices
420 420 previdx = 0
421 421 for idx in selected:
422 422
423 423 chunk = _trimchunk(revlog, revs, previdx, idx)
424 424 if chunk:
425 425 yield chunk
426 426
427 427 previdx = idx
428 428
429 429 chunk = _trimchunk(revlog, revs, previdx)
430 430 if chunk:
431 431 yield chunk
432 432
433 433
434 434 def _trimchunk(revlog, revs, startidx, endidx=None):
435 435 """returns revs[startidx:endidx] without empty trailing revs
436 436
437 437 Doctest Setup
438 438 >>> revlog = _testrevlog([
439 439 ... 5, #0
440 440 ... 10, #1
441 441 ... 12, #2
442 442 ... 12, #3 (empty)
443 443 ... 17, #4
444 444 ... 21, #5
445 445 ... 21, #6 (empty)
446 446 ... ])
447 447
448 448 Contiguous cases:
449 449 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
450 450 [0, 1, 2, 3, 4, 5]
451 451 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
452 452 [0, 1, 2, 3, 4]
453 453 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
454 454 [0, 1, 2]
455 455 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
456 456 [2]
457 457 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
458 458 [3, 4, 5]
459 459 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
460 460 [3, 4]
461 461
462 462 Discontiguous cases:
463 463 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
464 464 [1, 3, 5]
465 465 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
466 466 [1]
467 467 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
468 468 [3, 5]
469 469 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
470 470 [3, 5]
471 471 """
472 472 length = revlog.length
473 473
474 474 if endidx is None:
475 475 endidx = len(revs)
476 476
477 477 # If we have a non-empty delta candidate, there is nothing to trim
478 478 if revs[endidx - 1] < len(revlog):
479 479 # Trim empty revs at the end, except the very first revision of a chain
480 480 while (
481 481 endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0
482 482 ):
483 483 endidx -= 1
484 484
485 485 return revs[startidx:endidx]
486 486
487 487
488 488 def segmentspan(revlog, revs):
489 489 """Get the byte span of a segment of revisions
490 490
491 491 revs is a sorted array of revision numbers
492 492
493 493 >>> revlog = _testrevlog([
494 494 ... 5, #0
495 495 ... 10, #1
496 496 ... 12, #2
497 497 ... 12, #3 (empty)
498 498 ... 17, #4
499 499 ... ])
500 500
501 501 >>> segmentspan(revlog, [0, 1, 2, 3, 4])
502 502 17
503 503 >>> segmentspan(revlog, [0, 4])
504 504 17
505 505 >>> segmentspan(revlog, [3, 4])
506 506 5
507 507 >>> segmentspan(revlog, [1, 2, 3,])
508 508 7
509 509 >>> segmentspan(revlog, [1, 3])
510 510 7
511 511 """
512 512 if not revs:
513 513 return 0
514 514 end = revlog.end(revs[-1])
515 515 return end - revlog.start(revs[0])
516 516
517 517
518 518 def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode):
519 519 """build full text from a (base, delta) pair and other metadata"""
520 520 # special case deltas which replace entire base; no need to decode
521 521 # base revision. this neatly avoids censored bases, which throw when
522 522 # they're decoded.
523 523 hlen = struct.calcsize(b">lll")
524 524 if delta[:hlen] == mdiff.replacediffheader(
525 525 revlog.rawsize(baserev), len(delta) - hlen
526 526 ):
527 527 fulltext = delta[hlen:]
528 528 else:
529 529 # deltabase is rawtext before changed by flag processors, which is
530 530 # equivalent to non-raw text
531 531 basetext = revlog.revision(baserev, _df=fh)
532 532 fulltext = mdiff.patch(basetext, delta)
533 533
534 534 try:
535 535 validatehash = flagutil.processflagsraw(revlog, fulltext, flags)
536 536 if validatehash:
537 537 revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2)
538 538 if flags & REVIDX_ISCENSORED:
539 539 raise error.StorageError(
540 540 _(b'node %s is not censored') % expectednode
541 541 )
542 542 except error.CensoredNodeError:
543 543 # must pass the censored index flag to add censored revisions
544 544 if not flags & REVIDX_ISCENSORED:
545 545 raise
546 546 return fulltext
547 547
548 548
549 549 @attr.s(slots=True, frozen=True)
550 550 class _deltainfo:
551 551 distance = attr.ib()
552 552 deltalen = attr.ib()
553 553 data = attr.ib()
554 554 base = attr.ib()
555 555 chainbase = attr.ib()
556 556 chainlen = attr.ib()
557 557 compresseddeltalen = attr.ib()
558 558 snapshotdepth = attr.ib()
559 559
560 560
561 561 def drop_u_compression(delta):
562 562 """turn into a "u" (no-compression) into no-compression without header
563 563
564 564 This is useful for revlog formats that have a better compression method.
565 565 """
566 566 assert delta.data[0] == b'u', delta.data[0]
567 567 return _deltainfo(
568 568 delta.distance,
569 569 delta.deltalen - 1,
570 570 (b'', delta.data[1]),
571 571 delta.base,
572 572 delta.chainbase,
573 573 delta.chainlen,
574 574 delta.compresseddeltalen,
575 575 delta.snapshotdepth,
576 576 )
577 577
578 578
579 579 def isgooddeltainfo(revlog, deltainfo, revinfo):
580 580 """Returns True if the given delta is good. Good means that it is within
581 581 the disk span, disk size, and chain length bounds that we know to be
582 582 performant."""
583 583 if deltainfo is None:
584 584 return False
585 585
586 586 # - 'deltainfo.distance' is the distance from the base revision --
587 587 # bounding it limits the amount of I/O we need to do.
588 588 # - 'deltainfo.compresseddeltalen' is the sum of the total size of
589 589 # deltas we need to apply -- bounding it limits the amount of CPU
590 590 # we consume.
591 591
592 592 textlen = revinfo.textlen
593 593 defaultmax = textlen * 4
594 594 maxdist = revlog._maxdeltachainspan
595 595 if not maxdist:
596 596 maxdist = deltainfo.distance # ensure the conditional passes
597 597 maxdist = max(maxdist, defaultmax)
598 598
599 599 # Bad delta from read span:
600 600 #
601 601 # If the span of data read is larger than the maximum allowed.
602 602 #
603 603 # In the sparse-revlog case, we rely on the associated "sparse reading"
604 604 # to avoid issues related to the span of data. In theory, it would be
605 605 # possible to build a pathological revlog where the delta pattern would
606 606 # lead to too many reads. However, this does not happen in practice at
607 607 # all, so we skip the span check entirely.
608 608 if not revlog._sparserevlog and maxdist < deltainfo.distance:
609 609 return False
610 610
611 611 # Bad delta from new delta size:
612 612 #
613 613 # If the delta size is larger than the target text, storing the
614 614 # delta will be inefficient.
615 615 if textlen < deltainfo.deltalen:
616 616 return False
617 617
618 618 # Bad delta from cumulative payload size:
619 619 #
620 620 # If the sum of the deltas gets larger than K * target text length.
621 621 if textlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
622 622 return False
623 623
624 624 # Bad delta from chain length:
625 625 #
626 626 # If the number of deltas in the chain gets too high.
627 627 if revlog._maxchainlen and revlog._maxchainlen < deltainfo.chainlen:
628 628 return False
629 629
630 630 # bad delta from intermediate snapshot size limit
631 631 #
632 632 # If an intermediate snapshot size is higher than the limit. The
633 633 # limit exists to prevent endless chains of intermediate deltas
634 634 # from being created.
635 635 if (
636 636 deltainfo.snapshotdepth is not None
637 637 and (textlen >> deltainfo.snapshotdepth) < deltainfo.deltalen
638 638 ):
639 639 return False
640 640
641 641 # bad delta if new intermediate snapshot is larger than the previous
642 642 # snapshot
643 643 if (
644 644 deltainfo.snapshotdepth
645 645 and revlog.length(deltainfo.base) < deltainfo.deltalen
646 646 ):
647 647 return False
648 648
649 649 return True
650 650
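# A worked example of the intermediate-snapshot bound above
# (illustrative numbers): with textlen = 100000 and snapshotdepth = 3,
# a snapshot delta is rejected as soon as its length exceeds
# 100000 >> 3 == 12500, i.e. each extra snapshot level halves the
# acceptable delta size.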
651 651
652 652 # If a revision's full text is that much bigger than a base candidate full
653 653 # text's, it is very unlikely that it will produce a valid delta. We no longer
654 654 # consider these candidates.
655 655 LIMIT_BASE2TEXT = 500
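# For instance (illustrative): with a 1 MB fulltext, candidate bases
# whose rawsize is below 1 MB / 500 (about 2 KB) are skipped by
# _candidategroups below, as such a small base is very unlikely to
# yield a valid delta.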
656 656
657 657
658 658 def _candidategroups(
659 659 revlog,
660 660 textlen,
661 661 p1,
662 662 p2,
663 663 cachedelta,
664 664 excluded_bases=None,
665 665 target_rev=None,
666 666 ):
667 667 """Provides group of revision to be tested as delta base
668 668
669 669 This top level function focus on emitting groups with unique and worthwhile
670 670 content. See _raw_candidate_groups for details about the group order.
671 671 """
672 672 # should we try to build a delta?
673 673 if not (len(revlog) and revlog._storedeltachains):
674 674 yield None
675 675 return
676 676
677 677 deltalength = revlog.length
678 678 deltaparent = revlog.deltaparent
679 679 sparse = revlog._sparserevlog
680 680 good = None
681 681
682 682 deltas_limit = textlen * LIMIT_DELTA2TEXT
683 group_chunk_size = revlog._candidate_group_chunk_size
683 684
684 685 tested = {nullrev}
685 686 candidates = _refinedgroups(
686 687 revlog,
687 688 p1,
688 689 p2,
689 690 cachedelta,
690 691 )
691 692 while True:
692 693 tentative = candidates.send(good)
693 694 if tentative is None:
694 695 break
695 696 group = []
696 697 for rev in tentative:
697 698 # skip over empty deltas (no need to include them in a chain)
698 699 while revlog._generaldelta and not (
699 700 rev == nullrev or rev in tested or deltalength(rev)
700 701 ):
701 702 tested.add(rev)
702 703 rev = deltaparent(rev)
703 704 # no need to try a delta against nullrev, this will be done as a
704 705 # last resort.
705 706 if rev == nullrev:
706 707 continue
707 708 # filter out revisions we already tested
708 709 if rev in tested:
709 710 continue
710 711 # a higher authority deemed the base unworthy (e.g. censored)
711 712 if excluded_bases is not None and rev in excluded_bases:
712 713 tested.add(rev)
713 714 continue
714 715 # In some recomputation cases, that rev is too high in the
715 716 # revlog
716 717 if target_rev is not None and rev >= target_rev:
717 718 tested.add(rev)
718 719 continue
719 720 # filter out delta bases that will never produce a good delta
720 721 if deltas_limit < revlog.length(rev):
721 722 tested.add(rev)
722 723 continue
723 724 if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT):
724 725 tested.add(rev)
725 726 continue
726 727 # no delta for rawtext-changing revs (see "candelta" for why)
727 728 if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
728 729 tested.add(rev)
729 730 continue
730 731
731 732 # If we reach here, we are about to build and test a delta.
732 733 # The delta building process will compute the chaininfo in all
733 734 # cases; since that computation is cached, it is fine to access
734 735 # it here too.
735 736 chainlen, chainsize = revlog._chaininfo(rev)
736 737 # if chain will be too long, skip base
737 738 if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
738 739 tested.add(rev)
739 740 continue
740 741 # if chain already has too much data, skip base
741 742 if deltas_limit < chainsize:
742 743 tested.add(rev)
743 744 continue
744 745 if sparse and revlog.upperboundcomp is not None:
745 746 maxcomp = revlog.upperboundcomp
746 747 basenotsnap = (p1, p2, nullrev)
747 748 if rev not in basenotsnap and revlog.issnapshot(rev):
748 749 snapshotdepth = revlog.snapshotdepth(rev)
749 750 # If text is significantly larger than the base, we can
750 751 # expect the resulting delta to be proportional to the size
751 752 # difference
752 753 revsize = revlog.rawsize(rev)
753 754 rawsizedistance = max(textlen - revsize, 0)
754 755 # use an estimate of the compression upper bound.
755 756 lowestrealisticdeltalen = rawsizedistance // maxcomp
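 # (Illustrative numbers: textlen = 100000, revsize = 40000 and
 # maxcomp = 10 give lowestrealisticdeltalen = 60000 // 10 = 6000;
 # the base survives only if 6000 passes both checks below.)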
756 757
757 758 # check the absolute constraint on the delta size
758 759 snapshotlimit = textlen >> snapshotdepth
759 760 if snapshotlimit < lowestrealisticdeltalen:
760 761 # delta lower bound is larger than accepted upper bound
761 762 tested.add(rev)
762 763 continue
763 764
764 765 # check the relative constraint on the delta size
765 766 revlength = revlog.length(rev)
766 767 if revlength < lowestrealisticdeltalen:
767 768 # delta probable lower bound is larger than target base
768 769 tested.add(rev)
769 770 continue
770 771
771 772 group.append(rev)
772 773 if group:
773 # XXX: in the sparse revlog case, group can become large,
774 # impacting performances. Some bounding or slicing mecanism
775 # would help to reduce this impact.
774 # When the candidate group is big, testing it can result in a quite
775 # significant performance impact. To reduce this, we can send the
776 # candidates in smaller batches until a new batch does not provide
777 # any improvement.
778 #
779 # This might reduce the overall efficiency of the compression in
780 # some corner cases, but it should also prevent very pathological
781 # cases (e.g. 20,000 candidates) from being an issue.
782 #
783 # XXX note that the ordering of the group becomes important as it
784 # now impacts the final result. The current order is arbitrary
785 # and can be improved.
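# For instance (illustrative values): with group_chunk_size = 3 and
# 10 candidates, we would send batches of 3, 3, 3 and 1, stopping
# early as soon as a whole batch leaves `good` unchanged.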
786 if group_chunk_size == 0:
776 787 tested.update(group)
777 788 good = yield tuple(group)
789 else:
790 prev_good = good
791 for start in range(0, len(group), group_chunk_size):
792 sub_group = group[start : start + group_chunk_size]
793 tested.update(sub_group)
794 good = yield tuple(sub_group)
795 if prev_good == good:
796 break
797
778 798 yield None
779 799
780 800
781 801 def _findsnapshots(revlog, cache, start_rev):
782 802 """find snapshot from start_rev to tip"""
783 803 if util.safehasattr(revlog.index, b'findsnapshots'):
784 804 revlog.index.findsnapshots(cache, start_rev)
785 805 else:
786 806 deltaparent = revlog.deltaparent
787 807 issnapshot = revlog.issnapshot
788 808 for rev in revlog.revs(start_rev):
789 809 if issnapshot(rev):
790 810 cache[deltaparent(rev)].append(rev)
791 811
792 812
793 813 def _refinedgroups(revlog, p1, p2, cachedelta):
794 814 good = None
795 815 # First we try to reuse the delta contained in the bundle.
796 816 # (or from the source revlog)
797 817 #
798 818 # This logic only applies to general delta repositories and can be disabled
799 819 # through configuration. Disabling reuse of the source delta is useful
800 820 # when we want to make sure we recompute "optimal" deltas.
801 821 debug_info = None
802 822 if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
803 823 # Assume what we received from the server is a good choice
804 824 # build delta will reuse the cache
805 825 if debug_info is not None:
806 826 debug_info['cached-delta.tested'] += 1
807 827 good = yield (cachedelta[0],)
808 828 if good is not None:
809 829 if debug_info is not None:
810 830 debug_info['cached-delta.accepted'] += 1
811 831 yield None
812 832 return
813 833 # XXX cache me higher
814 834 snapshots = collections.defaultdict(list)
815 835 groups = _rawgroups(
816 836 revlog,
817 837 p1,
818 838 p2,
819 839 cachedelta,
820 840 snapshots,
821 841 )
822 842 for candidates in groups:
823 843 good = yield candidates
824 844 if good is not None:
825 845 break
826 846
827 847 # If sparse revlog is enabled, we can try to refine the available deltas
828 848 if not revlog._sparserevlog:
829 849 yield None
830 850 return
831 851
832 852 # if we have a refinable value, try to refine it
833 853 if good is not None and good not in (p1, p2) and revlog.issnapshot(good):
834 854 # refine snapshot down
835 855 previous = None
836 856 while previous != good:
837 857 previous = good
838 858 base = revlog.deltaparent(good)
839 859 if base == nullrev:
840 860 break
841 861 good = yield (base,)
842 862 # refine snapshot up
843 863 if not snapshots:
844 864 _findsnapshots(revlog, snapshots, good + 1)
845 865 previous = None
846 866 while good != previous:
847 867 previous = good
848 868 children = tuple(sorted(c for c in snapshots[good]))
849 869 good = yield children
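 # (Illustrative walk: if the accepted base `good` is snapshot rev 12
 # whose delta parent is rev 8, refine-down first offers (8,) and keeps
 # descending while the caller accepts; refine-up then offers the
 # snapshot children of the settled base until no candidate improves.)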
850 870
851 871 if debug_info is not None:
852 872 if good is None:
853 873 debug_info['no-solution'] += 1
854 874
855 875 yield None
856 876
857 877
858 878 def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None):
859 879 """Provides group of revision to be tested as delta base
860 880
861 881 This lower level function focus on emitting delta theorically interresting
862 882 without looking it any practical details.
863 883
864 884 The group order aims at providing fast or small candidates first.
865 885 """
866 886 gdelta = revlog._generaldelta
867 887 # gate sparse behind general-delta because of issue6056
868 888 sparse = gdelta and revlog._sparserevlog
869 889 curr = len(revlog)
870 890 prev = curr - 1
871 891 deltachain = lambda rev: revlog._deltachain(rev)[0]
872 892
873 893 if gdelta:
874 894 # exclude any base already tested lazily
875 895 parents = [p for p in (p1, p2) if p != nullrev]
876 896
877 897 if not revlog._deltabothparents and len(parents) == 2:
878 898 parents.sort()
879 899 # To minimize the chance of having to build a fulltext,
880 900 # pick first whichever parent is closest to us (max rev)
881 901 yield (parents[1],)
882 902 # then the other one (min rev) if the first did not fit
883 903 yield (parents[0],)
884 904 elif len(parents) > 0:
885 905 # Test all parents (1 or 2), and keep the best candidate
886 906 yield parents
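 # (For instance, with parents at revs 10 and 12 and delta-both-parents
 # disabled, this yields (12,) and then (10,); when both parents are
 # tested together, a single (10, 12) group is emitted.)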
887 907
888 908 if sparse and parents:
889 909 if snapshots is None:
890 910 # map: base-rev: [snapshot-revs]
891 911 snapshots = collections.defaultdict(list)
893 913 # See if we can use an existing snapshot in the parent chains as a
894 914 # base for a new intermediate snapshot
894 914 #
895 915 # search for snapshot in parents delta chain
896 916 # map: snapshot-level: snapshot-rev
897 917 parents_snaps = collections.defaultdict(set)
898 918 candidate_chains = [deltachain(p) for p in parents]
899 919 for chain in candidate_chains:
900 920 for idx, s in enumerate(chain):
901 921 if not revlog.issnapshot(s):
902 922 break
903 923 parents_snaps[idx].add(s)
904 924 snapfloor = min(parents_snaps[0]) + 1
905 925 _findsnapshots(revlog, snapshots, snapfloor)
906 926 # search for the highest "unrelated" revision
907 927 #
908 928 # Adding snapshots used by an "unrelated" revision increases the odds
909 929 # that we reuse an independent, yet better, snapshot chain.
910 930 #
911 931 # XXX instead of building a set of revisions, we could lazily enumerate
912 932 # over the chains. That would be more efficient, however we stick to
913 933 # simple code for now.
914 934 all_revs = set()
915 935 for chain in candidate_chains:
916 936 all_revs.update(chain)
917 937 other = None
918 938 for r in revlog.revs(prev, snapfloor):
919 939 if r not in all_revs:
920 940 other = r
921 941 break
922 942 if other is not None:
923 943 # To avoid unfair competition, we won't use unrelated intermediate
924 944 # snapshots that are deeper than the ones from the parent delta
925 945 # chain.
926 946 max_depth = max(parents_snaps.keys())
927 947 chain = deltachain(other)
928 948 for depth, s in enumerate(chain):
929 949 if s < snapfloor:
930 950 continue
931 951 if max_depth < depth:
932 952 break
933 953 if not revlog.issnapshot(s):
934 954 break
935 955 parents_snaps[depth].add(s)
936 956 # Test them as possible intermediate snapshot bases.
937 957 # We test them from highest to lowest level. High level ones are more
938 958 # likely to result in small deltas.
939 959 floor = None
940 960 for idx, snaps in sorted(parents_snaps.items(), reverse=True):
941 961 siblings = set()
942 962 for s in snaps:
943 963 siblings.update(snapshots[s])
944 964 # Before considering making a new intermediate snapshot, we check
945 965 # if existing snapshots, children of the base we consider, would
946 966 # be suitable.
947 967 #
948 968 # It gives a chance to reuse a delta chain "unrelated" to the
949 969 # current revision instead of starting our own. Without such
950 970 # re-use, topological branches would keep reopening new chains,
951 971 # creating more and more snapshots as the repository grows.
952 972
953 973 if floor is not None:
954 974 # We only do this for siblings created after the one in our
955 975 # parent's delta chain. Those created before have fewer chances
956 976 # to be valid bases since our ancestors had to create a new
957 977 # snapshot.
958 978 siblings = [r for r in siblings if floor < r]
959 979 yield tuple(sorted(siblings))
960 980 # then test the base from our parent's delta chain.
961 981 yield tuple(sorted(snaps))
962 982 floor = min(snaps)
963 983 # No suitable base found in the parent chain; search whether any full
964 984 # snapshot emitted since the parent's base would be a suitable base
965 985 # for an intermediate snapshot.
966 986 #
967 987 # It gives a chance to reuse a delta chain unrelated to the current
968 988 # revisions instead of starting our own. Without such re-use,
969 989 # topological branches would keep reopening new full chains, creating
970 990 # more and more snapshots as the repository grows.
971 991 yield tuple(snapshots[nullrev])
972 992
973 993 if not sparse:
974 994 # other approaches failed; try against prev to hopefully save us a
975 995 # fulltext.
976 996 yield (prev,)
977 997
978 998
979 999 class deltacomputer:
980 1000 def __init__(
981 1001 self,
982 1002 revlog,
983 1003 write_debug=None,
984 1004 debug_search=False,
985 1005 debug_info=None,
986 1006 ):
987 1007 self.revlog = revlog
988 1008 self._write_debug = write_debug
989 1009 self._debug_search = debug_search
990 1010 self._debug_info = debug_info
991 1011
992 1012 def buildtext(self, revinfo, fh):
993 1013 """Builds a fulltext version of a revision
994 1014
995 1015 revinfo: revisioninfo instance that contains all needed info
996 1016 fh: file handle to either the .i or the .d revlog file,
997 1017 depending on whether it is inlined or not
998 1018 """
999 1019 btext = revinfo.btext
1000 1020 if btext[0] is not None:
1001 1021 return btext[0]
1002 1022
1003 1023 revlog = self.revlog
1004 1024 cachedelta = revinfo.cachedelta
1005 1025 baserev = cachedelta[0]
1006 1026 delta = cachedelta[1]
1007 1027
1008 1028 fulltext = btext[0] = _textfromdelta(
1009 1029 fh,
1010 1030 revlog,
1011 1031 baserev,
1012 1032 delta,
1013 1033 revinfo.p1,
1014 1034 revinfo.p2,
1015 1035 revinfo.flags,
1016 1036 revinfo.node,
1017 1037 )
1018 1038 return fulltext
1019 1039
1020 1040 def _builddeltadiff(self, base, revinfo, fh):
1021 1041 revlog = self.revlog
1022 1042 t = self.buildtext(revinfo, fh)
1023 1043 if revlog.iscensored(base):
1024 1044 # deltas based on a censored revision must replace the
1025 1045 # full content in one patch, so delta works everywhere
1026 1046 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
1027 1047 delta = header + t
1028 1048 else:
1029 1049 ptext = revlog.rawdata(base, _df=fh)
1030 1050 delta = mdiff.textdiff(ptext, t)
1031 1051
1032 1052 return delta
1033 1053
1034 1054 def _builddeltainfo(self, revinfo, base, fh):
1035 1055 # can we use the cached delta?
1036 1056 revlog = self.revlog
1037 1057 debug_search = self._write_debug is not None and self._debug_search
1038 1058 chainbase = revlog.chainbase(base)
1039 1059 if revlog._generaldelta:
1040 1060 deltabase = base
1041 1061 else:
1042 1062 deltabase = chainbase
1043 1063 snapshotdepth = None
1044 1064 if revlog._sparserevlog and deltabase == nullrev:
1045 1065 snapshotdepth = 0
1046 1066 elif revlog._sparserevlog and revlog.issnapshot(deltabase):
1047 1067 # A delta chain should always be one full snapshot,
1048 1068 # zero or more semi-snapshots, and zero or more deltas
1049 1069 p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
1050 1070 if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
1051 1071 snapshotdepth = len(revlog._deltachain(deltabase)[0])
1052 1072 delta = None
1053 1073 if revinfo.cachedelta:
1054 1074 cachebase, cachediff = revinfo.cachedelta
1055 1075 # check if the diff still applies
1056 1076 currentbase = cachebase
1057 1077 while (
1058 1078 currentbase != nullrev
1059 1079 and currentbase != base
1060 1080 and self.revlog.length(currentbase) == 0
1061 1081 ):
1062 1082 currentbase = self.revlog.deltaparent(currentbase)
1063 1083 if self.revlog._lazydelta and currentbase == base:
1064 1084 delta = revinfo.cachedelta[1]
1065 1085 if delta is None:
1066 1086 delta = self._builddeltadiff(base, revinfo, fh)
1067 1087 if debug_search:
1068 1088 msg = b"DBG-DELTAS-SEARCH: uncompressed-delta-size=%d\n"
1069 1089 msg %= len(delta)
1070 1090 self._write_debug(msg)
1071 1091 # snapshotdepth must be neither None nor 0 (a level-0 snapshot)
1072 1092 if revlog.upperboundcomp is not None and snapshotdepth:
1073 1093 lowestrealisticdeltalen = len(delta) // revlog.upperboundcomp
1074 1094 snapshotlimit = revinfo.textlen >> snapshotdepth
1075 1095 if debug_search:
1076 1096 msg = b"DBG-DELTAS-SEARCH: projected-lower-size=%d\n"
1077 1097 msg %= lowestrealisticdeltalen
1078 1098 self._write_debug(msg)
1079 1099 if snapshotlimit < lowestrealisticdeltalen:
1080 1100 if debug_search:
1081 1101 msg = b"DBG-DELTAS-SEARCH: DISCARDED (snapshot limit)\n"
1082 1102 self._write_debug(msg)
1083 1103 return None
1084 1104 if revlog.length(base) < lowestrealisticdeltalen:
1085 1105 if debug_search:
1086 1106 msg = b"DBG-DELTAS-SEARCH: DISCARDED (prev size)\n"
1087 1107 self._write_debug(msg)
1088 1108 return None
1089 1109 header, data = revlog.compress(delta)
1090 1110 deltalen = len(header) + len(data)
1091 1111 offset = revlog.end(len(revlog) - 1)
1092 1112 dist = deltalen + offset - revlog.start(chainbase)
1093 1113 chainlen, compresseddeltalen = revlog._chaininfo(base)
1094 1114 chainlen += 1
1095 1115 compresseddeltalen += deltalen
1096 1116
1097 1117 return _deltainfo(
1098 1118 dist,
1099 1119 deltalen,
1100 1120 (header, data),
1101 1121 deltabase,
1102 1122 chainbase,
1103 1123 chainlen,
1104 1124 compresseddeltalen,
1105 1125 snapshotdepth,
1106 1126 )
1107 1127
1108 1128 def _fullsnapshotinfo(self, fh, revinfo, curr):
1109 1129 rawtext = self.buildtext(revinfo, fh)
1110 1130 data = self.revlog.compress(rawtext)
1111 1131 compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0])
1112 1132 deltabase = chainbase = curr
1113 1133 snapshotdepth = 0
1114 1134 chainlen = 1
1115 1135
1116 1136 return _deltainfo(
1117 1137 dist,
1118 1138 deltalen,
1119 1139 data,
1120 1140 deltabase,
1121 1141 chainbase,
1122 1142 chainlen,
1123 1143 compresseddeltalen,
1124 1144 snapshotdepth,
1125 1145 )
1126 1146
1127 1147 def finddeltainfo(self, revinfo, fh, excluded_bases=None, target_rev=None):
1128 1148 """Find an acceptable delta against a candidate revision
1129 1149
1130 1150 revinfo: information about the revision (instance of _revisioninfo)
1131 1151 fh: file handle to either the .i or the .d revlog file,
1132 1152 depending on whether it is inlined or not
1133 1153
1134 1154 Returns the first acceptable candidate revision, as ordered by
1135 1155 _candidategroups
1136 1156
1137 1157 If no suitable deltabase is found, we return delta info for a full
1138 1158 snapshot.
1139 1159
1140 1160 `excluded_bases` is an optional set of revisions that cannot be used
1141 1161 as a delta base. Use this to recompute deltas suitable in a censor
1142 1162 or strip context.
1143 1163 """
1144 1164 if target_rev is None:
1145 1165 target_rev = len(self.revlog)
1146 1166
1147 1167 if not revinfo.textlen:
1148 1168 return self._fullsnapshotinfo(fh, revinfo, target_rev)
1149 1169
1150 1170 if excluded_bases is None:
1151 1171 excluded_bases = set()
1152 1172
1153 1173 # no delta for flag processor revisions (see "candelta" for why)
1154 1174 # not calling candelta since only one revision needs testing, also
1155 1175 # to avoid the overhead of fetching flags again.
1156 1176 if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
1157 1177 return self._fullsnapshotinfo(fh, revinfo, target_rev)
1158 1178
1159 1179 gather_debug = (
1160 1180 self._write_debug is not None or self._debug_info is not None
1161 1181 )
1162 1182 debug_search = self._write_debug is not None and self._debug_search
1163 1183
1164 1184 if gather_debug:
1165 1185 start = util.timer()
1166 1186
1167 1187 # count the number of different deltas we tried (for debug purposes)
1168 1188 dbg_try_count = 0
1169 1189 # count the number of "search rounds" we did (for debug purposes)
1170 1190 dbg_try_rounds = 0
1171 1191 dbg_type = b'unknown'
1172 1192
1173 1193 cachedelta = revinfo.cachedelta
1174 1194 p1 = revinfo.p1
1175 1195 p2 = revinfo.p2
1176 1196 revlog = self.revlog
1177 1197
1178 1198 deltainfo = None
1179 1199 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
1180 1200
1181 1201 if gather_debug:
1182 1202 if p1r != nullrev:
1183 1203 p1_chain_len = revlog._chaininfo(p1r)[0]
1184 1204 else:
1185 1205 p1_chain_len = -1
1186 1206 if p2r != nullrev:
1187 1207 p2_chain_len = revlog._chaininfo(p2r)[0]
1188 1208 else:
1189 1209 p2_chain_len = -1
1190 1210 if debug_search:
1191 1211 msg = b"DBG-DELTAS-SEARCH: SEARCH rev=%d\n"
1192 1212 msg %= target_rev
1193 1213 self._write_debug(msg)
1194 1214
1195 1215 groups = _candidategroups(
1196 1216 self.revlog,
1197 1217 revinfo.textlen,
1198 1218 p1r,
1199 1219 p2r,
1200 1220 cachedelta,
1201 1221 excluded_bases,
1202 1222 target_rev,
1203 1223 )
1204 1224 candidaterevs = next(groups)
1205 1225 while candidaterevs is not None:
1206 1226 dbg_try_rounds += 1
1207 1227 if debug_search:
1208 1228 prev = None
1209 1229 if deltainfo is not None:
1210 1230 prev = deltainfo.base
1211 1231
1212 1232 if (
1213 1233 cachedelta is not None
1214 1234 and len(candidaterevs) == 1
1215 1235 and cachedelta[0] in candidaterevs
1216 1236 ):
1217 1237 round_type = b"cached-delta"
1218 1238 elif p1 in candidaterevs or p2 in candidaterevs:
1219 1239 round_type = b"parents"
1220 1240 elif prev is not None and all(c < prev for c in candidaterevs):
1221 1241 round_type = b"refine-down"
1222 1242 elif prev is not None and all(c > prev for c in candidaterevs):
1223 1243 round_type = b"refine-up"
1224 1244 else:
1225 1245 round_type = b"search-down"
1226 1246 msg = b"DBG-DELTAS-SEARCH: ROUND #%d - %d candidates - %s\n"
1227 1247 msg %= (dbg_try_rounds, len(candidaterevs), round_type)
1228 1248 self._write_debug(msg)
1229 1249 nominateddeltas = []
1230 1250 if deltainfo is not None:
1231 1251 if debug_search:
1232 1252 msg = (
1233 1253 b"DBG-DELTAS-SEARCH: CONTENDER: rev=%d - length=%d\n"
1234 1254 )
1235 1255 msg %= (deltainfo.base, deltainfo.deltalen)
1236 1256 self._write_debug(msg)
1237 1257 # if we already found a good delta,
1238 1258 # challenge it against refined candidates
1239 1259 nominateddeltas.append(deltainfo)
1240 1260 for candidaterev in candidaterevs:
1241 1261 if debug_search:
1242 1262 msg = b"DBG-DELTAS-SEARCH: CANDIDATE: rev=%d\n"
1243 1263 msg %= candidaterev
1244 1264 self._write_debug(msg)
1245 1265 candidate_type = None
1246 1266 if candidaterev == p1:
1247 1267 candidate_type = b"p1"
1248 1268 elif candidaterev == p2:
1249 1269 candidate_type = b"p2"
1250 1270 elif self.revlog.issnapshot(candidaterev):
1251 1271 candidate_type = b"snapshot-%d"
1252 1272 candidate_type %= self.revlog.snapshotdepth(
1253 1273 candidaterev
1254 1274 )
1255 1275
1256 1276 if candidate_type is not None:
1257 1277 msg = b"DBG-DELTAS-SEARCH: type=%s\n"
1258 1278 msg %= candidate_type
1259 1279 self._write_debug(msg)
1260 1280 msg = b"DBG-DELTAS-SEARCH: size=%d\n"
1261 1281 msg %= self.revlog.length(candidaterev)
1262 1282 self._write_debug(msg)
1263 1283 msg = b"DBG-DELTAS-SEARCH: base=%d\n"
1264 1284 msg %= self.revlog.deltaparent(candidaterev)
1265 1285 self._write_debug(msg)
1266 1286
1267 1287 dbg_try_count += 1
1268 1288
1269 1289 if debug_search:
1270 1290 delta_start = util.timer()
1271 1291 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
1272 1292 if debug_search:
1273 1293 delta_end = util.timer()
1274 1294 msg = b"DBG-DELTAS-SEARCH: delta-search-time=%f\n"
1275 1295 msg %= delta_end - delta_start
1276 1296 self._write_debug(msg)
1277 1297 if candidatedelta is not None:
1278 1298 if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
1279 1299 if debug_search:
1280 1300 msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (GOOD)\n"
1281 1301 msg %= candidatedelta.deltalen
1282 1302 self._write_debug(msg)
1283 1303 nominateddeltas.append(candidatedelta)
1284 1304 elif debug_search:
1285 1305 msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (BAD)\n"
1286 1306 msg %= candidatedelta.deltalen
1287 1307 self._write_debug(msg)
1288 1308 elif debug_search:
1289 1309 msg = b"DBG-DELTAS-SEARCH: NO-DELTA\n"
1290 1310 self._write_debug(msg)
1291 1311 if nominateddeltas:
1292 1312 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
1293 1313 if deltainfo is not None:
1294 1314 candidaterevs = groups.send(deltainfo.base)
1295 1315 else:
1296 1316 candidaterevs = next(groups)
1297 1317
1298 1318 if deltainfo is None:
1299 1319 dbg_type = b"full"
1300 1320 deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
1301 1321 elif deltainfo.snapshotdepth: # pytype: disable=attribute-error
1302 1322 dbg_type = b"snapshot"
1303 1323 else:
1304 1324 dbg_type = b"delta"
1305 1325
1306 1326 if gather_debug:
1307 1327 end = util.timer()
1308 1328 used_cached = (
1309 1329 cachedelta is not None
1310 1330 and dbg_try_rounds == 1
1311 1331 and dbg_try_count == 1
1312 1332 and deltainfo.base == cachedelta[0]
1313 1333 )
1314 1334 dbg = {
1315 1335 'duration': end - start,
1316 1336 'revision': target_rev,
1317 1337 'delta-base': deltainfo.base, # pytype: disable=attribute-error
1318 1338 'search_round_count': dbg_try_rounds,
1319 1339 'using-cached-base': used_cached,
1320 1340 'delta_try_count': dbg_try_count,
1321 1341 'type': dbg_type,
1322 1342 'p1-chain-len': p1_chain_len,
1323 1343 'p2-chain-len': p2_chain_len,
1324 1344 }
1325 1345 if (
1326 1346 deltainfo.snapshotdepth # pytype: disable=attribute-error
1327 1347 is not None
1328 1348 ):
1329 1349 dbg[
1330 1350 'snapshot-depth'
1331 1351 ] = deltainfo.snapshotdepth # pytype: disable=attribute-error
1332 1352 else:
1333 1353 dbg['snapshot-depth'] = 0
1334 1354 target_revlog = b"UNKNOWN"
1335 1355 target_type = self.revlog.target[0]
1336 1356 target_key = self.revlog.target[1]
1337 1357 if target_type == KIND_CHANGELOG:
1338 1358 target_revlog = b'CHANGELOG:'
1339 1359 elif target_type == KIND_MANIFESTLOG:
1340 1360 target_revlog = b'MANIFESTLOG:'
1341 1361 if target_key:
1342 1362 target_revlog += b'%s:' % target_key
1343 1363 elif target_type == KIND_FILELOG:
1344 1364 target_revlog = b'FILELOG:'
1345 1365 if target_key:
1346 1366 target_revlog += b'%s:' % target_key
1347 1367 dbg['target-revlog'] = target_revlog
1348 1368
1349 1369 if self._debug_info is not None:
1350 1370 self._debug_info.append(dbg)
1351 1371
1352 1372 if self._write_debug is not None:
1353 1373 msg = (
1354 1374 b"DBG-DELTAS:"
1355 1375 b" %-12s"
1356 1376 b" rev=%d:"
1357 1377 b" delta-base=%d"
1358 1378 b" is-cached=%d"
1359 1379 b" - search-rounds=%d"
1360 1380 b" try-count=%d"
1361 1381 b" - delta-type=%-6s"
1362 1382 b" snap-depth=%d"
1363 1383 b" - p1-chain-length=%d"
1364 1384 b" p2-chain-length=%d"
1365 1385 b" - duration=%f"
1366 1386 b"\n"
1367 1387 )
1368 1388 msg %= (
1369 1389 dbg["target-revlog"],
1370 1390 dbg["revision"],
1371 1391 dbg["delta-base"],
1372 1392 dbg["using-cached-base"],
1373 1393 dbg["search_round_count"],
1374 1394 dbg["delta_try_count"],
1375 1395 dbg["type"],
1376 1396 dbg["snapshot-depth"],
1377 1397 dbg["p1-chain-len"],
1378 1398 dbg["p2-chain-len"],
1379 1399 dbg["duration"],
1380 1400 )
1381 1401 self._write_debug(msg)
1382 1402 return deltainfo
1383 1403
1384 1404
1385 1405 def delta_compression(default_compression_header, deltainfo):
1386 1406 """return (COMPRESSION_MODE, deltainfo)
1387 1407
1388 1408 used by revlog v2+ format to dispatch between PLAIN and DEFAULT
1389 1409 compression.
1390 1410 """
1391 1411 h, d = deltainfo.data
1392 1412 compression_mode = COMP_MODE_INLINE
1393 1413 if not h and not d:
1394 1414 # no data to store at all... declare it uncompressed
1395 1415 compression_mode = COMP_MODE_PLAIN
1396 1416 elif not h:
1397 1417 t = d[0:1]
1398 1418 if t == b'\0':
1399 1419 compression_mode = COMP_MODE_PLAIN
1400 1420 elif t == default_compression_header:
1401 1421 compression_mode = COMP_MODE_DEFAULT
1402 1422 elif h == b'u':
1403 1423 # we have a more efficient way to declare uncompressed
1404 1424 h = b''
1405 1425 compression_mode = COMP_MODE_PLAIN
1406 1426 deltainfo = drop_u_compression(deltainfo)
1407 1427 return compression_mode, deltainfo
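

# A minimal usage sketch (illustrative; the COMP_MODE_* constants are
# the ones this module already uses):
#
#     mode, info = delta_compression(default_compression_header, info)
#     if mode == COMP_MODE_PLAIN:
#         pass  # data is stored raw; no decompression needed on read
#     elif mode == COMP_MODE_DEFAULT:
#         pass  # reader applies the revlog's default decompressor
#     else:  # COMP_MODE_INLINE
#         pass  # data embeds its own compression header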