# Provenance (recovered from code-review page chrome, not part of the module):
#   changeset r47028:fc2d5c0a (default branch), author: marmoute
#   "persistent-nodemap: add a 'warn' option to the slow-path config"
#   diff header: @@ -1,2595 +1,2595 @@
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
def loadconfigtable(ui, extname, configtable):
    """Merge an extension's config declarations into the ones ``ui`` knows.

    Any item the extension redefines triggers a ``warn-config`` devel
    warning before the extension's definition wins.
    """
    for section, items in sorted(configtable.items()):
        registry = ui._knownconfig.setdefault(section, itemregister())
        overwritten = set(registry) & set(items)
        for key in sorted(overwritten):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        registry.update(items)
32 32
class configitem(object):
    """Description of one known configuration option.

    section -- official config section the item lives in,
    name -- official name of the item inside that section,
    default -- value used when the option is not set,
    alias -- optional iterable of tuples giving alternative locations,
    generic -- when True, ``name`` is a regular expression matched against
        candidate keys instead of an exact name,
    priority -- tie-breaker used when several generic patterns match,
    experimental -- marks the option as experimental.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        # generic items match candidate keys through this compiled pattern
        self._re = re.compile(self.name) if generic else None
64 64
class itemregister(dict):
    """A dict of config items that also supports wild-card (generic) lookup."""

    def __init__(self):
        super(itemregister, self).__init__()
        # generic (pattern-based) items, kept aside for fallback matching
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        exact = super(itemregister, self).get(key)
        if exact is not None and not exact.generic:
            return exact

        # No dedicated entry: fall back to the generic patterns, lowest
        # (priority, name) first.
        for candidate in sorted(
            self._generics, key=lambda item: (item.priority, item.name)
        ):
            # We use 'match' (anchored at the start of the string) instead of
            # 'search' to keep matching predictable for people unfamiliar
            # with regular expressions: a simple pattern such as "color\..*"
            # only matches keys that actually start with "color.", whereas
            # 'search' could suddenly hit apparently unrelated keys that
            # merely contain "color." somewhere.  Requiring ".*" on some
            # patterns is a fair trade against prefixing most patterns with
            # "^", which seems more error prone.
            if candidate._re.match(key):
                return candidate

        return None
104 104
105 105
# Registry of all core config items: section name -> itemregister.
coreitems = {}


def _register(configtable, *args, **kwargs):
    """Create a configitem from the arguments and record it in ``configtable``.

    Raises error.ProgrammingError when the same section/name pair was
    already registered.
    """
    item = configitem(*args, **kwargs)
    registry = configtable.setdefault(item.section, itemregister())
    if item.name in registry:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    registry[item.name] = item
117 117
# Sentinel for options whose default is derived from other values at runtime.
dynamicdefault = object()

# Registering actual config items


def getitemregister(configtable):
    """Return a registration function bound to ``configtable``.

    The returned callable is ``_register`` with its first argument fixed;
    it also carries ``dynamicdefault`` as an attribute so callers can
    spell the sentinel as an attribute of the register function.
    """
    register = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    register.dynamicdefault = dynamicdefault
    return register


coreconfigitem = getitemregister(coreitems)
132 132
133 133
def _registerdiffopts(section, configprefix=b''):
    """Register the standard diff-related options under ``section``.

    Each option name is prefixed with ``configprefix``.  All options
    default to False except ``unified``, which defaults to None.
    """
    diffopts = [
        (b'nodates', False),
        (b'showfunc', False),
        (b'unified', None),
        (b'git', False),
        (b'ignorews', False),
        (b'ignorewsamount', False),
        (b'ignoreblanklines', False),
        (b'ignorewseol', False),
        (b'nobinary', False),
        (b'noprefix', False),
        (b'word-diff', False),
    ]
    for name, default in diffopts:
        coreconfigitem(
            section,
            configprefix + name,
            default=default,
        )
191 191
192 192 coreconfigitem(
193 193 b'alias',
194 194 b'.*',
195 195 default=dynamicdefault,
196 196 generic=True,
197 197 )
198 198 coreconfigitem(
199 199 b'auth',
200 200 b'cookiefile',
201 201 default=None,
202 202 )
203 203 _registerdiffopts(section=b'annotate')
204 204 # bookmarks.pushing: internal hack for discovery
205 205 coreconfigitem(
206 206 b'bookmarks',
207 207 b'pushing',
208 208 default=list,
209 209 )
210 210 # bundle.mainreporoot: internal hack for bundlerepo
211 211 coreconfigitem(
212 212 b'bundle',
213 213 b'mainreporoot',
214 214 default=b'',
215 215 )
216 216 coreconfigitem(
217 217 b'censor',
218 218 b'policy',
219 219 default=b'abort',
220 220 experimental=True,
221 221 )
222 222 coreconfigitem(
223 223 b'chgserver',
224 224 b'idletimeout',
225 225 default=3600,
226 226 )
227 227 coreconfigitem(
228 228 b'chgserver',
229 229 b'skiphash',
230 230 default=False,
231 231 )
232 232 coreconfigitem(
233 233 b'cmdserver',
234 234 b'log',
235 235 default=None,
236 236 )
237 237 coreconfigitem(
238 238 b'cmdserver',
239 239 b'max-log-files',
240 240 default=7,
241 241 )
242 242 coreconfigitem(
243 243 b'cmdserver',
244 244 b'max-log-size',
245 245 default=b'1 MB',
246 246 )
247 247 coreconfigitem(
248 248 b'cmdserver',
249 249 b'max-repo-cache',
250 250 default=0,
251 251 experimental=True,
252 252 )
253 253 coreconfigitem(
254 254 b'cmdserver',
255 255 b'message-encodings',
256 256 default=list,
257 257 )
258 258 coreconfigitem(
259 259 b'cmdserver',
260 260 b'track-log',
261 261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
262 262 )
263 263 coreconfigitem(
264 264 b'cmdserver',
265 265 b'shutdown-on-interrupt',
266 266 default=True,
267 267 )
268 268 coreconfigitem(
269 269 b'color',
270 270 b'.*',
271 271 default=None,
272 272 generic=True,
273 273 )
274 274 coreconfigitem(
275 275 b'color',
276 276 b'mode',
277 277 default=b'auto',
278 278 )
279 279 coreconfigitem(
280 280 b'color',
281 281 b'pagermode',
282 282 default=dynamicdefault,
283 283 )
284 284 coreconfigitem(
285 285 b'command-templates',
286 286 b'graphnode',
287 287 default=None,
288 288 alias=[(b'ui', b'graphnodetemplate')],
289 289 )
290 290 coreconfigitem(
291 291 b'command-templates',
292 292 b'log',
293 293 default=None,
294 294 alias=[(b'ui', b'logtemplate')],
295 295 )
296 296 coreconfigitem(
297 297 b'command-templates',
298 298 b'mergemarker',
299 299 default=(
300 300 b'{node|short} '
301 301 b'{ifeq(tags, "tip", "", '
302 302 b'ifeq(tags, "", "", "{tags} "))}'
303 303 b'{if(bookmarks, "{bookmarks} ")}'
304 304 b'{ifeq(branch, "default", "", "{branch} ")}'
305 305 b'- {author|user}: {desc|firstline}'
306 306 ),
307 307 alias=[(b'ui', b'mergemarkertemplate')],
308 308 )
309 309 coreconfigitem(
310 310 b'command-templates',
311 311 b'pre-merge-tool-output',
312 312 default=None,
313 313 alias=[(b'ui', b'pre-merge-tool-output-template')],
314 314 )
315 315 coreconfigitem(
316 316 b'command-templates',
317 317 b'oneline-summary',
318 318 default=None,
319 319 )
320 320 coreconfigitem(
321 321 b'command-templates',
322 322 b'oneline-summary.*',
323 323 default=dynamicdefault,
324 324 generic=True,
325 325 )
326 326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
327 327 coreconfigitem(
328 328 b'commands',
329 329 b'commit.post-status',
330 330 default=False,
331 331 )
332 332 coreconfigitem(
333 333 b'commands',
334 334 b'grep.all-files',
335 335 default=False,
336 336 experimental=True,
337 337 )
338 338 coreconfigitem(
339 339 b'commands',
340 340 b'merge.require-rev',
341 341 default=False,
342 342 )
343 343 coreconfigitem(
344 344 b'commands',
345 345 b'push.require-revs',
346 346 default=False,
347 347 )
348 348 coreconfigitem(
349 349 b'commands',
350 350 b'resolve.confirm',
351 351 default=False,
352 352 )
353 353 coreconfigitem(
354 354 b'commands',
355 355 b'resolve.explicit-re-merge',
356 356 default=False,
357 357 )
358 358 coreconfigitem(
359 359 b'commands',
360 360 b'resolve.mark-check',
361 361 default=b'none',
362 362 )
363 363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
364 364 coreconfigitem(
365 365 b'commands',
366 366 b'show.aliasprefix',
367 367 default=list,
368 368 )
369 369 coreconfigitem(
370 370 b'commands',
371 371 b'status.relative',
372 372 default=False,
373 373 )
374 374 coreconfigitem(
375 375 b'commands',
376 376 b'status.skipstates',
377 377 default=[],
378 378 experimental=True,
379 379 )
380 380 coreconfigitem(
381 381 b'commands',
382 382 b'status.terse',
383 383 default=b'',
384 384 )
385 385 coreconfigitem(
386 386 b'commands',
387 387 b'status.verbose',
388 388 default=False,
389 389 )
390 390 coreconfigitem(
391 391 b'commands',
392 392 b'update.check',
393 393 default=None,
394 394 )
395 395 coreconfigitem(
396 396 b'commands',
397 397 b'update.requiredest',
398 398 default=False,
399 399 )
400 400 coreconfigitem(
401 401 b'committemplate',
402 402 b'.*',
403 403 default=None,
404 404 generic=True,
405 405 )
406 406 coreconfigitem(
407 407 b'convert',
408 408 b'bzr.saverev',
409 409 default=True,
410 410 )
411 411 coreconfigitem(
412 412 b'convert',
413 413 b'cvsps.cache',
414 414 default=True,
415 415 )
416 416 coreconfigitem(
417 417 b'convert',
418 418 b'cvsps.fuzz',
419 419 default=60,
420 420 )
421 421 coreconfigitem(
422 422 b'convert',
423 423 b'cvsps.logencoding',
424 424 default=None,
425 425 )
426 426 coreconfigitem(
427 427 b'convert',
428 428 b'cvsps.mergefrom',
429 429 default=None,
430 430 )
431 431 coreconfigitem(
432 432 b'convert',
433 433 b'cvsps.mergeto',
434 434 default=None,
435 435 )
436 436 coreconfigitem(
437 437 b'convert',
438 438 b'git.committeractions',
439 439 default=lambda: [b'messagedifferent'],
440 440 )
441 441 coreconfigitem(
442 442 b'convert',
443 443 b'git.extrakeys',
444 444 default=list,
445 445 )
446 446 coreconfigitem(
447 447 b'convert',
448 448 b'git.findcopiesharder',
449 449 default=False,
450 450 )
451 451 coreconfigitem(
452 452 b'convert',
453 453 b'git.remoteprefix',
454 454 default=b'remote',
455 455 )
456 456 coreconfigitem(
457 457 b'convert',
458 458 b'git.renamelimit',
459 459 default=400,
460 460 )
461 461 coreconfigitem(
462 462 b'convert',
463 463 b'git.saverev',
464 464 default=True,
465 465 )
466 466 coreconfigitem(
467 467 b'convert',
468 468 b'git.similarity',
469 469 default=50,
470 470 )
471 471 coreconfigitem(
472 472 b'convert',
473 473 b'git.skipsubmodules',
474 474 default=False,
475 475 )
476 476 coreconfigitem(
477 477 b'convert',
478 478 b'hg.clonebranches',
479 479 default=False,
480 480 )
481 481 coreconfigitem(
482 482 b'convert',
483 483 b'hg.ignoreerrors',
484 484 default=False,
485 485 )
486 486 coreconfigitem(
487 487 b'convert',
488 488 b'hg.preserve-hash',
489 489 default=False,
490 490 )
491 491 coreconfigitem(
492 492 b'convert',
493 493 b'hg.revs',
494 494 default=None,
495 495 )
496 496 coreconfigitem(
497 497 b'convert',
498 498 b'hg.saverev',
499 499 default=False,
500 500 )
501 501 coreconfigitem(
502 502 b'convert',
503 503 b'hg.sourcename',
504 504 default=None,
505 505 )
506 506 coreconfigitem(
507 507 b'convert',
508 508 b'hg.startrev',
509 509 default=None,
510 510 )
511 511 coreconfigitem(
512 512 b'convert',
513 513 b'hg.tagsbranch',
514 514 default=b'default',
515 515 )
516 516 coreconfigitem(
517 517 b'convert',
518 518 b'hg.usebranchnames',
519 519 default=True,
520 520 )
521 521 coreconfigitem(
522 522 b'convert',
523 523 b'ignoreancestorcheck',
524 524 default=False,
525 525 experimental=True,
526 526 )
527 527 coreconfigitem(
528 528 b'convert',
529 529 b'localtimezone',
530 530 default=False,
531 531 )
532 532 coreconfigitem(
533 533 b'convert',
534 534 b'p4.encoding',
535 535 default=dynamicdefault,
536 536 )
537 537 coreconfigitem(
538 538 b'convert',
539 539 b'p4.startrev',
540 540 default=0,
541 541 )
542 542 coreconfigitem(
543 543 b'convert',
544 544 b'skiptags',
545 545 default=False,
546 546 )
547 547 coreconfigitem(
548 548 b'convert',
549 549 b'svn.debugsvnlog',
550 550 default=True,
551 551 )
552 552 coreconfigitem(
553 553 b'convert',
554 554 b'svn.trunk',
555 555 default=None,
556 556 )
557 557 coreconfigitem(
558 558 b'convert',
559 559 b'svn.tags',
560 560 default=None,
561 561 )
562 562 coreconfigitem(
563 563 b'convert',
564 564 b'svn.branches',
565 565 default=None,
566 566 )
567 567 coreconfigitem(
568 568 b'convert',
569 569 b'svn.startrev',
570 570 default=0,
571 571 )
572 572 coreconfigitem(
573 573 b'debug',
574 574 b'dirstate.delaywrite',
575 575 default=0,
576 576 )
577 577 coreconfigitem(
578 578 b'defaults',
579 579 b'.*',
580 580 default=None,
581 581 generic=True,
582 582 )
583 583 coreconfigitem(
584 584 b'devel',
585 585 b'all-warnings',
586 586 default=False,
587 587 )
588 588 coreconfigitem(
589 589 b'devel',
590 590 b'bundle2.debug',
591 591 default=False,
592 592 )
593 593 coreconfigitem(
594 594 b'devel',
595 595 b'bundle.delta',
596 596 default=b'',
597 597 )
598 598 coreconfigitem(
599 599 b'devel',
600 600 b'cache-vfs',
601 601 default=None,
602 602 )
603 603 coreconfigitem(
604 604 b'devel',
605 605 b'check-locks',
606 606 default=False,
607 607 )
608 608 coreconfigitem(
609 609 b'devel',
610 610 b'check-relroot',
611 611 default=False,
612 612 )
613 613 coreconfigitem(
614 614 b'devel',
615 615 b'default-date',
616 616 default=None,
617 617 )
618 618 coreconfigitem(
619 619 b'devel',
620 620 b'deprec-warn',
621 621 default=False,
622 622 )
623 623 coreconfigitem(
624 624 b'devel',
625 625 b'disableloaddefaultcerts',
626 626 default=False,
627 627 )
628 628 coreconfigitem(
629 629 b'devel',
630 630 b'warn-empty-changegroup',
631 631 default=False,
632 632 )
633 633 coreconfigitem(
634 634 b'devel',
635 635 b'legacy.exchange',
636 636 default=list,
637 637 )
638 638 # When True, revlogs use a special reference version of the nodemap, that is not
639 639 # performant but is "known" to behave properly.
640 640 coreconfigitem(
641 641 b'devel',
642 642 b'persistent-nodemap',
643 643 default=False,
644 644 )
645 645 coreconfigitem(
646 646 b'devel',
647 647 b'servercafile',
648 648 default=b'',
649 649 )
650 650 coreconfigitem(
651 651 b'devel',
652 652 b'serverexactprotocol',
653 653 default=b'',
654 654 )
655 655 coreconfigitem(
656 656 b'devel',
657 657 b'serverrequirecert',
658 658 default=False,
659 659 )
660 660 coreconfigitem(
661 661 b'devel',
662 662 b'strip-obsmarkers',
663 663 default=True,
664 664 )
665 665 coreconfigitem(
666 666 b'devel',
667 667 b'warn-config',
668 668 default=None,
669 669 )
670 670 coreconfigitem(
671 671 b'devel',
672 672 b'warn-config-default',
673 673 default=None,
674 674 )
675 675 coreconfigitem(
676 676 b'devel',
677 677 b'user.obsmarker',
678 678 default=None,
679 679 )
680 680 coreconfigitem(
681 681 b'devel',
682 682 b'warn-config-unknown',
683 683 default=None,
684 684 )
685 685 coreconfigitem(
686 686 b'devel',
687 687 b'debug.copies',
688 688 default=False,
689 689 )
690 690 coreconfigitem(
691 691 b'devel',
692 692 b'debug.extensions',
693 693 default=False,
694 694 )
695 695 coreconfigitem(
696 696 b'devel',
697 697 b'debug.repo-filters',
698 698 default=False,
699 699 )
700 700 coreconfigitem(
701 701 b'devel',
702 702 b'debug.peer-request',
703 703 default=False,
704 704 )
705 705 # If discovery.grow-sample is False, the sample size used in set discovery will
706 706 # not be increased through the process
707 707 coreconfigitem(
708 708 b'devel',
709 709 b'discovery.grow-sample',
710 710 default=True,
711 711 )
712 712 # discovery.grow-sample.rate control the rate at which the sample grow
713 713 coreconfigitem(
714 714 b'devel',
715 715 b'discovery.grow-sample.rate',
716 716 default=1.05,
717 717 )
718 718 # If discovery.randomize is False, random sampling during discovery are
719 719 # deterministic. It is meant for integration tests.
720 720 coreconfigitem(
721 721 b'devel',
722 722 b'discovery.randomize',
723 723 default=True,
724 724 )
725 725 _registerdiffopts(section=b'diff')
726 726 coreconfigitem(
727 727 b'email',
728 728 b'bcc',
729 729 default=None,
730 730 )
731 731 coreconfigitem(
732 732 b'email',
733 733 b'cc',
734 734 default=None,
735 735 )
736 736 coreconfigitem(
737 737 b'email',
738 738 b'charsets',
739 739 default=list,
740 740 )
741 741 coreconfigitem(
742 742 b'email',
743 743 b'from',
744 744 default=None,
745 745 )
746 746 coreconfigitem(
747 747 b'email',
748 748 b'method',
749 749 default=b'smtp',
750 750 )
751 751 coreconfigitem(
752 752 b'email',
753 753 b'reply-to',
754 754 default=None,
755 755 )
756 756 coreconfigitem(
757 757 b'email',
758 758 b'to',
759 759 default=None,
760 760 )
761 761 coreconfigitem(
762 762 b'experimental',
763 763 b'archivemetatemplate',
764 764 default=dynamicdefault,
765 765 )
766 766 coreconfigitem(
767 767 b'experimental',
768 768 b'auto-publish',
769 769 default=b'publish',
770 770 )
771 771 coreconfigitem(
772 772 b'experimental',
773 773 b'bundle-phases',
774 774 default=False,
775 775 )
776 776 coreconfigitem(
777 777 b'experimental',
778 778 b'bundle2-advertise',
779 779 default=True,
780 780 )
781 781 coreconfigitem(
782 782 b'experimental',
783 783 b'bundle2-output-capture',
784 784 default=False,
785 785 )
786 786 coreconfigitem(
787 787 b'experimental',
788 788 b'bundle2.pushback',
789 789 default=False,
790 790 )
791 791 coreconfigitem(
792 792 b'experimental',
793 793 b'bundle2lazylocking',
794 794 default=False,
795 795 )
796 796 coreconfigitem(
797 797 b'experimental',
798 798 b'bundlecomplevel',
799 799 default=None,
800 800 )
801 801 coreconfigitem(
802 802 b'experimental',
803 803 b'bundlecomplevel.bzip2',
804 804 default=None,
805 805 )
806 806 coreconfigitem(
807 807 b'experimental',
808 808 b'bundlecomplevel.gzip',
809 809 default=None,
810 810 )
811 811 coreconfigitem(
812 812 b'experimental',
813 813 b'bundlecomplevel.none',
814 814 default=None,
815 815 )
816 816 coreconfigitem(
817 817 b'experimental',
818 818 b'bundlecomplevel.zstd',
819 819 default=None,
820 820 )
821 821 coreconfigitem(
822 822 b'experimental',
823 823 b'changegroup3',
824 824 default=False,
825 825 )
826 826 coreconfigitem(
827 827 b'experimental',
828 828 b'cleanup-as-archived',
829 829 default=False,
830 830 )
831 831 coreconfigitem(
832 832 b'experimental',
833 833 b'clientcompressionengines',
834 834 default=list,
835 835 )
836 836 coreconfigitem(
837 837 b'experimental',
838 838 b'copytrace',
839 839 default=b'on',
840 840 )
841 841 coreconfigitem(
842 842 b'experimental',
843 843 b'copytrace.movecandidateslimit',
844 844 default=100,
845 845 )
846 846 coreconfigitem(
847 847 b'experimental',
848 848 b'copytrace.sourcecommitlimit',
849 849 default=100,
850 850 )
851 851 coreconfigitem(
852 852 b'experimental',
853 853 b'copies.read-from',
854 854 default=b"filelog-only",
855 855 )
856 856 coreconfigitem(
857 857 b'experimental',
858 858 b'copies.write-to',
859 859 default=b'filelog-only',
860 860 )
861 861 coreconfigitem(
862 862 b'experimental',
863 863 b'crecordtest',
864 864 default=None,
865 865 )
866 866 coreconfigitem(
867 867 b'experimental',
868 868 b'directaccess',
869 869 default=False,
870 870 )
871 871 coreconfigitem(
872 872 b'experimental',
873 873 b'directaccess.revnums',
874 874 default=False,
875 875 )
876 876 coreconfigitem(
877 877 b'experimental',
878 878 b'editortmpinhg',
879 879 default=False,
880 880 )
881 881 coreconfigitem(
882 882 b'experimental',
883 883 b'evolution',
884 884 default=list,
885 885 )
886 886 coreconfigitem(
887 887 b'experimental',
888 888 b'evolution.allowdivergence',
889 889 default=False,
890 890 alias=[(b'experimental', b'allowdivergence')],
891 891 )
892 892 coreconfigitem(
893 893 b'experimental',
894 894 b'evolution.allowunstable',
895 895 default=None,
896 896 )
897 897 coreconfigitem(
898 898 b'experimental',
899 899 b'evolution.createmarkers',
900 900 default=None,
901 901 )
902 902 coreconfigitem(
903 903 b'experimental',
904 904 b'evolution.effect-flags',
905 905 default=True,
906 906 alias=[(b'experimental', b'effect-flags')],
907 907 )
908 908 coreconfigitem(
909 909 b'experimental',
910 910 b'evolution.exchange',
911 911 default=None,
912 912 )
913 913 coreconfigitem(
914 914 b'experimental',
915 915 b'evolution.bundle-obsmarker',
916 916 default=False,
917 917 )
918 918 coreconfigitem(
919 919 b'experimental',
920 920 b'evolution.bundle-obsmarker:mandatory',
921 921 default=True,
922 922 )
923 923 coreconfigitem(
924 924 b'experimental',
925 925 b'log.topo',
926 926 default=False,
927 927 )
928 928 coreconfigitem(
929 929 b'experimental',
930 930 b'evolution.report-instabilities',
931 931 default=True,
932 932 )
933 933 coreconfigitem(
934 934 b'experimental',
935 935 b'evolution.track-operation',
936 936 default=True,
937 937 )
938 938 # repo-level config to exclude a revset visibility
939 939 #
940 940 # The target use case is to use `share` to expose different subset of the same
941 941 # repository, especially server side. See also `server.view`.
942 942 coreconfigitem(
943 943 b'experimental',
944 944 b'extra-filter-revs',
945 945 default=None,
946 946 )
947 947 coreconfigitem(
948 948 b'experimental',
949 949 b'maxdeltachainspan',
950 950 default=-1,
951 951 )
952 952 # tracks files which were undeleted (merge might delete them but we explicitly
953 953 # kept/undeleted them) and creates new filenodes for them
954 954 coreconfigitem(
955 955 b'experimental',
956 956 b'merge-track-salvaged',
957 957 default=False,
958 958 )
959 959 coreconfigitem(
960 960 b'experimental',
961 961 b'mergetempdirprefix',
962 962 default=None,
963 963 )
964 964 coreconfigitem(
965 965 b'experimental',
966 966 b'mmapindexthreshold',
967 967 default=None,
968 968 )
969 969 coreconfigitem(
970 970 b'experimental',
971 971 b'narrow',
972 972 default=False,
973 973 )
974 974 coreconfigitem(
975 975 b'experimental',
976 976 b'nonnormalparanoidcheck',
977 977 default=False,
978 978 )
979 979 coreconfigitem(
980 980 b'experimental',
981 981 b'exportableenviron',
982 982 default=list,
983 983 )
984 984 coreconfigitem(
985 985 b'experimental',
986 986 b'extendedheader.index',
987 987 default=None,
988 988 )
989 989 coreconfigitem(
990 990 b'experimental',
991 991 b'extendedheader.similarity',
992 992 default=False,
993 993 )
994 994 coreconfigitem(
995 995 b'experimental',
996 996 b'graphshorten',
997 997 default=False,
998 998 )
999 999 coreconfigitem(
1000 1000 b'experimental',
1001 1001 b'graphstyle.parent',
1002 1002 default=dynamicdefault,
1003 1003 )
1004 1004 coreconfigitem(
1005 1005 b'experimental',
1006 1006 b'graphstyle.missing',
1007 1007 default=dynamicdefault,
1008 1008 )
1009 1009 coreconfigitem(
1010 1010 b'experimental',
1011 1011 b'graphstyle.grandparent',
1012 1012 default=dynamicdefault,
1013 1013 )
1014 1014 coreconfigitem(
1015 1015 b'experimental',
1016 1016 b'hook-track-tags',
1017 1017 default=False,
1018 1018 )
1019 1019 coreconfigitem(
1020 1020 b'experimental',
1021 1021 b'httppeer.advertise-v2',
1022 1022 default=False,
1023 1023 )
1024 1024 coreconfigitem(
1025 1025 b'experimental',
1026 1026 b'httppeer.v2-encoder-order',
1027 1027 default=None,
1028 1028 )
1029 1029 coreconfigitem(
1030 1030 b'experimental',
1031 1031 b'httppostargs',
1032 1032 default=False,
1033 1033 )
1034 1034 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1035 1035 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1036 1036
1037 1037 coreconfigitem(
1038 1038 b'experimental',
1039 1039 b'obsmarkers-exchange-debug',
1040 1040 default=False,
1041 1041 )
1042 1042 coreconfigitem(
1043 1043 b'experimental',
1044 1044 b'remotenames',
1045 1045 default=False,
1046 1046 )
1047 1047 coreconfigitem(
1048 1048 b'experimental',
1049 1049 b'removeemptydirs',
1050 1050 default=True,
1051 1051 )
1052 1052 coreconfigitem(
1053 1053 b'experimental',
1054 1054 b'revert.interactive.select-to-keep',
1055 1055 default=False,
1056 1056 )
1057 1057 coreconfigitem(
1058 1058 b'experimental',
1059 1059 b'revisions.prefixhexnode',
1060 1060 default=False,
1061 1061 )
1062 1062 coreconfigitem(
1063 1063 b'experimental',
1064 1064 b'revlogv2',
1065 1065 default=None,
1066 1066 )
1067 1067 coreconfigitem(
1068 1068 b'experimental',
1069 1069 b'revisions.disambiguatewithin',
1070 1070 default=None,
1071 1071 )
1072 1072 coreconfigitem(
1073 1073 b'experimental',
1074 1074 b'rust.index',
1075 1075 default=False,
1076 1076 )
1077 1077 coreconfigitem(
1078 1078 b'experimental',
1079 1079 b'server.filesdata.recommended-batch-size',
1080 1080 default=50000,
1081 1081 )
1082 1082 coreconfigitem(
1083 1083 b'experimental',
1084 1084 b'server.manifestdata.recommended-batch-size',
1085 1085 default=100000,
1086 1086 )
1087 1087 coreconfigitem(
1088 1088 b'experimental',
1089 1089 b'server.stream-narrow-clones',
1090 1090 default=False,
1091 1091 )
1092 1092 coreconfigitem(
1093 1093 b'experimental',
1094 1094 b'sharesafe-auto-downgrade-shares',
1095 1095 default=False,
1096 1096 )
1097 1097 coreconfigitem(
1098 1098 b'experimental',
1099 1099 b'sharesafe-auto-upgrade-shares',
1100 1100 default=False,
1101 1101 )
1102 1102 coreconfigitem(
1103 1103 b'experimental',
1104 1104 b'sharesafe-auto-upgrade-fail-error',
1105 1105 default=False,
1106 1106 )
1107 1107 coreconfigitem(
1108 1108 b'experimental',
1109 1109 b'sharesafe-warn-outdated-shares',
1110 1110 default=True,
1111 1111 )
1112 1112 coreconfigitem(
1113 1113 b'experimental',
1114 1114 b'single-head-per-branch',
1115 1115 default=False,
1116 1116 )
1117 1117 coreconfigitem(
1118 1118 b'experimental',
1119 1119 b'single-head-per-branch:account-closed-heads',
1120 1120 default=False,
1121 1121 )
1122 1122 coreconfigitem(
1123 1123 b'experimental',
1124 1124 b'single-head-per-branch:public-changes-only',
1125 1125 default=False,
1126 1126 )
1127 1127 coreconfigitem(
1128 1128 b'experimental',
1129 1129 b'sshserver.support-v2',
1130 1130 default=False,
1131 1131 )
1132 1132 coreconfigitem(
1133 1133 b'experimental',
1134 1134 b'sparse-read',
1135 1135 default=False,
1136 1136 )
1137 1137 coreconfigitem(
1138 1138 b'experimental',
1139 1139 b'sparse-read.density-threshold',
1140 1140 default=0.50,
1141 1141 )
1142 1142 coreconfigitem(
1143 1143 b'experimental',
1144 1144 b'sparse-read.min-gap-size',
1145 1145 default=b'65K',
1146 1146 )
1147 1147 coreconfigitem(
1148 1148 b'experimental',
1149 1149 b'treemanifest',
1150 1150 default=False,
1151 1151 )
1152 1152 coreconfigitem(
1153 1153 b'experimental',
1154 1154 b'update.atomic-file',
1155 1155 default=False,
1156 1156 )
1157 1157 coreconfigitem(
1158 1158 b'experimental',
1159 1159 b'sshpeer.advertise-v2',
1160 1160 default=False,
1161 1161 )
1162 1162 coreconfigitem(
1163 1163 b'experimental',
1164 1164 b'web.apiserver',
1165 1165 default=False,
1166 1166 )
1167 1167 coreconfigitem(
1168 1168 b'experimental',
1169 1169 b'web.api.http-v2',
1170 1170 default=False,
1171 1171 )
1172 1172 coreconfigitem(
1173 1173 b'experimental',
1174 1174 b'web.api.debugreflect',
1175 1175 default=False,
1176 1176 )
1177 1177 coreconfigitem(
1178 1178 b'experimental',
1179 1179 b'worker.wdir-get-thread-safe',
1180 1180 default=False,
1181 1181 )
1182 1182 coreconfigitem(
1183 1183 b'experimental',
1184 1184 b'worker.repository-upgrade',
1185 1185 default=False,
1186 1186 )
1187 1187 coreconfigitem(
1188 1188 b'experimental',
1189 1189 b'xdiff',
1190 1190 default=False,
1191 1191 )
1192 1192 coreconfigitem(
1193 1193 b'extensions',
1194 1194 b'.*',
1195 1195 default=None,
1196 1196 generic=True,
1197 1197 )
1198 1198 coreconfigitem(
1199 1199 b'extdata',
1200 1200 b'.*',
1201 1201 default=None,
1202 1202 generic=True,
1203 1203 )
1204 1204 coreconfigitem(
1205 1205 b'format',
1206 1206 b'bookmarks-in-store',
1207 1207 default=False,
1208 1208 )
1209 1209 coreconfigitem(
1210 1210 b'format',
1211 1211 b'chunkcachesize',
1212 1212 default=None,
1213 1213 experimental=True,
1214 1214 )
1215 1215 coreconfigitem(
1216 1216 b'format',
1217 1217 b'dotencode',
1218 1218 default=True,
1219 1219 )
1220 1220 coreconfigitem(
1221 1221 b'format',
1222 1222 b'generaldelta',
1223 1223 default=False,
1224 1224 experimental=True,
1225 1225 )
1226 1226 coreconfigitem(
1227 1227 b'format',
1228 1228 b'manifestcachesize',
1229 1229 default=None,
1230 1230 experimental=True,
1231 1231 )
1232 1232 coreconfigitem(
1233 1233 b'format',
1234 1234 b'maxchainlen',
1235 1235 default=dynamicdefault,
1236 1236 experimental=True,
1237 1237 )
1238 1238 coreconfigitem(
1239 1239 b'format',
1240 1240 b'obsstore-version',
1241 1241 default=None,
1242 1242 )
1243 1243 coreconfigitem(
1244 1244 b'format',
1245 1245 b'sparse-revlog',
1246 1246 default=True,
1247 1247 )
1248 1248 coreconfigitem(
1249 1249 b'format',
1250 1250 b'revlog-compression',
1251 1251 default=lambda: [b'zlib'],
1252 1252 alias=[(b'experimental', b'format.compression')],
1253 1253 )
1254 1254 coreconfigitem(
1255 1255 b'format',
1256 1256 b'usefncache',
1257 1257 default=True,
1258 1258 )
1259 1259 coreconfigitem(
1260 1260 b'format',
1261 1261 b'usegeneraldelta',
1262 1262 default=True,
1263 1263 )
1264 1264 coreconfigitem(
1265 1265 b'format',
1266 1266 b'usestore',
1267 1267 default=True,
1268 1268 )
1269 1269 # Right now, the only efficient implement of the nodemap logic is in Rust,
1270 1270 #
1271 1271 # The case was discussed that the 5.6 sprint and the following was decided for
1272 1272 # feature that have an optional fast implementation (and are a performance
1273 1273 # regression in the others)
1274 1274 #
1275 1275 # * If the fast implementation is not available, Mercurial will refuse to
1276 1276 # access repository that requires it. Pointing to proper documentation
1277 1277 #
1278 1278 # * An option exist to lift that limitation and allow repository access.
1279 1279 #
1280 1280 # Such access will emit a warning unless configured not to.
1281 1281 #
1282 1282 # * When sufficiently mature, the feature can be enabled by default only for
1283 1283 # installation that supports it.
1284 1284 coreconfigitem(
1285 1285 b'format', b'use-persistent-nodemap', default=False, experimental=True
1286 1286 )
1287 1287 coreconfigitem(
1288 1288 b'format',
1289 1289 b'exp-use-copies-side-data-changeset',
1290 1290 default=False,
1291 1291 experimental=True,
1292 1292 )
1293 1293 coreconfigitem(
1294 1294 b'format',
1295 1295 b'exp-use-side-data',
1296 1296 default=False,
1297 1297 experimental=True,
1298 1298 )
1299 1299 coreconfigitem(
1300 1300 b'format',
1301 1301 b'exp-share-safe',
1302 1302 default=False,
1303 1303 experimental=True,
1304 1304 )
1305 1305 coreconfigitem(
1306 1306 b'format',
1307 1307 b'internal-phase',
1308 1308 default=False,
1309 1309 experimental=True,
1310 1310 )
1311 1311 coreconfigitem(
1312 1312 b'fsmonitor',
1313 1313 b'warn_when_unused',
1314 1314 default=True,
1315 1315 )
1316 1316 coreconfigitem(
1317 1317 b'fsmonitor',
1318 1318 b'warn_update_file_count',
1319 1319 default=50000,
1320 1320 )
1321 1321 coreconfigitem(
1322 1322 b'fsmonitor',
1323 1323 b'warn_update_file_count_rust',
1324 1324 default=400000,
1325 1325 )
1326 1326 coreconfigitem(
1327 1327 b'help',
1328 1328 br'hidden-command\..*',
1329 1329 default=False,
1330 1330 generic=True,
1331 1331 )
1332 1332 coreconfigitem(
1333 1333 b'help',
1334 1334 br'hidden-topic\..*',
1335 1335 default=False,
1336 1336 generic=True,
1337 1337 )
1338 1338 coreconfigitem(
1339 1339 b'hooks',
1340 1340 b'.*',
1341 1341 default=dynamicdefault,
1342 1342 generic=True,
1343 1343 )
1344 1344 coreconfigitem(
1345 1345 b'hgweb-paths',
1346 1346 b'.*',
1347 1347 default=list,
1348 1348 generic=True,
1349 1349 )
1350 1350 coreconfigitem(
1351 1351 b'hostfingerprints',
1352 1352 b'.*',
1353 1353 default=list,
1354 1354 generic=True,
1355 1355 )
1356 1356 coreconfigitem(
1357 1357 b'hostsecurity',
1358 1358 b'ciphers',
1359 1359 default=None,
1360 1360 )
1361 1361 coreconfigitem(
1362 1362 b'hostsecurity',
1363 1363 b'minimumprotocol',
1364 1364 default=dynamicdefault,
1365 1365 )
1366 1366 coreconfigitem(
1367 1367 b'hostsecurity',
1368 1368 b'.*:minimumprotocol$',
1369 1369 default=dynamicdefault,
1370 1370 generic=True,
1371 1371 )
1372 1372 coreconfigitem(
1373 1373 b'hostsecurity',
1374 1374 b'.*:ciphers$',
1375 1375 default=dynamicdefault,
1376 1376 generic=True,
1377 1377 )
1378 1378 coreconfigitem(
1379 1379 b'hostsecurity',
1380 1380 b'.*:fingerprints$',
1381 1381 default=list,
1382 1382 generic=True,
1383 1383 )
1384 1384 coreconfigitem(
1385 1385 b'hostsecurity',
1386 1386 b'.*:verifycertsfile$',
1387 1387 default=None,
1388 1388 generic=True,
1389 1389 )
1390 1390
1391 1391 coreconfigitem(
1392 1392 b'http_proxy',
1393 1393 b'always',
1394 1394 default=False,
1395 1395 )
1396 1396 coreconfigitem(
1397 1397 b'http_proxy',
1398 1398 b'host',
1399 1399 default=None,
1400 1400 )
1401 1401 coreconfigitem(
1402 1402 b'http_proxy',
1403 1403 b'no',
1404 1404 default=list,
1405 1405 )
1406 1406 coreconfigitem(
1407 1407 b'http_proxy',
1408 1408 b'passwd',
1409 1409 default=None,
1410 1410 )
1411 1411 coreconfigitem(
1412 1412 b'http_proxy',
1413 1413 b'user',
1414 1414 default=None,
1415 1415 )
1416 1416
1417 1417 coreconfigitem(
1418 1418 b'http',
1419 1419 b'timeout',
1420 1420 default=None,
1421 1421 )
1422 1422
1423 1423 coreconfigitem(
1424 1424 b'logtoprocess',
1425 1425 b'commandexception',
1426 1426 default=None,
1427 1427 )
1428 1428 coreconfigitem(
1429 1429 b'logtoprocess',
1430 1430 b'commandfinish',
1431 1431 default=None,
1432 1432 )
1433 1433 coreconfigitem(
1434 1434 b'logtoprocess',
1435 1435 b'command',
1436 1436 default=None,
1437 1437 )
1438 1438 coreconfigitem(
1439 1439 b'logtoprocess',
1440 1440 b'develwarn',
1441 1441 default=None,
1442 1442 )
1443 1443 coreconfigitem(
1444 1444 b'logtoprocess',
1445 1445 b'uiblocked',
1446 1446 default=None,
1447 1447 )
1448 1448 coreconfigitem(
1449 1449 b'merge',
1450 1450 b'checkunknown',
1451 1451 default=b'abort',
1452 1452 )
1453 1453 coreconfigitem(
1454 1454 b'merge',
1455 1455 b'checkignored',
1456 1456 default=b'abort',
1457 1457 )
1458 1458 coreconfigitem(
1459 1459 b'experimental',
1460 1460 b'merge.checkpathconflicts',
1461 1461 default=False,
1462 1462 )
1463 1463 coreconfigitem(
1464 1464 b'merge',
1465 1465 b'followcopies',
1466 1466 default=True,
1467 1467 )
1468 1468 coreconfigitem(
1469 1469 b'merge',
1470 1470 b'on-failure',
1471 1471 default=b'continue',
1472 1472 )
1473 1473 coreconfigitem(
1474 1474 b'merge',
1475 1475 b'preferancestor',
1476 1476 default=lambda: [b'*'],
1477 1477 experimental=True,
1478 1478 )
1479 1479 coreconfigitem(
1480 1480 b'merge',
1481 1481 b'strict-capability-check',
1482 1482 default=False,
1483 1483 )
1484 1484 coreconfigitem(
1485 1485 b'merge-tools',
1486 1486 b'.*',
1487 1487 default=None,
1488 1488 generic=True,
1489 1489 )
1490 1490 coreconfigitem(
1491 1491 b'merge-tools',
1492 1492 br'.*\.args$',
1493 1493 default=b"$local $base $other",
1494 1494 generic=True,
1495 1495 priority=-1,
1496 1496 )
1497 1497 coreconfigitem(
1498 1498 b'merge-tools',
1499 1499 br'.*\.binary$',
1500 1500 default=False,
1501 1501 generic=True,
1502 1502 priority=-1,
1503 1503 )
1504 1504 coreconfigitem(
1505 1505 b'merge-tools',
1506 1506 br'.*\.check$',
1507 1507 default=list,
1508 1508 generic=True,
1509 1509 priority=-1,
1510 1510 )
1511 1511 coreconfigitem(
1512 1512 b'merge-tools',
1513 1513 br'.*\.checkchanged$',
1514 1514 default=False,
1515 1515 generic=True,
1516 1516 priority=-1,
1517 1517 )
1518 1518 coreconfigitem(
1519 1519 b'merge-tools',
1520 1520 br'.*\.executable$',
1521 1521 default=dynamicdefault,
1522 1522 generic=True,
1523 1523 priority=-1,
1524 1524 )
1525 1525 coreconfigitem(
1526 1526 b'merge-tools',
1527 1527 br'.*\.fixeol$',
1528 1528 default=False,
1529 1529 generic=True,
1530 1530 priority=-1,
1531 1531 )
1532 1532 coreconfigitem(
1533 1533 b'merge-tools',
1534 1534 br'.*\.gui$',
1535 1535 default=False,
1536 1536 generic=True,
1537 1537 priority=-1,
1538 1538 )
1539 1539 coreconfigitem(
1540 1540 b'merge-tools',
1541 1541 br'.*\.mergemarkers$',
1542 1542 default=b'basic',
1543 1543 generic=True,
1544 1544 priority=-1,
1545 1545 )
1546 1546 coreconfigitem(
1547 1547 b'merge-tools',
1548 1548 br'.*\.mergemarkertemplate$',
1549 1549 default=dynamicdefault, # take from command-templates.mergemarker
1550 1550 generic=True,
1551 1551 priority=-1,
1552 1552 )
1553 1553 coreconfigitem(
1554 1554 b'merge-tools',
1555 1555 br'.*\.priority$',
1556 1556 default=0,
1557 1557 generic=True,
1558 1558 priority=-1,
1559 1559 )
1560 1560 coreconfigitem(
1561 1561 b'merge-tools',
1562 1562 br'.*\.premerge$',
1563 1563 default=dynamicdefault,
1564 1564 generic=True,
1565 1565 priority=-1,
1566 1566 )
1567 1567 coreconfigitem(
1568 1568 b'merge-tools',
1569 1569 br'.*\.symlink$',
1570 1570 default=False,
1571 1571 generic=True,
1572 1572 priority=-1,
1573 1573 )
1574 1574 coreconfigitem(
1575 1575 b'pager',
1576 1576 b'attend-.*',
1577 1577 default=dynamicdefault,
1578 1578 generic=True,
1579 1579 )
1580 1580 coreconfigitem(
1581 1581 b'pager',
1582 1582 b'ignore',
1583 1583 default=list,
1584 1584 )
1585 1585 coreconfigitem(
1586 1586 b'pager',
1587 1587 b'pager',
1588 1588 default=dynamicdefault,
1589 1589 )
1590 1590 coreconfigitem(
1591 1591 b'patch',
1592 1592 b'eol',
1593 1593 default=b'strict',
1594 1594 )
1595 1595 coreconfigitem(
1596 1596 b'patch',
1597 1597 b'fuzz',
1598 1598 default=2,
1599 1599 )
1600 1600 coreconfigitem(
1601 1601 b'paths',
1602 1602 b'default',
1603 1603 default=None,
1604 1604 )
1605 1605 coreconfigitem(
1606 1606 b'paths',
1607 1607 b'default-push',
1608 1608 default=None,
1609 1609 )
1610 1610 coreconfigitem(
1611 1611 b'paths',
1612 1612 b'.*',
1613 1613 default=None,
1614 1614 generic=True,
1615 1615 )
1616 1616 coreconfigitem(
1617 1617 b'phases',
1618 1618 b'checksubrepos',
1619 1619 default=b'follow',
1620 1620 )
1621 1621 coreconfigitem(
1622 1622 b'phases',
1623 1623 b'new-commit',
1624 1624 default=b'draft',
1625 1625 )
1626 1626 coreconfigitem(
1627 1627 b'phases',
1628 1628 b'publish',
1629 1629 default=True,
1630 1630 )
1631 1631 coreconfigitem(
1632 1632 b'profiling',
1633 1633 b'enabled',
1634 1634 default=False,
1635 1635 )
1636 1636 coreconfigitem(
1637 1637 b'profiling',
1638 1638 b'format',
1639 1639 default=b'text',
1640 1640 )
1641 1641 coreconfigitem(
1642 1642 b'profiling',
1643 1643 b'freq',
1644 1644 default=1000,
1645 1645 )
1646 1646 coreconfigitem(
1647 1647 b'profiling',
1648 1648 b'limit',
1649 1649 default=30,
1650 1650 )
1651 1651 coreconfigitem(
1652 1652 b'profiling',
1653 1653 b'nested',
1654 1654 default=0,
1655 1655 )
1656 1656 coreconfigitem(
1657 1657 b'profiling',
1658 1658 b'output',
1659 1659 default=None,
1660 1660 )
1661 1661 coreconfigitem(
1662 1662 b'profiling',
1663 1663 b'showmax',
1664 1664 default=0.999,
1665 1665 )
1666 1666 coreconfigitem(
1667 1667 b'profiling',
1668 1668 b'showmin',
1669 1669 default=dynamicdefault,
1670 1670 )
1671 1671 coreconfigitem(
1672 1672 b'profiling',
1673 1673 b'showtime',
1674 1674 default=True,
1675 1675 )
1676 1676 coreconfigitem(
1677 1677 b'profiling',
1678 1678 b'sort',
1679 1679 default=b'inlinetime',
1680 1680 )
1681 1681 coreconfigitem(
1682 1682 b'profiling',
1683 1683 b'statformat',
1684 1684 default=b'hotpath',
1685 1685 )
1686 1686 coreconfigitem(
1687 1687 b'profiling',
1688 1688 b'time-track',
1689 1689 default=dynamicdefault,
1690 1690 )
1691 1691 coreconfigitem(
1692 1692 b'profiling',
1693 1693 b'type',
1694 1694 default=b'stat',
1695 1695 )
1696 1696 coreconfigitem(
1697 1697 b'progress',
1698 1698 b'assume-tty',
1699 1699 default=False,
1700 1700 )
1701 1701 coreconfigitem(
1702 1702 b'progress',
1703 1703 b'changedelay',
1704 1704 default=1,
1705 1705 )
1706 1706 coreconfigitem(
1707 1707 b'progress',
1708 1708 b'clear-complete',
1709 1709 default=True,
1710 1710 )
1711 1711 coreconfigitem(
1712 1712 b'progress',
1713 1713 b'debug',
1714 1714 default=False,
1715 1715 )
1716 1716 coreconfigitem(
1717 1717 b'progress',
1718 1718 b'delay',
1719 1719 default=3,
1720 1720 )
1721 1721 coreconfigitem(
1722 1722 b'progress',
1723 1723 b'disable',
1724 1724 default=False,
1725 1725 )
1726 1726 coreconfigitem(
1727 1727 b'progress',
1728 1728 b'estimateinterval',
1729 1729 default=60.0,
1730 1730 )
1731 1731 coreconfigitem(
1732 1732 b'progress',
1733 1733 b'format',
1734 1734 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1735 1735 )
1736 1736 coreconfigitem(
1737 1737 b'progress',
1738 1738 b'refresh',
1739 1739 default=0.1,
1740 1740 )
1741 1741 coreconfigitem(
1742 1742 b'progress',
1743 1743 b'width',
1744 1744 default=dynamicdefault,
1745 1745 )
1746 1746 coreconfigitem(
1747 1747 b'pull',
1748 1748 b'confirm',
1749 1749 default=False,
1750 1750 )
1751 1751 coreconfigitem(
1752 1752 b'push',
1753 1753 b'pushvars.server',
1754 1754 default=False,
1755 1755 )
1756 1756 coreconfigitem(
1757 1757 b'rewrite',
1758 1758 b'backup-bundle',
1759 1759 default=True,
1760 1760 alias=[(b'ui', b'history-editing-backup')],
1761 1761 )
1762 1762 coreconfigitem(
1763 1763 b'rewrite',
1764 1764 b'update-timestamp',
1765 1765 default=False,
1766 1766 )
1767 1767 coreconfigitem(
1768 1768 b'rewrite',
1769 1769 b'empty-successor',
1770 1770 default=b'skip',
1771 1771 experimental=True,
1772 1772 )
1773 1773 coreconfigitem(
1774 1774 b'storage',
1775 1775 b'new-repo-backend',
1776 1776 default=b'revlogv1',
1777 1777 experimental=True,
1778 1778 )
1779 1779 coreconfigitem(
1780 1780 b'storage',
1781 1781 b'revlog.optimize-delta-parent-choice',
1782 1782 default=True,
1783 1783 alias=[(b'format', b'aggressivemergedeltas')],
1784 1784 )
1785 1785 # experimental as long as rust is experimental (or a C version is implemented)
1786 1786 coreconfigitem(
1787 1787 b'storage',
1788 1788 b'revlog.persistent-nodemap.mmap',
1789 1789 default=True,
1790 1790 experimental=True,
1791 1791 )
1792 1792 # experimental as long as format.use-persistent-nodemap is.
1793 1793 coreconfigitem(
1794 1794 b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True
1795 1795 )
# experimental as long as format.use-persistent-nodemap is.
coreconfigitem(
    b'storage',
    b'revlog.persistent-nodemap.slow-path',
    # Emit a warning (rather than silently allowing access) when the
    # repository requires the persistent nodemap but only the slow,
    # non-Rust implementation is available. The diff left both the old
    # b"allow" default and the new b"warn" default in place, which is a
    # duplicate-keyword SyntaxError; only the new value is kept.
    default=b"warn",
    experimental=True,
)
1803 1803
1804 1804 coreconfigitem(
1805 1805 b'storage',
1806 1806 b'revlog.reuse-external-delta',
1807 1807 default=True,
1808 1808 )
1809 1809 coreconfigitem(
1810 1810 b'storage',
1811 1811 b'revlog.reuse-external-delta-parent',
1812 1812 default=None,
1813 1813 )
1814 1814 coreconfigitem(
1815 1815 b'storage',
1816 1816 b'revlog.zlib.level',
1817 1817 default=None,
1818 1818 )
1819 1819 coreconfigitem(
1820 1820 b'storage',
1821 1821 b'revlog.zstd.level',
1822 1822 default=None,
1823 1823 )
1824 1824 coreconfigitem(
1825 1825 b'server',
1826 1826 b'bookmarks-pushkey-compat',
1827 1827 default=True,
1828 1828 )
1829 1829 coreconfigitem(
1830 1830 b'server',
1831 1831 b'bundle1',
1832 1832 default=True,
1833 1833 )
1834 1834 coreconfigitem(
1835 1835 b'server',
1836 1836 b'bundle1gd',
1837 1837 default=None,
1838 1838 )
1839 1839 coreconfigitem(
1840 1840 b'server',
1841 1841 b'bundle1.pull',
1842 1842 default=None,
1843 1843 )
1844 1844 coreconfigitem(
1845 1845 b'server',
1846 1846 b'bundle1gd.pull',
1847 1847 default=None,
1848 1848 )
1849 1849 coreconfigitem(
1850 1850 b'server',
1851 1851 b'bundle1.push',
1852 1852 default=None,
1853 1853 )
1854 1854 coreconfigitem(
1855 1855 b'server',
1856 1856 b'bundle1gd.push',
1857 1857 default=None,
1858 1858 )
1859 1859 coreconfigitem(
1860 1860 b'server',
1861 1861 b'bundle2.stream',
1862 1862 default=True,
1863 1863 alias=[(b'experimental', b'bundle2.stream')],
1864 1864 )
1865 1865 coreconfigitem(
1866 1866 b'server',
1867 1867 b'compressionengines',
1868 1868 default=list,
1869 1869 )
1870 1870 coreconfigitem(
1871 1871 b'server',
1872 1872 b'concurrent-push-mode',
1873 1873 default=b'check-related',
1874 1874 )
1875 1875 coreconfigitem(
1876 1876 b'server',
1877 1877 b'disablefullbundle',
1878 1878 default=False,
1879 1879 )
1880 1880 coreconfigitem(
1881 1881 b'server',
1882 1882 b'maxhttpheaderlen',
1883 1883 default=1024,
1884 1884 )
1885 1885 coreconfigitem(
1886 1886 b'server',
1887 1887 b'pullbundle',
1888 1888 default=False,
1889 1889 )
1890 1890 coreconfigitem(
1891 1891 b'server',
1892 1892 b'preferuncompressed',
1893 1893 default=False,
1894 1894 )
1895 1895 coreconfigitem(
1896 1896 b'server',
1897 1897 b'streamunbundle',
1898 1898 default=False,
1899 1899 )
1900 1900 coreconfigitem(
1901 1901 b'server',
1902 1902 b'uncompressed',
1903 1903 default=True,
1904 1904 )
1905 1905 coreconfigitem(
1906 1906 b'server',
1907 1907 b'uncompressedallowsecret',
1908 1908 default=False,
1909 1909 )
1910 1910 coreconfigitem(
1911 1911 b'server',
1912 1912 b'view',
1913 1913 default=b'served',
1914 1914 )
1915 1915 coreconfigitem(
1916 1916 b'server',
1917 1917 b'validate',
1918 1918 default=False,
1919 1919 )
1920 1920 coreconfigitem(
1921 1921 b'server',
1922 1922 b'zliblevel',
1923 1923 default=-1,
1924 1924 )
1925 1925 coreconfigitem(
1926 1926 b'server',
1927 1927 b'zstdlevel',
1928 1928 default=3,
1929 1929 )
1930 1930 coreconfigitem(
1931 1931 b'share',
1932 1932 b'pool',
1933 1933 default=None,
1934 1934 )
1935 1935 coreconfigitem(
1936 1936 b'share',
1937 1937 b'poolnaming',
1938 1938 default=b'identity',
1939 1939 )
1940 1940 coreconfigitem(
1941 1941 b'shelve',
1942 1942 b'maxbackups',
1943 1943 default=10,
1944 1944 )
1945 1945 coreconfigitem(
1946 1946 b'smtp',
1947 1947 b'host',
1948 1948 default=None,
1949 1949 )
1950 1950 coreconfigitem(
1951 1951 b'smtp',
1952 1952 b'local_hostname',
1953 1953 default=None,
1954 1954 )
1955 1955 coreconfigitem(
1956 1956 b'smtp',
1957 1957 b'password',
1958 1958 default=None,
1959 1959 )
1960 1960 coreconfigitem(
1961 1961 b'smtp',
1962 1962 b'port',
1963 1963 default=dynamicdefault,
1964 1964 )
1965 1965 coreconfigitem(
1966 1966 b'smtp',
1967 1967 b'tls',
1968 1968 default=b'none',
1969 1969 )
1970 1970 coreconfigitem(
1971 1971 b'smtp',
1972 1972 b'username',
1973 1973 default=None,
1974 1974 )
1975 1975 coreconfigitem(
1976 1976 b'sparse',
1977 1977 b'missingwarning',
1978 1978 default=True,
1979 1979 experimental=True,
1980 1980 )
1981 1981 coreconfigitem(
1982 1982 b'subrepos',
1983 1983 b'allowed',
1984 1984 default=dynamicdefault, # to make backporting simpler
1985 1985 )
1986 1986 coreconfigitem(
1987 1987 b'subrepos',
1988 1988 b'hg:allowed',
1989 1989 default=dynamicdefault,
1990 1990 )
1991 1991 coreconfigitem(
1992 1992 b'subrepos',
1993 1993 b'git:allowed',
1994 1994 default=dynamicdefault,
1995 1995 )
1996 1996 coreconfigitem(
1997 1997 b'subrepos',
1998 1998 b'svn:allowed',
1999 1999 default=dynamicdefault,
2000 2000 )
2001 2001 coreconfigitem(
2002 2002 b'templates',
2003 2003 b'.*',
2004 2004 default=None,
2005 2005 generic=True,
2006 2006 )
2007 2007 coreconfigitem(
2008 2008 b'templateconfig',
2009 2009 b'.*',
2010 2010 default=dynamicdefault,
2011 2011 generic=True,
2012 2012 )
2013 2013 coreconfigitem(
2014 2014 b'trusted',
2015 2015 b'groups',
2016 2016 default=list,
2017 2017 )
2018 2018 coreconfigitem(
2019 2019 b'trusted',
2020 2020 b'users',
2021 2021 default=list,
2022 2022 )
2023 2023 coreconfigitem(
2024 2024 b'ui',
2025 2025 b'_usedassubrepo',
2026 2026 default=False,
2027 2027 )
2028 2028 coreconfigitem(
2029 2029 b'ui',
2030 2030 b'allowemptycommit',
2031 2031 default=False,
2032 2032 )
2033 2033 coreconfigitem(
2034 2034 b'ui',
2035 2035 b'archivemeta',
2036 2036 default=True,
2037 2037 )
2038 2038 coreconfigitem(
2039 2039 b'ui',
2040 2040 b'askusername',
2041 2041 default=False,
2042 2042 )
2043 2043 coreconfigitem(
2044 2044 b'ui',
2045 2045 b'available-memory',
2046 2046 default=None,
2047 2047 )
2048 2048
2049 2049 coreconfigitem(
2050 2050 b'ui',
2051 2051 b'clonebundlefallback',
2052 2052 default=False,
2053 2053 )
2054 2054 coreconfigitem(
2055 2055 b'ui',
2056 2056 b'clonebundleprefers',
2057 2057 default=list,
2058 2058 )
2059 2059 coreconfigitem(
2060 2060 b'ui',
2061 2061 b'clonebundles',
2062 2062 default=True,
2063 2063 )
2064 2064 coreconfigitem(
2065 2065 b'ui',
2066 2066 b'color',
2067 2067 default=b'auto',
2068 2068 )
2069 2069 coreconfigitem(
2070 2070 b'ui',
2071 2071 b'commitsubrepos',
2072 2072 default=False,
2073 2073 )
2074 2074 coreconfigitem(
2075 2075 b'ui',
2076 2076 b'debug',
2077 2077 default=False,
2078 2078 )
2079 2079 coreconfigitem(
2080 2080 b'ui',
2081 2081 b'debugger',
2082 2082 default=None,
2083 2083 )
2084 2084 coreconfigitem(
2085 2085 b'ui',
2086 2086 b'editor',
2087 2087 default=dynamicdefault,
2088 2088 )
2089 2089 coreconfigitem(
2090 2090 b'ui',
2091 2091 b'detailed-exit-code',
2092 2092 default=False,
2093 2093 experimental=True,
2094 2094 )
2095 2095 coreconfigitem(
2096 2096 b'ui',
2097 2097 b'fallbackencoding',
2098 2098 default=None,
2099 2099 )
2100 2100 coreconfigitem(
2101 2101 b'ui',
2102 2102 b'forcecwd',
2103 2103 default=None,
2104 2104 )
2105 2105 coreconfigitem(
2106 2106 b'ui',
2107 2107 b'forcemerge',
2108 2108 default=None,
2109 2109 )
2110 2110 coreconfigitem(
2111 2111 b'ui',
2112 2112 b'formatdebug',
2113 2113 default=False,
2114 2114 )
2115 2115 coreconfigitem(
2116 2116 b'ui',
2117 2117 b'formatjson',
2118 2118 default=False,
2119 2119 )
2120 2120 coreconfigitem(
2121 2121 b'ui',
2122 2122 b'formatted',
2123 2123 default=None,
2124 2124 )
2125 2125 coreconfigitem(
2126 2126 b'ui',
2127 2127 b'interactive',
2128 2128 default=None,
2129 2129 )
2130 2130 coreconfigitem(
2131 2131 b'ui',
2132 2132 b'interface',
2133 2133 default=None,
2134 2134 )
2135 2135 coreconfigitem(
2136 2136 b'ui',
2137 2137 b'interface.chunkselector',
2138 2138 default=None,
2139 2139 )
2140 2140 coreconfigitem(
2141 2141 b'ui',
2142 2142 b'large-file-limit',
2143 2143 default=10000000,
2144 2144 )
2145 2145 coreconfigitem(
2146 2146 b'ui',
2147 2147 b'logblockedtimes',
2148 2148 default=False,
2149 2149 )
2150 2150 coreconfigitem(
2151 2151 b'ui',
2152 2152 b'merge',
2153 2153 default=None,
2154 2154 )
2155 2155 coreconfigitem(
2156 2156 b'ui',
2157 2157 b'mergemarkers',
2158 2158 default=b'basic',
2159 2159 )
2160 2160 coreconfigitem(
2161 2161 b'ui',
2162 2162 b'message-output',
2163 2163 default=b'stdio',
2164 2164 )
2165 2165 coreconfigitem(
2166 2166 b'ui',
2167 2167 b'nontty',
2168 2168 default=False,
2169 2169 )
2170 2170 coreconfigitem(
2171 2171 b'ui',
2172 2172 b'origbackuppath',
2173 2173 default=None,
2174 2174 )
2175 2175 coreconfigitem(
2176 2176 b'ui',
2177 2177 b'paginate',
2178 2178 default=True,
2179 2179 )
2180 2180 coreconfigitem(
2181 2181 b'ui',
2182 2182 b'patch',
2183 2183 default=None,
2184 2184 )
2185 2185 coreconfigitem(
2186 2186 b'ui',
2187 2187 b'portablefilenames',
2188 2188 default=b'warn',
2189 2189 )
2190 2190 coreconfigitem(
2191 2191 b'ui',
2192 2192 b'promptecho',
2193 2193 default=False,
2194 2194 )
2195 2195 coreconfigitem(
2196 2196 b'ui',
2197 2197 b'quiet',
2198 2198 default=False,
2199 2199 )
2200 2200 coreconfigitem(
2201 2201 b'ui',
2202 2202 b'quietbookmarkmove',
2203 2203 default=False,
2204 2204 )
2205 2205 coreconfigitem(
2206 2206 b'ui',
2207 2207 b'relative-paths',
2208 2208 default=b'legacy',
2209 2209 )
2210 2210 coreconfigitem(
2211 2211 b'ui',
2212 2212 b'remotecmd',
2213 2213 default=b'hg',
2214 2214 )
2215 2215 coreconfigitem(
2216 2216 b'ui',
2217 2217 b'report_untrusted',
2218 2218 default=True,
2219 2219 )
2220 2220 coreconfigitem(
2221 2221 b'ui',
2222 2222 b'rollback',
2223 2223 default=True,
2224 2224 )
2225 2225 coreconfigitem(
2226 2226 b'ui',
2227 2227 b'signal-safe-lock',
2228 2228 default=True,
2229 2229 )
2230 2230 coreconfigitem(
2231 2231 b'ui',
2232 2232 b'slash',
2233 2233 default=False,
2234 2234 )
2235 2235 coreconfigitem(
2236 2236 b'ui',
2237 2237 b'ssh',
2238 2238 default=b'ssh',
2239 2239 )
2240 2240 coreconfigitem(
2241 2241 b'ui',
2242 2242 b'ssherrorhint',
2243 2243 default=None,
2244 2244 )
2245 2245 coreconfigitem(
2246 2246 b'ui',
2247 2247 b'statuscopies',
2248 2248 default=False,
2249 2249 )
2250 2250 coreconfigitem(
2251 2251 b'ui',
2252 2252 b'strict',
2253 2253 default=False,
2254 2254 )
2255 2255 coreconfigitem(
2256 2256 b'ui',
2257 2257 b'style',
2258 2258 default=b'',
2259 2259 )
2260 2260 coreconfigitem(
2261 2261 b'ui',
2262 2262 b'supportcontact',
2263 2263 default=None,
2264 2264 )
2265 2265 coreconfigitem(
2266 2266 b'ui',
2267 2267 b'textwidth',
2268 2268 default=78,
2269 2269 )
2270 2270 coreconfigitem(
2271 2271 b'ui',
2272 2272 b'timeout',
2273 2273 default=b'600',
2274 2274 )
2275 2275 coreconfigitem(
2276 2276 b'ui',
2277 2277 b'timeout.warn',
2278 2278 default=0,
2279 2279 )
2280 2280 coreconfigitem(
2281 2281 b'ui',
2282 2282 b'timestamp-output',
2283 2283 default=False,
2284 2284 )
2285 2285 coreconfigitem(
2286 2286 b'ui',
2287 2287 b'traceback',
2288 2288 default=False,
2289 2289 )
2290 2290 coreconfigitem(
2291 2291 b'ui',
2292 2292 b'tweakdefaults',
2293 2293 default=False,
2294 2294 )
2295 2295 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2296 2296 coreconfigitem(
2297 2297 b'ui',
2298 2298 b'verbose',
2299 2299 default=False,
2300 2300 )
2301 2301 coreconfigitem(
2302 2302 b'verify',
2303 2303 b'skipflags',
2304 2304 default=None,
2305 2305 )
2306 2306 coreconfigitem(
2307 2307 b'web',
2308 2308 b'allowbz2',
2309 2309 default=False,
2310 2310 )
2311 2311 coreconfigitem(
2312 2312 b'web',
2313 2313 b'allowgz',
2314 2314 default=False,
2315 2315 )
2316 2316 coreconfigitem(
2317 2317 b'web',
2318 2318 b'allow-pull',
2319 2319 alias=[(b'web', b'allowpull')],
2320 2320 default=True,
2321 2321 )
2322 2322 coreconfigitem(
2323 2323 b'web',
2324 2324 b'allow-push',
2325 2325 alias=[(b'web', b'allow_push')],
2326 2326 default=list,
2327 2327 )
2328 2328 coreconfigitem(
2329 2329 b'web',
2330 2330 b'allowzip',
2331 2331 default=False,
2332 2332 )
2333 2333 coreconfigitem(
2334 2334 b'web',
2335 2335 b'archivesubrepos',
2336 2336 default=False,
2337 2337 )
2338 2338 coreconfigitem(
2339 2339 b'web',
2340 2340 b'cache',
2341 2341 default=True,
2342 2342 )
2343 2343 coreconfigitem(
2344 2344 b'web',
2345 2345 b'comparisoncontext',
2346 2346 default=5,
2347 2347 )
2348 2348 coreconfigitem(
2349 2349 b'web',
2350 2350 b'contact',
2351 2351 default=None,
2352 2352 )
2353 2353 coreconfigitem(
2354 2354 b'web',
2355 2355 b'deny_push',
2356 2356 default=list,
2357 2357 )
2358 2358 coreconfigitem(
2359 2359 b'web',
2360 2360 b'guessmime',
2361 2361 default=False,
2362 2362 )
2363 2363 coreconfigitem(
2364 2364 b'web',
2365 2365 b'hidden',
2366 2366 default=False,
2367 2367 )
2368 2368 coreconfigitem(
2369 2369 b'web',
2370 2370 b'labels',
2371 2371 default=list,
2372 2372 )
2373 2373 coreconfigitem(
2374 2374 b'web',
2375 2375 b'logoimg',
2376 2376 default=b'hglogo.png',
2377 2377 )
2378 2378 coreconfigitem(
2379 2379 b'web',
2380 2380 b'logourl',
2381 2381 default=b'https://mercurial-scm.org/',
2382 2382 )
2383 2383 coreconfigitem(
2384 2384 b'web',
2385 2385 b'accesslog',
2386 2386 default=b'-',
2387 2387 )
2388 2388 coreconfigitem(
2389 2389 b'web',
2390 2390 b'address',
2391 2391 default=b'',
2392 2392 )
2393 2393 coreconfigitem(
2394 2394 b'web',
2395 2395 b'allow-archive',
2396 2396 alias=[(b'web', b'allow_archive')],
2397 2397 default=list,
2398 2398 )
2399 2399 coreconfigitem(
2400 2400 b'web',
2401 2401 b'allow_read',
2402 2402 default=list,
2403 2403 )
2404 2404 coreconfigitem(
2405 2405 b'web',
2406 2406 b'baseurl',
2407 2407 default=None,
2408 2408 )
2409 2409 coreconfigitem(
2410 2410 b'web',
2411 2411 b'cacerts',
2412 2412 default=None,
2413 2413 )
2414 2414 coreconfigitem(
2415 2415 b'web',
2416 2416 b'certificate',
2417 2417 default=None,
2418 2418 )
2419 2419 coreconfigitem(
2420 2420 b'web',
2421 2421 b'collapse',
2422 2422 default=False,
2423 2423 )
2424 2424 coreconfigitem(
2425 2425 b'web',
2426 2426 b'csp',
2427 2427 default=None,
2428 2428 )
2429 2429 coreconfigitem(
2430 2430 b'web',
2431 2431 b'deny_read',
2432 2432 default=list,
2433 2433 )
2434 2434 coreconfigitem(
2435 2435 b'web',
2436 2436 b'descend',
2437 2437 default=True,
2438 2438 )
2439 2439 coreconfigitem(
2440 2440 b'web',
2441 2441 b'description',
2442 2442 default=b"",
2443 2443 )
2444 2444 coreconfigitem(
2445 2445 b'web',
2446 2446 b'encoding',
2447 2447 default=lambda: encoding.encoding,
2448 2448 )
2449 2449 coreconfigitem(
2450 2450 b'web',
2451 2451 b'errorlog',
2452 2452 default=b'-',
2453 2453 )
2454 2454 coreconfigitem(
2455 2455 b'web',
2456 2456 b'ipv6',
2457 2457 default=False,
2458 2458 )
2459 2459 coreconfigitem(
2460 2460 b'web',
2461 2461 b'maxchanges',
2462 2462 default=10,
2463 2463 )
2464 2464 coreconfigitem(
2465 2465 b'web',
2466 2466 b'maxfiles',
2467 2467 default=10,
2468 2468 )
2469 2469 coreconfigitem(
2470 2470 b'web',
2471 2471 b'maxshortchanges',
2472 2472 default=60,
2473 2473 )
2474 2474 coreconfigitem(
2475 2475 b'web',
2476 2476 b'motd',
2477 2477 default=b'',
2478 2478 )
2479 2479 coreconfigitem(
2480 2480 b'web',
2481 2481 b'name',
2482 2482 default=dynamicdefault,
2483 2483 )
2484 2484 coreconfigitem(
2485 2485 b'web',
2486 2486 b'port',
2487 2487 default=8000,
2488 2488 )
2489 2489 coreconfigitem(
2490 2490 b'web',
2491 2491 b'prefix',
2492 2492 default=b'',
2493 2493 )
2494 2494 coreconfigitem(
2495 2495 b'web',
2496 2496 b'push_ssl',
2497 2497 default=True,
2498 2498 )
2499 2499 coreconfigitem(
2500 2500 b'web',
2501 2501 b'refreshinterval',
2502 2502 default=20,
2503 2503 )
2504 2504 coreconfigitem(
2505 2505 b'web',
2506 2506 b'server-header',
2507 2507 default=None,
2508 2508 )
2509 2509 coreconfigitem(
2510 2510 b'web',
2511 2511 b'static',
2512 2512 default=None,
2513 2513 )
2514 2514 coreconfigitem(
2515 2515 b'web',
2516 2516 b'staticurl',
2517 2517 default=None,
2518 2518 )
2519 2519 coreconfigitem(
2520 2520 b'web',
2521 2521 b'stripes',
2522 2522 default=1,
2523 2523 )
2524 2524 coreconfigitem(
2525 2525 b'web',
2526 2526 b'style',
2527 2527 default=b'paper',
2528 2528 )
2529 2529 coreconfigitem(
2530 2530 b'web',
2531 2531 b'templates',
2532 2532 default=None,
2533 2533 )
2534 2534 coreconfigitem(
2535 2535 b'web',
2536 2536 b'view',
2537 2537 default=b'served',
2538 2538 experimental=True,
2539 2539 )
2540 2540 coreconfigitem(
2541 2541 b'worker',
2542 2542 b'backgroundclose',
2543 2543 default=dynamicdefault,
2544 2544 )
2545 2545 # Windows defaults to a limit of 512 open files. A buffer of 128
2546 2546 # should give us enough headway.
2547 2547 coreconfigitem(
2548 2548 b'worker',
2549 2549 b'backgroundclosemaxqueue',
2550 2550 default=384,
2551 2551 )
2552 2552 coreconfigitem(
2553 2553 b'worker',
2554 2554 b'backgroundcloseminfilecount',
2555 2555 default=2048,
2556 2556 )
2557 2557 coreconfigitem(
2558 2558 b'worker',
2559 2559 b'backgroundclosethreadcount',
2560 2560 default=4,
2561 2561 )
2562 2562 coreconfigitem(
2563 2563 b'worker',
2564 2564 b'enabled',
2565 2565 default=True,
2566 2566 )
2567 2567 coreconfigitem(
2568 2568 b'worker',
2569 2569 b'numcpus',
2570 2570 default=None,
2571 2571 )
2572 2572
2573 2573 # Rebase related configuration moved to core because other extension are doing
2574 2574 # strange things. For example, shelve import the extensions to reuse some bit
2575 2575 # without formally loading it.
2576 2576 coreconfigitem(
2577 2577 b'commands',
2578 2578 b'rebase.requiredest',
2579 2579 default=False,
2580 2580 )
2581 2581 coreconfigitem(
2582 2582 b'experimental',
2583 2583 b'rebaseskipobsolete',
2584 2584 default=True,
2585 2585 )
2586 2586 coreconfigitem(
2587 2587 b'rebase',
2588 2588 b'singletransaction',
2589 2589 default=False,
2590 2590 )
2591 2591 coreconfigitem(
2592 2592 b'rebase',
2593 2593 b'experimental.inmemory',
2594 2594 default=False,
2595 2595 )
@@ -1,2985 +1,2986 b''
1 1 The Mercurial system uses a set of configuration files to control
2 2 aspects of its behavior.
3 3
4 4 Troubleshooting
5 5 ===============
6 6
7 7 If you're having problems with your configuration,
8 8 :hg:`config --debug` can help you understand what is introducing
9 9 a setting into your environment.
10 10
11 11 See :hg:`help config.syntax` and :hg:`help config.files`
12 12 for information about how and where to override things.
13 13
14 14 Structure
15 15 =========
16 16
17 17 The configuration files use a simple ini-file format. A configuration
18 18 file consists of sections, led by a ``[section]`` header and followed
19 19 by ``name = value`` entries::
20 20
21 21 [ui]
22 22 username = Firstname Lastname <firstname.lastname@example.net>
23 23 verbose = True
24 24
25 25 The above entries will be referred to as ``ui.username`` and
26 26 ``ui.verbose``, respectively. See :hg:`help config.syntax`.
27 27
28 28 Files
29 29 =====
30 30
31 31 Mercurial reads configuration data from several files, if they exist.
32 32 These files do not exist by default and you will have to create the
33 33 appropriate configuration files yourself:
34 34
35 35 Local configuration is put into the per-repository ``<repo>/.hg/hgrc`` file.
36 36
37 37 Global configuration like the username setting is typically put into:
38 38
39 39 .. container:: windows
40 40
41 41 - ``%USERPROFILE%\mercurial.ini`` (on Windows)
42 42
43 43 .. container:: unix.plan9
44 44
45 45 - ``$HOME/.hgrc`` (on Unix, Plan9)
46 46
47 47 The names of these files depend on the system on which Mercurial is
48 48 installed. ``*.rc`` files from a single directory are read in
49 49 alphabetical order, later ones overriding earlier ones. Where multiple
50 50 paths are given below, settings from earlier paths override later
51 51 ones.
52 52
53 53 .. container:: verbose.unix
54 54
55 55 On Unix, the following files are consulted:
56 56
57 57 - ``<repo>/.hg/hgrc-not-shared`` (per-repository)
58 58 - ``<repo>/.hg/hgrc`` (per-repository)
59 59 - ``$HOME/.hgrc`` (per-user)
60 60 - ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
61 61 - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
62 62 - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
63 63 - ``/etc/mercurial/hgrc`` (per-system)
64 64 - ``/etc/mercurial/hgrc.d/*.rc`` (per-system)
65 65 - ``<internal>/*.rc`` (defaults)
66 66
67 67 .. container:: verbose.windows
68 68
69 69 On Windows, the following files are consulted:
70 70
71 71 - ``<repo>/.hg/hgrc-not-shared`` (per-repository)
72 72 - ``<repo>/.hg/hgrc`` (per-repository)
73 73 - ``%USERPROFILE%\.hgrc`` (per-user)
74 74 - ``%USERPROFILE%\Mercurial.ini`` (per-user)
75 75 - ``%HOME%\.hgrc`` (per-user)
76 76 - ``%HOME%\Mercurial.ini`` (per-user)
77 77 - ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` (per-system)
78 78 - ``<install-dir>\hgrc.d\*.rc`` (per-installation)
79 79 - ``<install-dir>\Mercurial.ini`` (per-installation)
80 80 - ``%PROGRAMDATA%\Mercurial\hgrc`` (per-system)
81 81 - ``%PROGRAMDATA%\Mercurial\Mercurial.ini`` (per-system)
82 82 - ``%PROGRAMDATA%\Mercurial\hgrc.d\*.rc`` (per-system)
83 83 - ``<internal>/*.rc`` (defaults)
84 84
85 85 .. note::
86 86
87 87 The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
88 88 is used when running 32-bit Python on 64-bit Windows.
89 89
90 90 .. container:: verbose.plan9
91 91
92 92 On Plan9, the following files are consulted:
93 93
94 94 - ``<repo>/.hg/hgrc-not-shared`` (per-repository)
95 95 - ``<repo>/.hg/hgrc`` (per-repository)
96 96 - ``$home/lib/hgrc`` (per-user)
97 97 - ``<install-root>/lib/mercurial/hgrc`` (per-installation)
98 98 - ``<install-root>/lib/mercurial/hgrc.d/*.rc`` (per-installation)
99 99 - ``/lib/mercurial/hgrc`` (per-system)
100 100 - ``/lib/mercurial/hgrc.d/*.rc`` (per-system)
101 101 - ``<internal>/*.rc`` (defaults)
102 102
103 103 Per-repository configuration options only apply in a
104 104 particular repository. This file is not version-controlled, and
105 105 will not get transferred during a "clone" operation. Options in
106 106 this file override options in all other configuration files.
107 107
108 108 .. container:: unix.plan9
109 109
110 110 On Plan 9 and Unix, most of this file will be ignored if it doesn't
111 111 belong to a trusted user or to a trusted group. See
112 112 :hg:`help config.trusted` for more details.
113 113
114 114 Per-user configuration file(s) are for the user running Mercurial. Options
115 115 in these files apply to all Mercurial commands executed by this user in any
116 116 directory. Options in these files override per-system and per-installation
117 117 options.
118 118
119 119 Per-installation configuration files are searched for in the
120 120 directory where Mercurial is installed. ``<install-root>`` is the
121 121 parent directory of the **hg** executable (or symlink) being run.
122 122
123 123 .. container:: unix.plan9
124 124
125 125 For example, if installed in ``/shared/tools/bin/hg``, Mercurial
126 126 will look in ``/shared/tools/etc/mercurial/hgrc``. Options in these
127 127 files apply to all Mercurial commands executed by any user in any
128 128 directory.
129 129
130 130 Per-installation configuration files are for the system on
131 131 which Mercurial is running. Options in these files apply to all
132 132 Mercurial commands executed by any user in any directory. Registry
133 133 keys contain PATH-like strings, every part of which must reference
134 134 a ``Mercurial.ini`` file or be a directory where ``*.rc`` files will
135 135 be read. Mercurial checks each of these locations in the specified
136 136 order until one or more configuration files are detected.
137 137
138 138 Per-system configuration files are for the system on which Mercurial
139 139 is running. Options in these files apply to all Mercurial commands
140 140 executed by any user in any directory. Options in these files
141 141 override per-installation options.
142 142
143 143 Mercurial comes with some default configuration. The default configuration
144 144 files are installed with Mercurial and will be overwritten on upgrades. Default
145 145 configuration files should never be edited by users or administrators but can
146 146 be overridden in other configuration files. So far the directory only contains
147 147 merge tool configuration but packagers can also put other default configuration
148 148 there.
149 149
150 150 .. container:: verbose
151 151
152 152 On versions 5.7 and later, if share-safe functionality is enabled,
153 153 shares will read config file of share source too.
154 154 `<share-source/.hg/hgrc>` is read before reading `<repo/.hg/hgrc>`.
155 155
156 156 For configs which should not be shared, `<repo/.hg/hgrc-not-shared>`
157 157 should be used.
158 158
159 159 Syntax
160 160 ======
161 161
162 162 A configuration file consists of sections, led by a ``[section]`` header
163 163 and followed by ``name = value`` entries (sometimes called
164 164 ``configuration keys``)::
165 165
166 166 [spam]
167 167 eggs=ham
168 168 green=
169 169 eggs
170 170
171 171 Each line contains one entry. If the lines that follow are indented,
172 172 they are treated as continuations of that entry. Leading whitespace is
173 173 removed from values. Empty lines are skipped. Lines beginning with
174 174 ``#`` or ``;`` are ignored and may be used to provide comments.
175 175
176 176 Configuration keys can be set multiple times, in which case Mercurial
177 177 will use the value that was configured last. As an example::
178 178
179 179 [spam]
180 180 eggs=large
181 181 ham=serrano
182 182 eggs=small
183 183
184 184 This would set the configuration key named ``eggs`` to ``small``.
185 185
186 186 It is also possible to define a section multiple times. A section can
187 187 be redefined on the same and/or on different configuration files. For
188 188 example::
189 189
190 190 [foo]
191 191 eggs=large
192 192 ham=serrano
193 193 eggs=small
194 194
195 195 [bar]
196 196 eggs=ham
197 197 green=
198 198 eggs
199 199
200 200 [foo]
201 201 ham=prosciutto
202 202 eggs=medium
203 203 bread=toasted
204 204
205 205 This would set the ``eggs``, ``ham``, and ``bread`` configuration keys
206 206 of the ``foo`` section to ``medium``, ``prosciutto``, and ``toasted``,
207 207 respectively. As you can see the only thing that matters is the last
208 208 value that was set for each of the configuration keys.
209 209
210 210 If a configuration key is set multiple times in different
211 211 configuration files the final value will depend on the order in which
212 212 the different configuration files are read, with settings from earlier
213 213 paths overriding later ones as described on the ``Files`` section
214 214 above.
215 215
216 216 A line of the form ``%include file`` will include ``file`` into the
217 217 current configuration file. The inclusion is recursive, which means
218 218 that included files can include other files. Filenames are relative to
219 219 the configuration file in which the ``%include`` directive is found.
220 220 Environment variables and ``~user`` constructs are expanded in
221 221 ``file``. This lets you do something like::
222 222
223 223 %include ~/.hgrc.d/$HOST.rc
224 224
225 225 to include a different configuration file on each computer you use.
226 226
227 227 A line with ``%unset name`` will remove ``name`` from the current
228 228 section, if it has been set previously.
229 229
230 230 The values are either free-form text strings, lists of text strings,
231 231 or Boolean values. Boolean values can be set to true using any of "1",
232 232 "yes", "true", or "on" and to false using "0", "no", "false", or "off"
233 233 (all case insensitive).
234 234
235 235 List values are separated by whitespace or comma, except when values are
236 236 placed in double quotation marks::
237 237
238 238 allow_read = "John Doe, PhD", brian, betty
239 239
240 240 Quotation marks can be escaped by prefixing them with a backslash. Only
241 241 quotation marks at the beginning of a word are counted as quotations
242 242 (e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
243 243
244 244 Sections
245 245 ========
246 246
247 247 This section describes the different sections that may appear in a
248 248 Mercurial configuration file, the purpose of each section, its possible
249 249 keys, and their possible values.
250 250
251 251 ``alias``
252 252 ---------
253 253
254 254 Defines command aliases.
255 255
256 256 Aliases allow you to define your own commands in terms of other
257 257 commands (or aliases), optionally including arguments. Positional
258 258 arguments in the form of ``$1``, ``$2``, etc. in the alias definition
259 259 are expanded by Mercurial before execution. Positional arguments not
260 260 already used by ``$N`` in the definition are put at the end of the
261 261 command to be executed.
262 262
263 263 Alias definitions consist of lines of the form::
264 264
265 265 <alias> = <command> [<argument>]...
266 266
267 267 For example, this definition::
268 268
269 269 latest = log --limit 5
270 270
271 271 creates a new command ``latest`` that shows only the five most recent
272 272 changesets. You can define subsequent aliases using earlier ones::
273 273
274 274 stable5 = latest -b stable
275 275
276 276 .. note::
277 277
278 278 It is possible to create aliases with the same names as
279 279 existing commands, which will then override the original
280 280 definitions. This is almost always a bad idea!
281 281
282 282 An alias can start with an exclamation point (``!``) to make it a
283 283 shell alias. A shell alias is executed with the shell and will let you
284 284 run arbitrary commands. As an example, ::
285 285
286 286 echo = !echo $@
287 287
288 288 will let you do ``hg echo foo`` to have ``foo`` printed in your
289 289 terminal. A better example might be::
290 290
291 291 purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
292 292
293 293 which will make ``hg purge`` delete all unknown files in the
294 294 repository in the same manner as the purge extension.
295 295
296 296 Positional arguments like ``$1``, ``$2``, etc. in the alias definition
297 297 expand to the command arguments. Unmatched arguments are
298 298 removed. ``$0`` expands to the alias name and ``$@`` expands to all
299 299 arguments separated by a space. ``"$@"`` (with quotes) expands to all
300 300 arguments quoted individually and separated by a space. These expansions
301 301 happen before the command is passed to the shell.
302 302
303 303 Shell aliases are executed in an environment where ``$HG`` expands to
304 304 the path of the Mercurial that was used to execute the alias. This is
305 305 useful when you want to call further Mercurial commands in a shell
306 306 alias, as was done above for the purge alias. In addition,
307 307 ``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg
308 308 echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.
309 309
310 310 .. note::
311 311
312 312 Some global configuration options such as ``-R`` are
313 313 processed before shell aliases and will thus not be passed to
314 314 aliases.
315 315
316 316
317 317 ``annotate``
318 318 ------------
319 319
320 320 Settings used when displaying file annotations. All values are
321 321 Booleans and default to False. See :hg:`help config.diff` for
322 322 related options for the diff command.
323 323
324 324 ``ignorews``
325 325 Ignore white space when comparing lines.
326 326
327 327 ``ignorewseol``
328 328 Ignore white space at the end of a line when comparing lines.
329 329
330 330 ``ignorewsamount``
331 331 Ignore changes in the amount of white space.
332 332
333 333 ``ignoreblanklines``
334 334 Ignore changes whose lines are all blank.
335 335
336 336
337 337 ``auth``
338 338 --------
339 339
340 340 Authentication credentials and other authentication-like configuration
341 341 for HTTP connections. This section allows you to store usernames and
342 342 passwords for use when logging *into* HTTP servers. See
343 343 :hg:`help config.web` if you want to configure *who* can login to
344 344 your HTTP server.
345 345
346 346 The following options apply to all hosts.
347 347
348 348 ``cookiefile``
349 349 Path to a file containing HTTP cookie lines. Cookies matching a
350 350 host will be sent automatically.
351 351
352 352 The file format uses the Mozilla cookies.txt format, which defines cookies
353 353 on their own lines. Each line contains 7 fields delimited by the tab
354 354 character (domain, is_domain_cookie, path, is_secure, expires, name,
355 355 value). For more info, do an Internet search for "Netscape cookies.txt
356 356 format."
357 357
358 358 Note: the cookies parser does not handle port numbers on domains. You
359 359 will need to remove ports from the domain for the cookie to be recognized.
360 360 This could result in a cookie being disclosed to an unwanted server.
361 361
362 362 The cookies file is read-only.
363 363
364 364 Other options in this section are grouped by name and have the following
365 365 format::
366 366
367 367 <name>.<argument> = <value>
368 368
369 369 where ``<name>`` is used to group arguments into authentication
370 370 entries. Example::
371 371
372 372 foo.prefix = hg.intevation.de/mercurial
373 373 foo.username = foo
374 374 foo.password = bar
375 375 foo.schemes = http https
376 376
377 377 bar.prefix = secure.example.org
378 378 bar.key = path/to/file.key
379 379 bar.cert = path/to/file.cert
380 380 bar.schemes = https
381 381
382 382 Supported arguments:
383 383
384 384 ``prefix``
385 385 Either ``*`` or a URI prefix with or without the scheme part.
386 386 The authentication entry with the longest matching prefix is used
387 387 (where ``*`` matches everything and counts as a match of length
388 388 1). If the prefix doesn't include a scheme, the match is performed
389 389 against the URI with its scheme stripped as well, and the schemes
390 390 argument, q.v., is then subsequently consulted.
391 391
392 392 ``username``
393 393 Optional. Username to authenticate with. If not given, and the
394 394 remote site requires basic or digest authentication, the user will
395 395 be prompted for it. Environment variables are expanded in the
396 396 username letting you do ``foo.username = $USER``. If the URI
397 397 includes a username, only ``[auth]`` entries with a matching
398 398 username or without a username will be considered.
399 399
400 400 ``password``
401 401 Optional. Password to authenticate with. If not given, and the
402 402 remote site requires basic or digest authentication, the user
403 403 will be prompted for it.
404 404
405 405 ``key``
406 406 Optional. PEM encoded client certificate key file. Environment
407 407 variables are expanded in the filename.
408 408
409 409 ``cert``
410 410 Optional. PEM encoded client certificate chain file. Environment
411 411 variables are expanded in the filename.
412 412
413 413 ``schemes``
414 414 Optional. Space separated list of URI schemes to use this
415 415 authentication entry with. Only used if the prefix doesn't include
416 416 a scheme. Supported schemes are http and https. They will match
417 417 static-http and static-https respectively, as well.
418 418 (default: https)
419 419
420 420 If no suitable authentication entry is found, the user is prompted
421 421 for credentials as usual if required by the remote.
422 422
423 423 ``cmdserver``
424 424 -------------
425 425
426 426 Controls command server settings. (ADVANCED)
427 427
428 428 ``message-encodings``
429 429 List of encodings for the ``m`` (message) channel. The first encoding
430 430 supported by the server will be selected and advertised in the hello
431 431 message. This is useful only when ``ui.message-output`` is set to
432 432 ``channel``. Supported encodings are ``cbor``.
433 433
434 434 ``shutdown-on-interrupt``
435 435 If set to false, the server's main loop will continue running after
436 436 SIGINT received. ``runcommand`` requests can still be interrupted by
437 437 SIGINT. Close the write end of the pipe to shut down the server
438 438 process gracefully.
439 439 (default: True)
440 440
441 441 ``color``
442 442 ---------
443 443
444 444 Configure the Mercurial color mode. For details about how to define your custom
445 445 effect and style see :hg:`help color`.
446 446
447 447 ``mode``
448 448 String: control the method used to output color. One of ``auto``, ``ansi``,
449 449 ``win32``, ``terminfo`` or ``debug``. In auto mode, Mercurial will
450 450 use ANSI mode by default (or win32 mode prior to Windows 10) if it detects a
451 451 terminal. Any invalid value will disable color.
452 452
453 453 ``pagermode``
454 454 String: optional override of ``color.mode`` used with pager.
455 455
456 456 On some systems, terminfo mode may cause problems when using
457 457 color with ``less -R`` as a pager program. less with the -R option
458 458 will only display ECMA-48 color codes, and terminfo mode may sometimes
459 459 emit codes that less doesn't understand. You can work around this by
460 460 either using ansi mode (or auto mode), or by using less -r (which will
461 461 pass through all terminal control codes, not just color control
462 462 codes).
463 463
464 464 On some systems (such as MSYS in Windows), the terminal may support
465 465 a different color mode than the pager program.
466 466
467 467 ``commands``
468 468 ------------
469 469
470 470 ``commit.post-status``
471 471 Show status of files in the working directory after successful commit.
472 472 (default: False)
473 473
474 474 ``merge.require-rev``
475 475 Require that the revision to merge the current commit with be specified on
476 476 the command line. If this is enabled and a revision is not specified, the
477 477 command aborts.
478 478 (default: False)
479 479
480 480 ``push.require-revs``
481 481 Require revisions to push be specified using one or more mechanisms such as
482 482 specifying them positionally on the command line, using ``-r``, ``-b``,
483 483 and/or ``-B`` on the command line, or using ``paths.<path>:pushrev`` in the
484 484 configuration. If this is enabled and revisions are not specified, the
485 485 command aborts.
486 486 (default: False)
487 487
488 488 ``resolve.confirm``
489 489 Confirm before performing action if no filename is passed.
490 490 (default: False)
491 491
492 492 ``resolve.explicit-re-merge``
493 493 Require uses of ``hg resolve`` to specify which action it should perform,
494 494 instead of re-merging files by default.
495 495 (default: False)
496 496
497 497 ``resolve.mark-check``
498 498 Determines what level of checking :hg:`resolve --mark` will perform before
499 499 marking files as resolved. Valid values are ``none``, ``warn``, and
500 500 ``abort``. ``warn`` will output a warning listing the file(s) that still
501 501 have conflict markers in them, but will still mark everything resolved.
502 502 ``abort`` will output the same warning but will not mark things as resolved.
503 503 If --all is passed and this is set to ``abort``, only a warning will be
504 504 shown (an error will not be raised).
505 505 (default: ``none``)
506 506
507 507 ``status.relative``
508 508 Make paths in :hg:`status` output relative to the current directory.
509 509 (default: False)
510 510
511 511 ``status.terse``
512 512 Default value for the --terse flag, which condenses status output.
513 513 (default: empty)
514 514
515 515 ``update.check``
516 516 Determines what level of checking :hg:`update` will perform before moving
517 517 to a destination revision. Valid values are ``abort``, ``none``,
518 518 ``linear``, and ``noconflict``. ``abort`` always fails if the working
519 519 directory has uncommitted changes. ``none`` performs no checking, and may
520 520 result in a merge with uncommitted changes. ``linear`` allows any update
521 521 as long as it follows a straight line in the revision history, and may
522 522 trigger a merge with uncommitted changes. ``noconflict`` will allow any
523 523 update which would not trigger a merge with uncommitted changes, if any
524 524 are present.
525 525 (default: ``linear``)
526 526
527 527 ``update.requiredest``
528 528 Require that the user pass a destination when running :hg:`update`.
529 529 For example, :hg:`update .::` will be allowed, but a plain :hg:`update`
530 530 will be disallowed.
531 531 (default: False)
532 532
533 533 ``committemplate``
534 534 ------------------
535 535
536 536 ``changeset``
537 537 String: configuration in this section is used as the template to
538 538 customize the text shown in the editor when committing.
539 539
540 540 In addition to pre-defined template keywords, commit log specific one
541 541 below can be used for customization:
542 542
543 543 ``extramsg``
544 544 String: Extra message (typically 'Leave message empty to abort
545 545 commit.'). This may be changed by some commands or extensions.
546 546
547 547 For example, the template configuration below shows as same text as
548 548 one shown by default::
549 549
550 550 [committemplate]
551 551 changeset = {desc}\n\n
552 552 HG: Enter commit message. Lines beginning with 'HG:' are removed.
553 553 HG: {extramsg}
554 554 HG: --
555 555 HG: user: {author}\n{ifeq(p2rev, "-1", "",
556 556 "HG: branch merge\n")
557 557 }HG: branch '{branch}'\n{if(activebookmark,
558 558 "HG: bookmark '{activebookmark}'\n") }{subrepos %
559 559 "HG: subrepo {subrepo}\n" }{file_adds %
560 560 "HG: added {file}\n" }{file_mods %
561 561 "HG: changed {file}\n" }{file_dels %
562 562 "HG: removed {file}\n" }{if(files, "",
563 563 "HG: no files changed\n")}
564 564
565 565 ``diff()``
566 566 String: show the diff (see :hg:`help templates` for detail)
567 567
568 568 Sometimes it is helpful to show the diff of the changeset in the editor without
569 569 having to prefix 'HG: ' to each line so that highlighting works correctly. For
570 570 this, Mercurial provides a special string which will ignore everything below
571 571 it::
572 572
573 573 HG: ------------------------ >8 ------------------------
574 574
575 575 For example, the template configuration below will show the diff below the
576 576 extra message::
577 577
578 578 [committemplate]
579 579 changeset = {desc}\n\n
580 580 HG: Enter commit message. Lines beginning with 'HG:' are removed.
581 581 HG: {extramsg}
582 582 HG: ------------------------ >8 ------------------------
583 583 HG: Do not touch the line above.
584 584 HG: Everything below will be removed.
585 585 {diff()}
586 586
587 587 .. note::
588 588
589 589 For some problematic encodings (see :hg:`help win32mbcs` for
590 590 detail), this customization should be configured carefully, to
591 591 avoid showing broken characters.
592 592
593 593 For example, if a multibyte character ending with backslash (0x5c) is
594 594 followed by the ASCII character 'n' in the customized template,
595 595 the sequence of backslash and 'n' is treated as line-feed unexpectedly
596 596 (and the multibyte character is broken, too).
597 597
598 598 Customized template is used for commands below (``--edit`` may be
599 599 required):
600 600
601 601 - :hg:`backout`
602 602 - :hg:`commit`
603 603 - :hg:`fetch` (for merge commit only)
604 604 - :hg:`graft`
605 605 - :hg:`histedit`
606 606 - :hg:`import`
607 607 - :hg:`qfold`, :hg:`qnew` and :hg:`qrefresh`
608 608 - :hg:`rebase`
609 609 - :hg:`shelve`
610 610 - :hg:`sign`
611 611 - :hg:`tag`
612 612 - :hg:`transplant`
613 613
614 614 Configuring items below instead of ``changeset`` allows showing
615 615 customized message only for specific actions, or showing different
616 616 messages for each action.
617 617
618 618 - ``changeset.backout`` for :hg:`backout`
619 619 - ``changeset.commit.amend.merge`` for :hg:`commit --amend` on merges
620 620 - ``changeset.commit.amend.normal`` for :hg:`commit --amend` on other
621 621 - ``changeset.commit.normal.merge`` for :hg:`commit` on merges
622 622 - ``changeset.commit.normal.normal`` for :hg:`commit` on other
623 623 - ``changeset.fetch`` for :hg:`fetch` (implying merge commit)
624 624 - ``changeset.gpg.sign`` for :hg:`sign`
625 625 - ``changeset.graft`` for :hg:`graft`
626 626 - ``changeset.histedit.edit`` for ``edit`` of :hg:`histedit`
627 627 - ``changeset.histedit.fold`` for ``fold`` of :hg:`histedit`
628 628 - ``changeset.histedit.mess`` for ``mess`` of :hg:`histedit`
629 629 - ``changeset.histedit.pick`` for ``pick`` of :hg:`histedit`
630 630 - ``changeset.import.bypass`` for :hg:`import --bypass`
631 631 - ``changeset.import.normal.merge`` for :hg:`import` on merges
632 632 - ``changeset.import.normal.normal`` for :hg:`import` on other
633 633 - ``changeset.mq.qnew`` for :hg:`qnew`
634 634 - ``changeset.mq.qfold`` for :hg:`qfold`
635 635 - ``changeset.mq.qrefresh`` for :hg:`qrefresh`
636 636 - ``changeset.rebase.collapse`` for :hg:`rebase --collapse`
637 637 - ``changeset.rebase.merge`` for :hg:`rebase` on merges
638 638 - ``changeset.rebase.normal`` for :hg:`rebase` on other
639 639 - ``changeset.shelve.shelve`` for :hg:`shelve`
640 640 - ``changeset.tag.add`` for :hg:`tag` without ``--remove``
641 641 - ``changeset.tag.remove`` for :hg:`tag --remove`
642 642 - ``changeset.transplant.merge`` for :hg:`transplant` on merges
643 643 - ``changeset.transplant.normal`` for :hg:`transplant` on other
644 644
645 645 These dot-separated lists of names are treated as hierarchical ones.
646 646 For example, ``changeset.tag.remove`` customizes the commit message
647 647 only for :hg:`tag --remove`, but ``changeset.tag`` customizes the
648 648 commit message for :hg:`tag` regardless of ``--remove`` option.
649 649
650 650 When the external editor is invoked for a commit, the corresponding
651 651 dot-separated list of names without the ``changeset.`` prefix
652 652 (e.g. ``commit.normal.normal``) is in the ``HGEDITFORM`` environment
653 653 variable.
654 654
655 655 In this section, items other than ``changeset`` can be referred from
656 656 others. For example, the configuration to list committed files up
657 657 below can be referred as ``{listupfiles}``::
658 658
659 659 [committemplate]
660 660 listupfiles = {file_adds %
661 661 "HG: added {file}\n" }{file_mods %
662 662 "HG: changed {file}\n" }{file_dels %
663 663 "HG: removed {file}\n" }{if(files, "",
664 664 "HG: no files changed\n")}
665 665
666 666 ``decode/encode``
667 667 -----------------
668 668
669 669 Filters for transforming files on checkout/checkin. This would
670 670 typically be used for newline processing or other
671 671 localization/canonicalization of files.
672 672
673 673 Filters consist of a filter pattern followed by a filter command.
674 674 Filter patterns are globs by default, rooted at the repository root.
675 675 For example, to match any file ending in ``.txt`` in the root
676 676 directory only, use the pattern ``*.txt``. To match any file ending
677 677 in ``.c`` anywhere in the repository, use the pattern ``**.c``.
678 678 For each file only the first matching filter applies.
679 679
680 680 The filter command can start with a specifier, either ``pipe:`` or
681 681 ``tempfile:``. If no specifier is given, ``pipe:`` is used by default.
682 682
683 683 A ``pipe:`` command must accept data on stdin and return the transformed
684 684 data on stdout.
685 685
686 686 Pipe example::
687 687
688 688 [encode]
689 689 # uncompress gzip files on checkin to improve delta compression
690 690 # note: not necessarily a good idea, just an example
691 691 *.gz = pipe: gunzip
692 692
693 693 [decode]
694 694 # recompress gzip files when writing them to the working dir (we
695 695 # can safely omit "pipe:", because it's the default)
696 696 *.gz = gzip
697 697
698 698 A ``tempfile:`` command is a template. The string ``INFILE`` is replaced
699 699 with the name of a temporary file that contains the data to be
700 700 filtered by the command. The string ``OUTFILE`` is replaced with the name
701 701 of an empty temporary file, where the filtered data must be written by
702 702 the command.
703 703
704 704 .. container:: windows
705 705
706 706 .. note::
707 707
708 708 The tempfile mechanism is recommended for Windows systems,
709 709 where the standard shell I/O redirection operators often have
710 710 strange effects and may corrupt the contents of your files.
711 711
712 712 This filter mechanism is used internally by the ``eol`` extension to
713 713 translate line ending characters between Windows (CRLF) and Unix (LF)
714 714 format. We suggest you use the ``eol`` extension for convenience.
715 715
716 716
717 717 ``defaults``
718 718 ------------
719 719
720 720 (defaults are deprecated. Don't use them. Use aliases instead.)
721 721
722 722 Use the ``[defaults]`` section to define command defaults, i.e. the
723 723 default options/arguments to pass to the specified commands.
724 724
725 725 The following example makes :hg:`log` run in verbose mode, and
726 726 :hg:`status` show only the modified files, by default::
727 727
728 728 [defaults]
729 729 log = -v
730 730 status = -m
731 731
732 732 The actual commands, instead of their aliases, must be used when
733 733 defining command defaults. The command defaults will also be applied
734 734 to the aliases of the commands defined.
735 735
736 736
737 737 ``diff``
738 738 --------
739 739
740 740 Settings used when displaying diffs. Everything except for ``unified``
741 741 is a Boolean and defaults to False. See :hg:`help config.annotate`
742 742 for related options for the annotate command.
743 743
744 744 ``git``
745 745 Use git extended diff format.
746 746
747 747 ``nobinary``
748 748 Omit git binary patches.
749 749
750 750 ``nodates``
751 751 Don't include dates in diff headers.
752 752
753 753 ``noprefix``
754 754 Omit 'a/' and 'b/' prefixes from filenames. Ignored in plain mode.
755 755
756 756 ``showfunc``
757 757 Show which function each change is in.
758 758
759 759 ``ignorews``
760 760 Ignore white space when comparing lines.
761 761
762 762 ``ignorewsamount``
763 763 Ignore changes in the amount of white space.
764 764
765 765 ``ignoreblanklines``
766 766 Ignore changes whose lines are all blank.
767 767
768 768 ``unified``
769 769 Number of lines of context to show.
770 770
771 771 ``word-diff``
772 772 Highlight changed words.
773 773
774 774 ``email``
775 775 ---------
776 776
777 777 Settings for extensions that send email messages.
778 778
779 779 ``from``
780 780 Optional. Email address to use in "From" header and SMTP envelope
781 781 of outgoing messages.
782 782
783 783 ``to``
784 784 Optional. Comma-separated list of recipients' email addresses.
785 785
786 786 ``cc``
787 787 Optional. Comma-separated list of carbon copy recipients'
788 788 email addresses.
789 789
790 790 ``bcc``
791 791 Optional. Comma-separated list of blind carbon copy recipients'
792 792 email addresses.
793 793
794 794 ``method``
795 795 Optional. Method to use to send email messages. If value is ``smtp``
796 796 (default), use SMTP (see the ``[smtp]`` section for configuration).
797 797 Otherwise, use as name of program to run that acts like sendmail
798 798 (takes ``-f`` option for sender, list of recipients on command line,
799 799 message on stdin). Normally, setting this to ``sendmail`` or
800 800 ``/usr/sbin/sendmail`` is enough to use sendmail to send messages.
801 801
802 802 ``charsets``
803 803 Optional. Comma-separated list of character sets considered
804 804 convenient for recipients. Addresses, headers, and parts not
805 805 containing patches of outgoing messages will be encoded in the
806 806 first character set to which conversion from local encoding
807 807 (``$HGENCODING``, ``ui.fallbackencoding``) succeeds. If correct
808 808 conversion fails, the text in question is sent as is.
809 809 (default: '')
810 810
811 811 Order of outgoing email character sets:
812 812
813 813 1. ``us-ascii``: always first, regardless of settings
814 814 2. ``email.charsets``: in order given by user
815 815 3. ``ui.fallbackencoding``: if not in email.charsets
816 816 4. ``$HGENCODING``: if not in email.charsets
817 817 5. ``utf-8``: always last, regardless of settings
818 818
819 819 Email example::
820 820
821 821 [email]
822 822 from = Joseph User <joe.user@example.com>
823 823 method = /usr/sbin/sendmail
824 824 # charsets for western Europeans
825 825 # us-ascii, utf-8 omitted, as they are tried first and last
826 826 charsets = iso-8859-1, iso-8859-15, windows-1252
827 827
828 828
829 829 ``extensions``
830 830 --------------
831 831
832 832 Mercurial has an extension mechanism for adding new features. To
833 833 enable an extension, create an entry for it in this section.
834 834
835 835 If you know that the extension is already in Python's search path,
836 836 you can give the name of the module, followed by ``=``, with nothing
837 837 after the ``=``.
838 838
839 839 Otherwise, give a name that you choose, followed by ``=``, followed by
840 840 the path to the ``.py`` file (including the file name extension) that
841 841 defines the extension.
842 842
843 843 To explicitly disable an extension that is enabled in an hgrc of
844 844 broader scope, prepend its path with ``!``, as in ``foo = !/ext/path``
845 845 or ``foo = !`` when path is not supplied.
846 846
847 847 Example for ``~/.hgrc``::
848 848
849 849 [extensions]
850 850 # (the churn extension will get loaded from Mercurial's path)
851 851 churn =
852 852 # (this extension will get loaded from the file specified)
853 853 myfeature = ~/.hgext/myfeature.py
854 854
855 855
856 856 ``format``
857 857 ----------
858 858
859 859 Configuration that controls the repository format. Newer format options are more
860 860 powerful, but incompatible with some older versions of Mercurial. Format options
861 861 are considered at repository initialization only. You need to make a new clone
862 862 for config changes to be taken into account.
863 863
864 864 For more details about repository format and version compatibility, see
865 865 https://www.mercurial-scm.org/wiki/MissingRequirement
866 866
867 867 ``usegeneraldelta``
868 868 Enable or disable the "generaldelta" repository format which improves
869 869 repository compression by allowing "revlog" to store deltas against
870 870 arbitrary revisions instead of the previously stored one. This provides
871 871 significant improvement for repositories with branches.
872 872
873 873 Repositories with this on-disk format require Mercurial version 1.9.
874 874
875 875 Enabled by default.
876 876
877 877 ``dotencode``
878 878 Enable or disable the "dotencode" repository format which enhances
879 879 the "fncache" repository format (which has to be enabled to use
880 880 dotencode) to avoid issues with filenames starting with "._" on
881 881 Mac OS X and spaces on Windows.
882 882
883 883 Repositories with this on-disk format require Mercurial version 1.7.
884 884
885 885 Enabled by default.
886 886
887 887 ``usefncache``
888 888 Enable or disable the "fncache" repository format which enhances
889 889 the "store" repository format (which has to be enabled to use
890 890 fncache) to allow longer filenames and avoids using Windows
891 891 reserved names, e.g. "nul".
892 892
893 893 Repositories with this on-disk format require Mercurial version 1.1.
894 894
895 895 Enabled by default.
896 896
897 897 ``use-persistent-nodemap``
898 898 Enable or disable the "persistent-nodemap" feature which improves
899 899 performance if the rust extensions are available.
900 900
901 901     The "persistent-nodemap" persists the "node -> rev" mapping on disk, removing
902 902     the need to dynamically build that mapping for each Mercurial invocation. This
903 903     significantly reduces the startup cost of various local and server-side
904 904     operations for larger repositories.
905 905
906 906 The performance improving version of this feature is currently only
907 907 implemented in Rust, so people using a version of Mercurial compiled
908 908 without the Rust part might actually suffer some slowdown.
909 909
910 910     Repositories with this on-disk format require Mercurial version 5.4 or above.
911 911
912 912 Disabled by default.
913 913
914 914 ``usestore``
915 915 Enable or disable the "store" repository format which improves
916 916 compatibility with systems that fold case or otherwise mangle
917 917 filenames. Disabling this option will allow you to store longer filenames
918 918 in some situations at the expense of compatibility.
919 919
920 920 Repositories with this on-disk format require Mercurial version 0.9.4.
921 921
922 922 Enabled by default.
923 923
924 924 ``sparse-revlog``
925 925 Enable or disable the ``sparse-revlog`` delta strategy. This format improves
926 926 delta re-use inside revlog. For very branchy repositories, it results in a
927 927 smaller store. For repositories with many revisions, it also helps
928 928 performance (by using shortened delta chains.)
929 929
930 930     Repositories with this on-disk format require Mercurial version 4.7.
931 931
932 932 Enabled by default.
933 933
934 934 ``revlog-compression``
935 935 Compression algorithm used by revlog. Supported values are `zlib` and
936 936 `zstd`. The `zlib` engine is the historical default of Mercurial. `zstd` is
937 937 a newer format that is usually a net win over `zlib`, operating faster at
938 938 better compression rates. Use `zstd` to reduce CPU usage. Multiple values
939 939 can be specified, the first available one will be used.
940 940
941 941 On some systems, the Mercurial installation may lack `zstd` support.
942 942
943 943 Default is `zlib`.
944 944
945 945 ``bookmarks-in-store``
946 946 Store bookmarks in .hg/store/. This means that bookmarks are shared when
947 947 using `hg share` regardless of the `-B` option.
948 948
949 949 Repositories with this on-disk format require Mercurial version 5.1.
950 950
951 951 Disabled by default.
952 952
953 953
954 954 ``graph``
955 955 ---------
956 956
957 957 Web graph view configuration. This section lets you change graph
958 958 elements display properties by branches, for instance to make the
959 959 ``default`` branch stand out.
960 960
961 961 Each line has the following format::
962 962
963 963 <branch>.<argument> = <value>
964 964
965 965 where ``<branch>`` is the name of the branch being
966 966 customized. Example::
967 967
968 968 [graph]
969 969 # 2px width
970 970 default.width = 2
971 971 # red color
972 972 default.color = FF0000
973 973
974 974 Supported arguments:
975 975
976 976 ``width``
977 977 Set branch edges width in pixels.
978 978
979 979 ``color``
980 980 Set branch edges color in hexadecimal RGB notation.
981 981
982 982 ``hooks``
983 983 ---------
984 984
985 985 Commands or Python functions that get automatically executed by
986 986 various actions such as starting or finishing a commit. Multiple
987 987 hooks can be run for the same action by appending a suffix to the
988 988 action. Overriding a site-wide hook can be done by changing its
989 989 value or setting it to an empty string. Hooks can be prioritized
990 990 by adding a prefix of ``priority.`` to the hook name on a new line
991 991 and setting the priority. The default priority is 0.
992 992
993 993 Example ``.hg/hgrc``::
994 994
995 995 [hooks]
996 996 # update working directory after adding changesets
997 997 changegroup.update = hg update
998 998 # do not use the site-wide hook
999 999 incoming =
1000 1000 incoming.email = /my/email/hook
1001 1001 incoming.autobuild = /my/build/hook
1002 1002 # force autobuild hook to run before other incoming hooks
1003 1003 priority.incoming.autobuild = 1
1004 1004
1005 1005 Most hooks are run with environment variables set that give useful
1006 1006 additional information. For each hook below, the environment variables
1007 1007 it is passed are listed with names in the form ``$HG_foo``. The
1008 1008 ``$HG_HOOKTYPE`` and ``$HG_HOOKNAME`` variables are set for all hooks.
1009 1009 They contain the type of hook which triggered the run and the full name
1010 1010 of the hook in the config, respectively. In the example above, this will
1011 1011 be ``$HG_HOOKTYPE=incoming`` and ``$HG_HOOKNAME=incoming.email``.
1012 1012
1013 1013 .. container:: windows
1014 1014
1015 1015 Some basic Unix syntax can be enabled for portability, including ``$VAR``
1016 1016 and ``${VAR}`` style variables. A ``~`` followed by ``\`` or ``/`` will
1017 1017 be expanded to ``%USERPROFILE%`` to simulate a subset of tilde expansion
1018 1018 on Unix. To use a literal ``$`` or ``~``, it must be escaped with a back
1019 1019 slash or inside of a strong quote. Strong quotes will be replaced by
1020 1020 double quotes after processing.
1021 1021
1022 1022 This feature is enabled by adding a prefix of ``tonative.`` to the hook
1023 1023 name on a new line, and setting it to ``True``. For example::
1024 1024
1025 1025 [hooks]
1026 1026 incoming.autobuild = /my/build/hook
1027 1027 # enable translation to cmd.exe syntax for autobuild hook
1028 1028 tonative.incoming.autobuild = True
1029 1029
1030 1030 ``changegroup``
1031 1031 Run after a changegroup has been added via push, pull or unbundle. The ID of
1032 1032 the first new changeset is in ``$HG_NODE`` and last is in ``$HG_NODE_LAST``.
1033 1033 The URL from which changes came is in ``$HG_URL``.
1034 1034
1035 1035 ``commit``
1036 1036 Run after a changeset has been created in the local repository. The ID
1037 1037 of the newly created changeset is in ``$HG_NODE``. Parent changeset
1038 1038 IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1039 1039
1040 1040 ``incoming``
1041 1041 Run after a changeset has been pulled, pushed, or unbundled into
1042 1042 the local repository. The ID of the newly arrived changeset is in
1043 1043 ``$HG_NODE``. The URL that was source of the changes is in ``$HG_URL``.
1044 1044
1045 1045 ``outgoing``
1046 1046 Run after sending changes from the local repository to another. The ID of
1047 1047 first changeset sent is in ``$HG_NODE``. The source of operation is in
1048 1048 ``$HG_SOURCE``. Also see :hg:`help config.hooks.preoutgoing`.
1049 1049
1050 1050 ``post-<command>``
1051 1051 Run after successful invocations of the associated command. The
1052 1052 contents of the command line are passed as ``$HG_ARGS`` and the result
1053 1053 code in ``$HG_RESULT``. Parsed command line arguments are passed as
1054 1054 ``$HG_PATS`` and ``$HG_OPTS``. These contain string representations of
1055 1055 the python data internally passed to <command>. ``$HG_OPTS`` is a
1056 1056 dictionary of options (with unspecified options set to their defaults).
1057 1057 ``$HG_PATS`` is a list of arguments. Hook failure is ignored.
1058 1058
1059 1059 ``fail-<command>``
1060 1060 Run after a failed invocation of an associated command. The contents
1061 1061 of the command line are passed as ``$HG_ARGS``. Parsed command line
1062 1062 arguments are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain
1063 1063 string representations of the python data internally passed to
1064 1064 <command>. ``$HG_OPTS`` is a dictionary of options (with unspecified
1065 1065 options set to their defaults). ``$HG_PATS`` is a list of arguments.
1066 1066 Hook failure is ignored.
1067 1067
1068 1068 ``pre-<command>``
1069 1069 Run before executing the associated command. The contents of the
1070 1070 command line are passed as ``$HG_ARGS``. Parsed command line arguments
1071 1071 are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain string
1072 1072 representations of the data internally passed to <command>. ``$HG_OPTS``
1073 1073 is a dictionary of options (with unspecified options set to their
1074 1074 defaults). ``$HG_PATS`` is a list of arguments. If the hook returns
1075 1075 failure, the command doesn't execute and Mercurial returns the failure
1076 1076 code.
1077 1077
1078 1078 ``prechangegroup``
1079 1079 Run before a changegroup is added via push, pull or unbundle. Exit
1080 1080 status 0 allows the changegroup to proceed. A non-zero status will
1081 1081 cause the push, pull or unbundle to fail. The URL from which changes
1082 1082 will come is in ``$HG_URL``.
1083 1083
1084 1084 ``precommit``
1085 1085 Run before starting a local commit. Exit status 0 allows the
1086 1086 commit to proceed. A non-zero status will cause the commit to fail.
1087 1087 Parent changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1088 1088
1089 1089 ``prelistkeys``
1090 1090 Run before listing pushkeys (like bookmarks) in the
1091 1091 repository. A non-zero status will cause failure. The key namespace is
1092 1092 in ``$HG_NAMESPACE``.
1093 1093
1094 1094 ``preoutgoing``
1095 1095 Run before collecting changes to send from the local repository to
1096 1096 another. A non-zero status will cause failure. This lets you prevent
1097 1097 pull over HTTP or SSH. It can also prevent propagating commits (via
1098 1098 local pull, push (outbound) or bundle commands), but not completely,
1099 1099 since you can just copy files instead. The source of operation is in
1100 1100 ``$HG_SOURCE``. If "serve", the operation is happening on behalf of a remote
1101 1101   SSH or HTTP repository. If "push", "pull" or "bundle", the operation
1102 1102   is happening on behalf of a repository on the same system.
1103 1103
1104 1104 ``prepushkey``
1105 1105 Run before a pushkey (like a bookmark) is added to the
1106 1106 repository. A non-zero status will cause the key to be rejected. The
1107 1107 key namespace is in ``$HG_NAMESPACE``, the key is in ``$HG_KEY``,
1108 1108 the old value (if any) is in ``$HG_OLD``, and the new value is in
1109 1109 ``$HG_NEW``.
1110 1110
1111 1111 ``pretag``
1112 1112 Run before creating a tag. Exit status 0 allows the tag to be
1113 1113 created. A non-zero status will cause the tag to fail. The ID of the
1114 1114 changeset to tag is in ``$HG_NODE``. The name of tag is in ``$HG_TAG``. The
1115 1115 tag is local if ``$HG_LOCAL=1``, or in the repository if ``$HG_LOCAL=0``.
1116 1116
1117 1117 ``pretxnopen``
1118 1118   Run before any new repository transaction is opened. The reason for the
1119 1119 transaction will be in ``$HG_TXNNAME``, and a unique identifier for the
1120 1120 transaction will be in ``HG_TXNID``. A non-zero status will prevent the
1121 1121 transaction from being opened.
1122 1122
1123 1123 ``pretxnclose``
1124 1124 Run right before the transaction is actually finalized. Any repository change
1125 1125 will be visible to the hook program. This lets you validate the transaction
1126 1126 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1127 1127 status will cause the transaction to be rolled back. The reason for the
1128 1128 transaction opening will be in ``$HG_TXNNAME``, and a unique identifier for
1129 1129 the transaction will be in ``HG_TXNID``. The rest of the available data will
1130 1130 vary according the transaction type. New changesets will add ``$HG_NODE``
1131 1131 (the ID of the first added changeset), ``$HG_NODE_LAST`` (the ID of the last
1132 1132 added changeset), ``$HG_URL`` and ``$HG_SOURCE`` variables. Bookmark and
1133 1133 phase changes will set ``HG_BOOKMARK_MOVED`` and ``HG_PHASES_MOVED`` to ``1``
1134 1134 respectively, etc.
1135 1135
1136 1136 ``pretxnclose-bookmark``
1137 1137 Run right before a bookmark change is actually finalized. Any repository
1138 1138 change will be visible to the hook program. This lets you validate the
1139 1139 transaction content or change it. Exit status 0 allows the commit to
1140 1140 proceed. A non-zero status will cause the transaction to be rolled back.
1141 1141 The name of the bookmark will be available in ``$HG_BOOKMARK``, the new
1142 1142 bookmark location will be available in ``$HG_NODE`` while the previous
1143 1143 location will be available in ``$HG_OLDNODE``. In case of a bookmark
1144 1144 creation ``$HG_OLDNODE`` will be empty. In case of deletion ``$HG_NODE``
1145 1145 will be empty.
1146 1146 In addition, the reason for the transaction opening will be in
1147 1147 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1148 1148 ``HG_TXNID``.
1149 1149
1150 1150 ``pretxnclose-phase``
1151 1151 Run right before a phase change is actually finalized. Any repository change
1152 1152 will be visible to the hook program. This lets you validate the transaction
1153 1153 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1154 1154 status will cause the transaction to be rolled back. The hook is called
1155 1155 multiple times, once for each revision affected by a phase change.
1156 1156   The affected node is available in ``$HG_NODE``, the phase in ``$HG_PHASE``
1157 1157   while the previous phase is in ``$HG_OLDPHASE``. In case of new node, ``$HG_OLDPHASE``
1158 1158 will be empty. In addition, the reason for the transaction opening will be in
1159 1159 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1160 1160 ``HG_TXNID``. The hook is also run for newly added revisions. In this case
1161 1161 the ``$HG_OLDPHASE`` entry will be empty.
1162 1162
1163 1163 ``txnclose``
1164 1164 Run after any repository transaction has been committed. At this
1165 1165 point, the transaction can no longer be rolled back. The hook will run
1166 1166 after the lock is released. See :hg:`help config.hooks.pretxnclose` for
1167 1167 details about available variables.
1168 1168
1169 1169 ``txnclose-bookmark``
1170 1170 Run after any bookmark change has been committed. At this point, the
1171 1171 transaction can no longer be rolled back. The hook will run after the lock
1172 1172 is released. See :hg:`help config.hooks.pretxnclose-bookmark` for details
1173 1173 about available variables.
1174 1174
1175 1175 ``txnclose-phase``
1176 1176 Run after any phase change has been committed. At this point, the
1177 1177 transaction can no longer be rolled back. The hook will run after the lock
1178 1178 is released. See :hg:`help config.hooks.pretxnclose-phase` for details about
1179 1179 available variables.
1180 1180
1181 1181 ``txnabort``
1182 1182 Run when a transaction is aborted. See :hg:`help config.hooks.pretxnclose`
1183 1183 for details about available variables.
1184 1184
1185 1185 ``pretxnchangegroup``
1186 1186 Run after a changegroup has been added via push, pull or unbundle, but before
1187 1187 the transaction has been committed. The changegroup is visible to the hook
1188 1188 program. This allows validation of incoming changes before accepting them.
1189 1189 The ID of the first new changeset is in ``$HG_NODE`` and last is in
1190 1190 ``$HG_NODE_LAST``. Exit status 0 allows the transaction to commit. A non-zero
1191 1191 status will cause the transaction to be rolled back, and the push, pull or
1192 1192 unbundle will fail. The URL that was the source of changes is in ``$HG_URL``.
1193 1193
1194 1194 ``pretxncommit``
1195 1195 Run after a changeset has been created, but before the transaction is
1196 1196 committed. The changeset is visible to the hook program. This allows
1197 1197 validation of the commit message and changes. Exit status 0 allows the
1198 1198 commit to proceed. A non-zero status will cause the transaction to
1199 1199 be rolled back. The ID of the new changeset is in ``$HG_NODE``. The parent
1200 1200 changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1201 1201
1202 1202 ``preupdate``
1203 1203 Run before updating the working directory. Exit status 0 allows
1204 1204 the update to proceed. A non-zero status will prevent the update.
1205 1205 The changeset ID of first new parent is in ``$HG_PARENT1``. If updating to a
1206 1206 merge, the ID of second new parent is in ``$HG_PARENT2``.
1207 1207
1208 1208 ``listkeys``
1209 1209 Run after listing pushkeys (like bookmarks) in the repository. The
1210 1210 key namespace is in ``$HG_NAMESPACE``. ``$HG_VALUES`` is a
1211 1211 dictionary containing the keys and values.
1212 1212
1213 1213 ``pushkey``
1214 1214 Run after a pushkey (like a bookmark) is added to the
1215 1215 repository. The key namespace is in ``$HG_NAMESPACE``, the key is in
1216 1216 ``$HG_KEY``, the old value (if any) is in ``$HG_OLD``, and the new
1217 1217 value is in ``$HG_NEW``.
1218 1218
1219 1219 ``tag``
1220 1220 Run after a tag is created. The ID of the tagged changeset is in ``$HG_NODE``.
1221 1221 The name of tag is in ``$HG_TAG``. The tag is local if ``$HG_LOCAL=1``, or in
1222 1222 the repository if ``$HG_LOCAL=0``.
1223 1223
1224 1224 ``update``
1225 1225 Run after updating the working directory. The changeset ID of first
1226 1226 new parent is in ``$HG_PARENT1``. If updating to a merge, the ID of second new
1227 1227 parent is in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the
1228 1228 update failed (e.g. because conflicts were not resolved), ``$HG_ERROR=1``.
1229 1229
1230 1230 .. note::
1231 1231
1232 1232 It is generally better to use standard hooks rather than the
1233 1233 generic pre- and post- command hooks, as they are guaranteed to be
1234 1234 called in the appropriate contexts for influencing transactions.
1235 1235 Also, hooks like "commit" will be called in all contexts that
1236 1236 generate a commit (e.g. tag) and not just the commit command.
1237 1237
1238 1238 .. note::
1239 1239
1240 1240 Environment variables with empty values may not be passed to
1241 1241 hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
1242 1242 will have an empty value under Unix-like platforms for non-merge
1243 1243 changesets, while it will not be available at all under Windows.
1244 1244
1245 1245 The syntax for Python hooks is as follows::
1246 1246
1247 1247 hookname = python:modulename.submodule.callable
1248 1248 hookname = python:/path/to/python/module.py:callable
1249 1249
1250 1250 Python hooks are run within the Mercurial process. Each hook is
1251 1251 called with at least three keyword arguments: a ui object (keyword
1252 1252 ``ui``), a repository object (keyword ``repo``), and a ``hooktype``
1253 1253 keyword that tells what kind of hook is used. Arguments listed as
1254 1254 environment variables above are passed as keyword arguments, with no
1255 1255 ``HG_`` prefix, and names in lower case.
1256 1256
1257 1257 If a Python hook returns a "true" value or raises an exception, this
1258 1258 is treated as a failure.
1259 1259
1260 1260
1261 1261 ``hostfingerprints``
1262 1262 --------------------
1263 1263
1264 1264 (Deprecated. Use ``[hostsecurity]``'s ``fingerprints`` options instead.)
1265 1265
1266 1266 Fingerprints of the certificates of known HTTPS servers.
1267 1267
1268 1268 A HTTPS connection to a server with a fingerprint configured here will
1269 1269 only succeed if the server's certificate matches the fingerprint.
1270 1270 This is very similar to how ssh known hosts works.
1271 1271
1272 1272 The fingerprint is the SHA-1 hash value of the DER encoded certificate.
1273 1273 Multiple values can be specified (separated by spaces or commas). This can
1274 1274 be used to define both old and new fingerprints while a host transitions
1275 1275 to a new certificate.
1276 1276
1277 1277 The CA chain and web.cacerts is not used for servers with a fingerprint.
1278 1278
1279 1279 For example::
1280 1280
1281 1281 [hostfingerprints]
1282 1282 hg.intevation.de = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1283 1283 hg.intevation.org = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1284 1284
1285 1285 ``hostsecurity``
1286 1286 ----------------
1287 1287
1288 1288 Used to specify global and per-host security settings for connecting to
1289 1289 other machines.
1290 1290
1291 1291 The following options control default behavior for all hosts.
1292 1292
1293 1293 ``ciphers``
1294 1294 Defines the cryptographic ciphers to use for connections.
1295 1295
1296 1296 Value must be a valid OpenSSL Cipher List Format as documented at
1297 1297 https://www.openssl.org/docs/manmaster/apps/ciphers.html#CIPHER-LIST-FORMAT.
1298 1298
1299 1299 This setting is for advanced users only. Setting to incorrect values
1300 1300 can significantly lower connection security or decrease performance.
1301 1301 You have been warned.
1302 1302
1303 1303 This option requires Python 2.7.
1304 1304
1305 1305 ``minimumprotocol``
1306 1306 Defines the minimum channel encryption protocol to use.
1307 1307
1308 1308 By default, the highest version of TLS supported by both client and server
1309 1309 is used.
1310 1310
1311 1311 Allowed values are: ``tls1.0``, ``tls1.1``, ``tls1.2``.
1312 1312
1313 1313 When running on an old Python version, only ``tls1.0`` is allowed since
1314 1314 old versions of Python only support up to TLS 1.0.
1315 1315
1316 1316 When running a Python that supports modern TLS versions, the default is
1317 1317 ``tls1.1``. ``tls1.0`` can still be used to allow TLS 1.0. However, this
1318 1318 weakens security and should only be used as a feature of last resort if
1319 1319 a server does not support TLS 1.1+.
1320 1320
1321 1321 Options in the ``[hostsecurity]`` section can have the form
1322 1322 ``hostname``:``setting``. This allows multiple settings to be defined on a
1323 1323 per-host basis.
1324 1324
1325 1325 The following per-host settings can be defined.
1326 1326
1327 1327 ``ciphers``
1328 1328 This behaves like ``ciphers`` as described above except it only applies
1329 1329 to the host on which it is defined.
1330 1330
1331 1331 ``fingerprints``
1332 1332 A list of hashes of the DER encoded peer/remote certificate. Values have
1333 1333 the form ``algorithm``:``fingerprint``. e.g.
1334 1334 ``sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2``.
1335 1335 In addition, colons (``:``) can appear in the fingerprint part.
1336 1336
1337 1337 The following algorithms/prefixes are supported: ``sha1``, ``sha256``,
1338 1338 ``sha512``.
1339 1339
1340 1340 Use of ``sha256`` or ``sha512`` is preferred.
1341 1341
1342 1342 If a fingerprint is specified, the CA chain is not validated for this
1343 1343 host and Mercurial will require the remote certificate to match one
1344 1344 of the fingerprints specified. This means if the server updates its
1345 1345 certificate, Mercurial will abort until a new fingerprint is defined.
1346 1346 This can provide stronger security than traditional CA-based validation
1347 1347 at the expense of convenience.
1348 1348
1349 1349 This option takes precedence over ``verifycertsfile``.
1350 1350
1351 1351 ``minimumprotocol``
1352 1352 This behaves like ``minimumprotocol`` as described above except it
1353 1353 only applies to the host on which it is defined.
1354 1354
1355 1355 ``verifycertsfile``
1356 1356     Path to a file containing a list of PEM encoded certificates used to
1357 1357 verify the server certificate. Environment variables and ``~user``
1358 1358 constructs are expanded in the filename.
1359 1359
1360 1360 The server certificate or the certificate's certificate authority (CA)
1361 1361 must match a certificate from this file or certificate verification
1362 1362 will fail and connections to the server will be refused.
1363 1363
1364 1364 If defined, only certificates provided by this file will be used:
1365 1365 ``web.cacerts`` and any system/default certificates will not be
1366 1366 used.
1367 1367
1368 1368 This option has no effect if the per-host ``fingerprints`` option
1369 1369 is set.
1370 1370
1371 1371 The format of the file is as follows::
1372 1372
1373 1373 -----BEGIN CERTIFICATE-----
1374 1374 ... (certificate in base64 PEM encoding) ...
1375 1375 -----END CERTIFICATE-----
1376 1376 -----BEGIN CERTIFICATE-----
1377 1377 ... (certificate in base64 PEM encoding) ...
1378 1378 -----END CERTIFICATE-----
1379 1379
1380 1380 For example::
1381 1381
1382 1382 [hostsecurity]
1383 1383 hg.example.com:fingerprints = sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
1384 1384 hg2.example.com:fingerprints = sha1:914f1aff87249c09b6859b88b1906d30756491ca, sha1:fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1385 1385 hg3.example.com:fingerprints = sha256:9a:b0:dc:e2:75:ad:8a:b7:84:58:e5:1f:07:32:f1:87:e6:bd:24:22:af:b7:ce:8e:9c:b4:10:cf:b9:f4:0e:d2
1386 1386 foo.example.com:verifycertsfile = /etc/ssl/trusted-ca-certs.pem
1387 1387
1388 1388 To change the default minimum protocol version to TLS 1.2 but to allow TLS 1.1
1389 1389 when connecting to ``hg.example.com``::
1390 1390
1391 1391 [hostsecurity]
1392 1392 minimumprotocol = tls1.2
1393 1393 hg.example.com:minimumprotocol = tls1.1
1394 1394
1395 1395 ``http_proxy``
1396 1396 --------------
1397 1397
1398 1398 Used to access web-based Mercurial repositories through a HTTP
1399 1399 proxy.
1400 1400
1401 1401 ``host``
1402 1402 Host name and (optional) port of the proxy server, for example
1403 1403 "myproxy:8000".
1404 1404
1405 1405 ``no``
1406 1406 Optional. Comma-separated list of host names that should bypass
1407 1407 the proxy.
1408 1408
1409 1409 ``passwd``
1410 1410 Optional. Password to authenticate with at the proxy server.
1411 1411
1412 1412 ``user``
1413 1413 Optional. User name to authenticate with at the proxy server.
1414 1414
1415 1415 ``always``
1416 1416 Optional. Always use the proxy, even for localhost and any entries
1417 1417 in ``http_proxy.no``. (default: False)
1418 1418
1419 1419 ``http``
1420 1420 ----------
1421 1421
1422 1422 Used to configure access to Mercurial repositories via HTTP.
1423 1423
1424 1424 ``timeout``
1425 1425 If set, blocking operations will timeout after that many seconds.
1426 1426 (default: None)
1427 1427
1428 1428 ``merge``
1429 1429 ---------
1430 1430
1431 1431 This section specifies behavior during merges and updates.
1432 1432
1433 1433 ``checkignored``
1434 1434 Controls behavior when an ignored file on disk has the same name as a tracked
1435 1435 file in the changeset being merged or updated to, and has different
1436 1436 contents. Options are ``abort``, ``warn`` and ``ignore``. With ``abort``,
1437 1437 abort on such files. With ``warn``, warn on such files and back them up as
1438 1438 ``.orig``. With ``ignore``, don't print a warning and back them up as
1439 1439 ``.orig``. (default: ``abort``)
1440 1440
1441 1441 ``checkunknown``
1442 1442 Controls behavior when an unknown file that isn't ignored has the same name
1443 1443 as a tracked file in the changeset being merged or updated to, and has
1444 1444 different contents. Similar to ``merge.checkignored``, except for files that
1445 1445 are not ignored. (default: ``abort``)
1446 1446
1447 1447 ``on-failure``
1448 1448 When set to ``continue`` (the default), the merge process attempts to
1449 1449 merge all unresolved files using the merge chosen tool, regardless of
1450 1450 whether previous file merge attempts during the process succeeded or not.
1451 1451 Setting this to ``prompt`` will prompt after any merge failure continue
1452 1452 or halt the merge process. Setting this to ``halt`` will automatically
1453 1453 halt the merge process on any merge tool failure. The merge process
1454 1454 can be restarted by using the ``resolve`` command. When a merge is
1455 1455 halted, the repository is left in a normal ``unresolved`` merge state.
1456 1456 (default: ``continue``)
1457 1457
1458 1458 ``strict-capability-check``
1459 1459 Whether capabilities of internal merge tools are checked strictly
1460 1460 or not, while examining rules to decide merge tool to be used.
1461 1461 (default: False)
1462 1462
1463 1463 ``merge-patterns``
1464 1464 ------------------
1465 1465
1466 1466 This section specifies merge tools to associate with particular file
1467 1467 patterns. Tools matched here will take precedence over the default
1468 1468 merge tool. Patterns are globs by default, rooted at the repository
1469 1469 root.
1470 1470
1471 1471 Example::
1472 1472
1473 1473 [merge-patterns]
1474 1474 **.c = kdiff3
1475 1475 **.jpg = myimgmerge
1476 1476
1477 1477 ``merge-tools``
1478 1478 ---------------
1479 1479
1480 1480 This section configures external merge tools to use for file-level
1481 1481 merges. This section has likely been preconfigured at install time.
1482 1482 Use :hg:`config merge-tools` to check the existing configuration.
1483 1483 Also see :hg:`help merge-tools` for more details.
1484 1484
1485 1485 Example ``~/.hgrc``::
1486 1486
1487 1487 [merge-tools]
1488 1488 # Override stock tool location
1489 1489 kdiff3.executable = ~/bin/kdiff3
1490 1490 # Specify command line
1491 1491 kdiff3.args = $base $local $other -o $output
1492 1492 # Give higher priority
1493 1493 kdiff3.priority = 1
1494 1494
1495 1495 # Changing the priority of preconfigured tool
1496 1496 meld.priority = 0
1497 1497
1498 1498 # Disable a preconfigured tool
1499 1499 vimdiff.disabled = yes
1500 1500
1501 1501 # Define new tool
1502 1502 myHtmlTool.args = -m $local $other $base $output
1503 1503 myHtmlTool.regkey = Software\FooSoftware\HtmlMerge
1504 1504 myHtmlTool.priority = 1
1505 1505
1506 1506 Supported arguments:
1507 1507
1508 1508 ``priority``
1509 1509 The priority in which to evaluate this tool.
1510 1510 (default: 0)
1511 1511
1512 1512 ``executable``
1513 1513 Either just the name of the executable or its pathname.
1514 1514
1515 1515 .. container:: windows
1516 1516
1517 1517 On Windows, the path can use environment variables with ${ProgramFiles}
1518 1518 syntax.
1519 1519
1520 1520 (default: the tool name)
1521 1521
1522 1522 ``args``
1523 1523 The arguments to pass to the tool executable. You can refer to the
1524 1524 files being merged as well as the output file through these
1525 1525 variables: ``$base``, ``$local``, ``$other``, ``$output``.
1526 1526
1527 1527 The meaning of ``$local`` and ``$other`` can vary depending on which action is
1528 1528 being performed. During an update or merge, ``$local`` represents the original
1529 1529 state of the file, while ``$other`` represents the commit you are updating to or
1530 1530 the commit you are merging with. During a rebase, ``$local`` represents the
1531 1531 destination of the rebase, and ``$other`` represents the commit being rebased.
1532 1532
1533 1533 Some operations define custom labels to assist with identifying the revisions,
1534 1534 accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom
1535 1535 labels are not available, these will be ``local``, ``other``, and ``base``,
1536 1536 respectively.
1537 1537 (default: ``$local $base $other``)
1538 1538
1539 1539 ``premerge``
1540 1540 Attempt to run internal non-interactive 3-way merge tool before
1541 1541 launching external tool. Options are ``true``, ``false``, ``keep``,
1542 1542 ``keep-merge3``, or ``keep-mergediff`` (experimental). The ``keep`` option
1543 1543 will leave markers in the file if the premerge fails. The ``keep-merge3``
1544 1544 will do the same but include information about the base of the merge in the
1545 1545 marker (see internal :merge3 in :hg:`help merge-tools`). The
1546 1546 ``keep-mergediff`` option is similar but uses a different marker style
1547 1547 (see internal :mergediff in :hg:`help merge-tools`). (default: True)
1548 1548
1549 1549 ``binary``
1550 1550 This tool can merge binary files. (default: False, unless tool
1551 1551 was selected by file pattern match)
1552 1552
1553 1553 ``symlink``
1554 1554 This tool can merge symlinks. (default: False)
1555 1555
1556 1556 ``check``
1557 1557 A list of merge success-checking options:
1558 1558
1559 1559 ``changed``
1560 1560 Ask whether merge was successful when the merged file shows no changes.
1561 1561 ``conflicts``
1562 1562 Check whether there are conflicts even though the tool reported success.
1563 1563 ``prompt``
1564 1564 Always prompt for merge success, regardless of success reported by tool.
1565 1565
1566 1566 ``fixeol``
1567 1567 Attempt to fix up EOL changes caused by the merge tool.
1568 1568 (default: False)
1569 1569
1570 1570 ``gui``
1571 1571 This tool requires a graphical interface to run. (default: False)
1572 1572
1573 1573 ``mergemarkers``
1574 1574 Controls whether the labels passed via ``$labellocal``, ``$labelother``, and
1575 1575 ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or
1576 1576 ``basic``. If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict
1577 1577 markers generated during premerge will be ``detailed`` if either this option or
1578 1578 the corresponding option in the ``[ui]`` section is ``detailed``.
1579 1579 (default: ``basic``)
1580 1580
1581 1581 ``mergemarkertemplate``
1582 1582 This setting can be used to override ``mergemarker`` from the
1583 1583 ``[command-templates]`` section on a per-tool basis; this applies to the
1584 1584 ``$label``-prefixed variables and to the conflict markers that are generated
1585 1585 if ``premerge`` is ``keep`` or ``keep-merge3``. See the corresponding variable
1586 1586 in ``[ui]`` for more information.
1587 1587
1588 1588 .. container:: windows
1589 1589
1590 1590 ``regkey``
1591 1591 Windows registry key which describes install location of this
1592 1592 tool. Mercurial will search for this key first under
1593 1593 ``HKEY_CURRENT_USER`` and then under ``HKEY_LOCAL_MACHINE``.
1594 1594 (default: None)
1595 1595
1596 1596 ``regkeyalt``
1597 1597 An alternate Windows registry key to try if the first key is not
1598 1598 found. The alternate key uses the same ``regname`` and ``regappend``
1599 1599 semantics of the primary key. The most common use for this key
1600 1600 is to search for 32bit applications on 64bit operating systems.
1601 1601 (default: None)
1602 1602
1603 1603 ``regname``
1604 1604 Name of value to read from specified registry key.
1605 1605 (default: the unnamed (default) value)
1606 1606
1607 1607 ``regappend``
1608 1608 String to append to the value read from the registry, typically
1609 1609 the executable name of the tool.
1610 1610 (default: None)
1611 1611
1612 1612 ``pager``
1613 1613 ---------
1614 1614
1615 1615 Setting used to control when to paginate and with what external tool. See
1616 1616 :hg:`help pager` for details.
1617 1617
1618 1618 ``pager``
1619 1619 Define the external tool used as pager.
1620 1620
1621 1621 If no pager is set, Mercurial uses the environment variable $PAGER.
1622 1622 If neither pager.pager, nor $PAGER is set, a default pager will be
1623 1623 used, typically `less` on Unix and `more` on Windows. Example::
1624 1624
1625 1625 [pager]
1626 1626 pager = less -FRX
1627 1627
1628 1628 ``ignore``
1629 1629 List of commands to disable the pager for. Example::
1630 1630
1631 1631 [pager]
1632 1632 ignore = version, help, update
1633 1633
1634 1634 ``patch``
1635 1635 ---------
1636 1636
1637 1637 Settings used when applying patches, for instance through the 'import'
1638 1638 command or with Mercurial Queues extension.
1639 1639
1640 1640 ``eol``
1641 1641 When set to 'strict' patch content and patched files end of lines
1642 1642 are preserved. When set to ``lf`` or ``crlf``, both files end of
1643 1643 lines are ignored when patching and the result line endings are
1644 1644 normalized to either LF (Unix) or CRLF (Windows). When set to
1645 1645 ``auto``, end of lines are again ignored while patching but line
1646 1646 endings in patched files are normalized to their original setting
1647 1647 on a per-file basis. If target file does not exist or has no end
1648 1648 of line, patch line endings are preserved.
1649 1649 (default: strict)
1650 1650
1651 1651 ``fuzz``
1652 1652 The number of lines of 'fuzz' to allow when applying patches. This
1653 1653 controls how much context the patcher is allowed to ignore when
1654 1654 trying to apply a patch.
1655 1655 (default: 2)
1656 1656
1657 1657 ``paths``
1658 1658 ---------
1659 1659
1660 1660 Assigns symbolic names and behavior to repositories.
1661 1661
1662 1662 Options are symbolic names defining the URL or directory that is the
1663 1663 location of the repository. Example::
1664 1664
1665 1665 [paths]
1666 1666 my_server = https://example.com/my_repo
1667 1667 local_path = /home/me/repo
1668 1668
1669 1669 These symbolic names can be used from the command line. To pull
1670 1670 from ``my_server``: :hg:`pull my_server`. To push to ``local_path``:
1671 1671 :hg:`push local_path`.
1672 1672
1673 1673 Options containing colons (``:``) denote sub-options that can influence
1674 1674 behavior for that specific path. Example::
1675 1675
1676 1676 [paths]
1677 1677 my_server = https://example.com/my_path
1678 1678 my_server:pushurl = ssh://example.com/my_path
1679 1679
1680 1680 The following sub-options can be defined:
1681 1681
1682 1682 ``pushurl``
1683 1683 The URL to use for push operations. If not defined, the location
1684 1684 defined by the path's main entry is used.
1685 1685
1686 1686 ``pushrev``
1687 1687 A revset defining which revisions to push by default.
1688 1688
1689 1689 When :hg:`push` is executed without a ``-r`` argument, the revset
1690 1690 defined by this sub-option is evaluated to determine what to push.
1691 1691
1692 1692 For example, a value of ``.`` will push the working directory's
1693 1693 revision by default.
1694 1694
1695 1695 Revsets specifying bookmarks will not result in the bookmark being
1696 1696 pushed.
1697 1697
1698 1698 The following special named paths exist:
1699 1699
1700 1700 ``default``
1701 1701 The URL or directory to use when no source or remote is specified.
1702 1702
1703 1703 :hg:`clone` will automatically define this path to the location the
1704 1704 repository was cloned from.
1705 1705
1706 1706 ``default-push``
1707 1707 (deprecated) The URL or directory for the default :hg:`push` location.
1708 1708 ``default:pushurl`` should be used instead.
1709 1709
1710 1710 ``phases``
1711 1711 ----------
1712 1712
1713 1713 Specifies default handling of phases. See :hg:`help phases` for more
1714 1714 information about working with phases.
1715 1715
1716 1716 ``publish``
1717 1717 Controls draft phase behavior when working as a server. When true,
1718 1718 pushed changesets are set to public in both client and server and
1719 1719 pulled or cloned changesets are set to public in the client.
1720 1720 (default: True)
1721 1721
1722 1722 ``new-commit``
1723 1723 Phase of newly-created commits.
1724 1724 (default: draft)
1725 1725
1726 1726 ``checksubrepos``
1727 1727 Check the phase of the current revision of each subrepository. Allowed
1728 1728 values are "ignore", "follow" and "abort". For settings other than
1729 1729 "ignore", the phase of the current revision of each subrepository is
1730 1730 checked before committing the parent repository. If any of those phases is
1731 1731 greater than the phase of the parent repository (e.g. if a subrepo is in a
1732 1732 "secret" phase while the parent repo is in "draft" phase), the commit is
1733 1733 either aborted (if checksubrepos is set to "abort") or the higher phase is
1734 1734 used for the parent repository commit (if set to "follow").
1735 1735 (default: follow)
1736 1736
1737 1737
1738 1738 ``profiling``
1739 1739 -------------
1740 1740
1741 1741 Specifies profiling type, format, and file output. Two profilers are
1742 1742 supported: an instrumenting profiler (named ``ls``), and a sampling
1743 1743 profiler (named ``stat``).
1744 1744
1745 1745 In this section description, 'profiling data' stands for the raw data
1746 1746 collected during profiling, while 'profiling report' stands for a
1747 1747 statistical text report generated from the profiling data.
1748 1748
1749 1749 ``enabled``
1750 1750 Enable the profiler.
1751 1751 (default: false)
1752 1752
1753 1753 This is equivalent to passing ``--profile`` on the command line.
1754 1754
1755 1755 ``type``
1756 1756 The type of profiler to use.
1757 1757 (default: stat)
1758 1758
1759 1759 ``ls``
1760 1760 Use Python's built-in instrumenting profiler. This profiler
1761 1761 works on all platforms, but each line number it reports is the
1762 1762 first line of a function. This restriction makes it difficult to
1763 1763 identify the expensive parts of a non-trivial function.
1764 1764 ``stat``
1765 1765 Use a statistical profiler, statprof. This profiler is most
1766 1766 useful for profiling commands that run for longer than about 0.1
1767 1767 seconds.
1768 1768
1769 1769 ``format``
1770 1770 Profiling format. Specific to the ``ls`` instrumenting profiler.
1771 1771 (default: text)
1772 1772
1773 1773 ``text``
1774 1774 Generate a profiling report. When saving to a file, it should be
1775 1775 noted that only the report is saved, and the profiling data is
1776 1776 not kept.
1777 1777 ``kcachegrind``
1778 1778 Format profiling data for kcachegrind use: when saving to a
1779 1779 file, the generated file can directly be loaded into
1780 1780 kcachegrind.
1781 1781
1782 1782 ``statformat``
1783 1783 Profiling format for the ``stat`` profiler.
1784 1784 (default: hotpath)
1785 1785
1786 1786 ``hotpath``
1787 1787 Show a tree-based display containing the hot path of execution (where
1788 1788 most time was spent).
1789 1789 ``bymethod``
1790 1790 Show a table of methods ordered by how frequently they are active.
1791 1791 ``byline``
1792 1792 Show a table of lines in files ordered by how frequently they are active.
1793 1793 ``json``
1794 1794 Render profiling data as JSON.
1795 1795
1796 1796 ``frequency``
1797 1797 Sampling frequency. Specific to the ``stat`` sampling profiler.
1798 1798 (default: 1000)
1799 1799
1800 1800 ``output``
1801 1801 File path where profiling data or report should be saved. If the
1802 1802 file exists, it is replaced. (default: None, data is printed on
1803 1803 stderr)
1804 1804
1805 1805 ``sort``
1806 1806 Sort field. Specific to the ``ls`` instrumenting profiler.
1807 1807 One of ``callcount``, ``reccallcount``, ``totaltime`` and
1808 1808 ``inlinetime``.
1809 1809 (default: inlinetime)
1810 1810
1811 1811 ``time-track``
1812 1812 Control whether the stat profiler tracks ``cpu`` or ``real`` time.
1813 1813 (default: ``cpu`` on Windows, otherwise ``real``)
1814 1814
1815 1815 ``limit``
1816 1816 Number of lines to show. Specific to the ``ls`` instrumenting profiler.
1817 1817 (default: 30)
1818 1818
1819 1819 ``nested``
1820 1820 Show at most this number of lines of drill-down info after each main entry.
1821 1821 This can help explain the difference between Total and Inline.
1822 1822 Specific to the ``ls`` instrumenting profiler.
1823 1823 (default: 0)
1824 1824
1825 1825 ``showmin``
1826 1826 Minimum fraction of samples an entry must have for it to be displayed.
1827 1827 Can be specified as a float between ``0.0`` and ``1.0`` or can have a
1828 1828 ``%`` afterwards to allow values up to ``100``. e.g. ``5%``.
1829 1829
1830 1830 Only used by the ``stat`` profiler.
1831 1831
1832 1832 For the ``hotpath`` format, default is ``0.05``.
1833 1833 For the ``chrome`` format, default is ``0.005``.
1834 1834
1835 1835 The option is unused on other formats.
1836 1836
1837 1837 ``showmax``
1838 1838 Maximum fraction of samples an entry can have before it is ignored in
1839 1839 display. Values format is the same as ``showmin``.
1840 1840
1841 1841 Only used by the ``stat`` profiler.
1842 1842
1843 1843 For the ``chrome`` format, default is ``0.999``.
1844 1844
1845 1845 The option is unused on other formats.
1846 1846
1847 1847 ``showtime``
1848 1848 Show time taken as absolute durations, in addition to percentages.
1849 1849 Only used by the ``hotpath`` format.
1850 1850 (default: true)
1851 1851
1852 1852 ``progress``
1853 1853 ------------
1854 1854
1855 1855 Mercurial commands can draw progress bars that are as informative as
1856 1856 possible. Some progress bars only offer indeterminate information, while others
1857 1857 have a definite end point.
1858 1858
1859 1859 ``debug``
1860 1860 Whether to print debug info when updating the progress bar. (default: False)
1861 1861
1862 1862 ``delay``
1863 1863 Number of seconds (float) before showing the progress bar. (default: 3)
1864 1864
1865 1865 ``changedelay``
1866 1866 Minimum delay before showing a new topic. When set to less than 3 * refresh,
1867 1867 that value will be used instead. (default: 1)
1868 1868
1869 1869 ``estimateinterval``
1870 1870 Maximum sampling interval in seconds for speed and estimated time
1871 1871 calculation. (default: 60)
1872 1872
1873 1873 ``refresh``
1874 1874 Time in seconds between refreshes of the progress bar. (default: 0.1)
1875 1875
1876 1876 ``format``
1877 1877 Format of the progress bar.
1878 1878
1879 1879 Valid entries for the format field are ``topic``, ``bar``, ``number``,
1880 1880 ``unit``, ``estimate``, ``speed``, and ``item``. ``item`` defaults to the
1881 1881 last 20 characters of the item, but this can be changed by adding either
1882 1882 ``-<num>`` which would take the last num characters, or ``+<num>`` for the
1883 1883 first num characters.
1884 1884
1885 1885 (default: topic bar number estimate)
1886 1886
1887 1887 ``width``
1888 1888 If set, the maximum width of the progress information (that is, min(width,
1889 1889 term width) will be used).
1890 1890
1891 1891 ``clear-complete``
1892 1892 Clear the progress bar after it's done. (default: True)
1893 1893
1894 1894 ``disable``
1895 1895 If true, don't show a progress bar.
1896 1896
1897 1897 ``assume-tty``
1898 1898 If true, ALWAYS show a progress bar, unless disable is given.
1899 1899
1900 1900 ``rebase``
1901 1901 ----------
1902 1902
1903 1903 ``evolution.allowdivergence``
1904 1904 Default to False. When True, allow creating divergence when performing
1905 1905 rebase of obsolete changesets.
1906 1906
1907 1907 ``revsetalias``
1908 1908 ---------------
1909 1909
1910 1910 Alias definitions for revsets. See :hg:`help revsets` for details.
1911 1911
1912 1912 ``rewrite``
1913 1913 -----------
1914 1914
1915 1915 ``backup-bundle``
1916 1916 Whether to save stripped changesets to a bundle file. (default: True)
1917 1917
1918 1918 ``update-timestamp``
1919 1919 If true, updates the date and time of the changeset to current. It is only
1920 1920 applicable for `hg amend`, `hg commit --amend` and `hg uncommit` in the
1921 1921 current version.
1922 1922
1923 1923 ``empty-successor``
1924 1924
1925 1925 Control what happens with empty successors that are the result of rewrite
1926 1926 operations. If set to ``skip``, the successor is not created. If set to
1927 1927 ``keep``, the empty successor is created and kept.
1928 1928
1929 1929 Currently, only the rebase and absorb commands consider this configuration.
1930 1930 (EXPERIMENTAL)
1931 1931
1932 1932 ``storage``
1933 1933 -----------
1934 1934
1935 1935 Control the strategy Mercurial uses internally to store history. Options in this
1936 1936 category impact performance and repository size.
1937 1937
1938 1938 ``revlog.optimize-delta-parent-choice``
1939 1939 When storing a merge revision, both parents will be equally considered as
1940 1940 a possible delta base. This results in better delta selection and improved
1941 1941 revlog compression. This option is enabled by default.
1942 1942
1943 1943 Turning this option off can result in large increase of repository size for
1944 1944 repository with many merges.
1945 1945
1946 1946 ``revlog.persistent-nodemap.mmap``
1947 1947 Whether to use the Operating System "memory mapping" feature (when
1948 1948 possible) to access the persistent nodemap data. This improves performance
1949 1949 and reduces memory pressure.
1950 1950
1951 1951 Default to True.
1952 1952
1953 1953 For details on the "persistent-nodemap" feature, see:
1954 1954 :hg:`help config format.use-persistent-nodemap`.
1955 1955
1956 1956 ``revlog.persistent-nodemap.slow-path``
1957 1957 Control the behavior of Mercurial when using a repository with "persistent"
1958 1958 nodemap with an installation of Mercurial without a fast implementation for
1959 1959 the feature:
1960 1960
1961 1961 ``allow``: Silently use the slower implementation to access the repository.
1962
1963 Default to "allow"
1962 ``warn``: Warn, but use the slower implementation to access the repository.
1963
1964 Default to ``warn``
1964 1965
1965 1966 For details on the "persistent-nodemap" feature, see:
1966 1967 :hg:`help config format.use-persistent-nodemap`.
1967 1968
1968 1969 ``revlog.reuse-external-delta-parent``
1969 1970 Control the order in which delta parents are considered when adding new
1970 1971 revisions from an external source.
1971 1972 (typically: apply bundle from `hg pull` or `hg push`).
1972 1973
1973 1974 New revisions are usually provided as a delta against other revisions. By
1974 1975 default, Mercurial will try to reuse this delta first, therefore using the
1975 1976 same "delta parent" as the source. Directly using delta's from the source
1976 1977 reduces CPU usage and usually speeds up operation. However, in some case,
1977 1978 the source might have sub-optimal delta bases and forcing their reevaluation
1978 1979 is useful. For example, pushes from an old client could have sub-optimal
1980 1981 delta's parent that the server wants to optimize. (lack of general delta, bad
1980 1981 parents, choice, lack of sparse-revlog, etc).
1981 1982
1982 1983 This option is enabled by default. Turning it off will ensure bad delta
1983 1984 parent choices from older client do not propagate to this repository, at
1984 1985 the cost of a small increase in CPU consumption.
1985 1986
1987 1988 Note: this option only controls the order in which delta parents are
1987 1988 considered. Even when disabled, the existing delta from the source will be
1988 1989 reused if the same delta parent is selected.
1989 1990
1990 1991 ``revlog.reuse-external-delta``
1991 1992 Control the reuse of delta from external source.
1992 1993 (typically: apply bundle from `hg pull` or `hg push`).
1993 1994
1994 1995 New revisions are usually provided as a delta against another revision. By
1995 1996 default, Mercurial will not recompute the same delta again, trusting
1996 1997 externally provided deltas. There have been rare cases of small adjustment
1997 1998 to the diffing algorithm in the past. So in some rare case, recomputing
1998 1999 delta provided by ancient clients can provide better results. Disabling
1999 2000 this option means going through a full delta recomputation for all incoming
2000 2001 revisions. It means a large increase in CPU usage and will slow operations
2001 2002 down.
2002 2003
2003 2004 This option is enabled by default. When disabled, it also disables the
2004 2005 related ``storage.revlog.reuse-external-delta-parent`` option.
2005 2006
2006 2007 ``revlog.zlib.level``
2007 2008 Zlib compression level used when storing data into the repository. Accepted
2008 2009 values range from 1 (lowest compression) to 9 (highest compression). Zlib
2009 2010 default value is 6.
2010 2011
2011 2012
2012 2013 ``revlog.zstd.level``
2013 2014 zstd compression level used when storing data into the repository. Accepted
2014 2015 values range from 1 (lowest compression) to 22 (highest compression).
2015 2016 (default 3)
2016 2017
2017 2018 ``server``
2018 2019 ----------
2019 2020
2020 2021 Controls generic server settings.
2021 2022
2022 2023 ``bookmarks-pushkey-compat``
2023 2024 Trigger the pushkey hook when bookmark updates are pushed. This config exists
2024 2025 for compatibility purposes. (default: True)
2025 2026
2026 2027 If you use ``pushkey`` and ``pre-pushkey`` hooks to control bookmark
2027 2028 movement we recommend you migrate them to ``txnclose-bookmark`` and
2028 2029 ``pretxnclose-bookmark``.
2029 2030
2030 2031 ``compressionengines``
2031 2032 List of compression engines and their relative priority to advertise
2032 2033 to clients.
2033 2034
2034 2035 The order of compression engines determines their priority, the first
2035 2036 having the highest priority. If a compression engine is not listed
2036 2037 here, it won't be advertised to clients.
2037 2038
2038 2039 If not set (the default), built-in defaults are used. Run
2039 2040 :hg:`debuginstall` to list available compression engines and their
2040 2041 default wire protocol priority.
2041 2042
2042 2043 Older Mercurial clients only support zlib compression and this setting
2043 2044 has no effect for legacy clients.
2044 2045
2045 2046 ``uncompressed``
2046 2047 Whether to allow clients to clone a repository using the
2047 2048 uncompressed streaming protocol. This transfers about 40% more
2048 2049 data than a regular clone, but uses less memory and CPU on both
2049 2050 server and client. Over a LAN (100 Mbps or better) or a very fast
2050 2051 WAN, an uncompressed streaming clone is a lot faster (~10x) than a
2051 2052 regular clone. Over most WAN connections (anything slower than
2052 2053 about 6 Mbps), uncompressed streaming is slower, because of the
2053 2054 extra data transfer overhead. This mode will also temporarily hold
2054 2055 the write lock while determining what data to transfer.
2055 2056 (default: True)
2056 2057
2057 2058 ``uncompressedallowsecret``
2058 2059 Whether to allow stream clones when the repository contains secret
2059 2060 changesets. (default: False)
2060 2061
2061 2062 ``preferuncompressed``
2062 2063 When set, clients will try to use the uncompressed streaming
2063 2064 protocol. (default: False)
2064 2065
2065 2066 ``disablefullbundle``
2066 2067 When set, servers will refuse attempts to do pull-based clones.
2067 2068 If this option is set, ``preferuncompressed`` and/or clone bundles
2068 2069 are highly recommended. Partial clones will still be allowed.
2069 2070 (default: False)
2070 2071
2071 2072 ``streamunbundle``
2072 2073 When set, servers will apply data sent from the client directly,
2073 2074 otherwise it will be written to a temporary file first. This option
2074 2075 effectively prevents concurrent pushes.
2075 2076
2076 2077 ``pullbundle``
2077 2078 When set, the server will check pullbundle.manifest for bundles
2078 2079 covering the requested heads and common nodes. The first matching
2079 2080 entry will be streamed to the client.
2080 2081
2081 2082 For HTTP transport, the stream will still use zlib compression
2082 2083 for older clients.
2083 2084
2084 2085 ``concurrent-push-mode``
2085 2086 Level of allowed race condition between two pushing clients.
2086 2087
2087 2088 - 'strict': push is aborted if another client touched the repository
2088 2089 while the push was preparing.
2089 2090 - 'check-related': push is only aborted if it affects a head that was also
2090 2091 affected while the push was preparing. (default since 5.4)
2091 2092
2092 2093 'check-related' only takes effect for compatible clients (version
2093 2094 4.3 and later). Older clients will use 'strict'.
2094 2095
2095 2096 ``validate``
2096 2097 Whether to validate the completeness of pushed changesets by
2097 2098 checking that all new file revisions specified in manifests are
2098 2099 present. (default: False)
2099 2100
2100 2101 ``maxhttpheaderlen``
2101 2102 Instruct HTTP clients not to send request headers longer than this
2102 2103 many bytes. (default: 1024)
2103 2104
2104 2105 ``bundle1``
2105 2106 Whether to allow clients to push and pull using the legacy bundle1
2106 2107 exchange format. (default: True)
2107 2108
2108 2109 ``bundle1gd``
2109 2110 Like ``bundle1`` but only used if the repository is using the
2110 2111 *generaldelta* storage format. (default: True)
2111 2112
2112 2113 ``bundle1.push``
2113 2114 Whether to allow clients to push using the legacy bundle1 exchange
2114 2115 format. (default: True)
2115 2116
2116 2117 ``bundle1gd.push``
2117 2118 Like ``bundle1.push`` but only used if the repository is using the
2118 2119 *generaldelta* storage format. (default: True)
2119 2120
2120 2121 ``bundle1.pull``
2121 2122 Whether to allow clients to pull using the legacy bundle1 exchange
2122 2123 format. (default: True)
2123 2124
2124 2125 ``bundle1gd.pull``
2125 2126 Like ``bundle1.pull`` but only used if the repository is using the
2126 2127 *generaldelta* storage format. (default: True)
2127 2128
2128 2129 Large repositories using the *generaldelta* storage format should
2129 2130 consider setting this option because converting *generaldelta*
2130 2131 repositories to the exchange format required by the bundle1 data
2131 2132 format can consume a lot of CPU.
2132 2133
2133 2134 ``bundle2.stream``
2134 2135 Whether to allow clients to pull using the bundle2 streaming protocol.
2135 2136 (default: True)
2136 2137
2137 2138 ``zliblevel``
2138 2139 Integer between ``-1`` and ``9`` that controls the zlib compression level
2139 2140 for wire protocol commands that send zlib compressed output (notably the
2140 2141 commands that send repository history data).
2141 2142
2142 2143 The default (``-1``) uses the default zlib compression level, which is
2143 2144 likely equivalent to ``6``. ``0`` means no compression. ``9`` means
2144 2145 maximum compression.
2145 2146
2146 2147 Setting this option allows server operators to make trade-offs between
2147 2148 bandwidth and CPU used. Lowering the compression lowers CPU utilization
2148 2149 but sends more bytes to clients.
2149 2150
2150 2151 This option only impacts the HTTP server.
2151 2152
2152 2153 ``zstdlevel``
2153 2154 Integer between ``1`` and ``22`` that controls the zstd compression level
2154 2155 for wire protocol commands. ``1`` is the minimal amount of compression and
2155 2156 ``22`` is the highest amount of compression.
2156 2157
2157 2158 The default (``3``) should be significantly faster than zlib while likely
2158 2159 delivering better compression ratios.
2159 2160
2160 2161 This option only impacts the HTTP server.
2161 2162
2162 2163 See also ``server.zliblevel``.
2163 2164
2164 2165 ``view``
2165 2166 Repository filter used when exchanging revisions with the peer.
2166 2167
2167 2168 The default view (``served``) excludes secret and hidden changesets.
2168 2169 Another useful value is ``immutable`` (no draft, secret or hidden
2169 2170 changesets). (EXPERIMENTAL)
2170 2171
2171 2172 ``smtp``
2172 2173 --------
2173 2174
2174 2175 Configuration for extensions that need to send email messages.
2175 2176
2176 2177 ``host``
2177 2178 Host name of mail server, e.g. "mail.example.com".
2178 2179
2179 2180 ``port``
2180 2181 Optional. Port to connect to on mail server. (default: 465 if
2181 2182 ``tls`` is smtps; 25 otherwise)
2182 2183
2183 2184 ``tls``
2184 2185 Optional. Method to enable TLS when connecting to mail server: starttls,
2185 2186 smtps or none. (default: none)
2186 2187
2187 2188 ``username``
2188 2189 Optional. User name for authenticating with the SMTP server.
2189 2190 (default: None)
2190 2191
2191 2192 ``password``
2192 2193 Optional. Password for authenticating with the SMTP server. If not
2193 2194 specified, interactive sessions will prompt the user for a
2194 2195 password; non-interactive sessions will fail. (default: None)
2195 2196
2196 2197 ``local_hostname``
2197 2198 Optional. The hostname that the sender can use to identify
2198 2199 itself to the MTA.
2199 2200
2200 2201
2201 2202 ``subpaths``
2202 2203 ------------
2203 2204
2204 2205 Subrepository source URLs can go stale if a remote server changes name
2205 2206 or becomes temporarily unavailable. This section lets you define
2206 2207 rewrite rules of the form::
2207 2208
2208 2209 <pattern> = <replacement>
2209 2210
2210 2211 where ``pattern`` is a regular expression matching a subrepository
2211 2212 source URL and ``replacement`` is the replacement string used to
2212 2213 rewrite it. Groups can be matched in ``pattern`` and referenced in
2213 2214 ``replacements``. For instance::
2214 2215
2215 2216 http://server/(.*)-hg/ = http://hg.server/\1/
2216 2217
2217 2218 rewrites ``http://server/foo-hg/`` into ``http://hg.server/foo/``.
2218 2219
2219 2220 Relative subrepository paths are first made absolute, and the
2220 2221 rewrite rules are then applied on the full (absolute) path. If ``pattern``
2221 2222 doesn't match the full path, an attempt is made to apply it on the
2222 2223 relative path alone. The rules are applied in definition order.
2223 2224
2224 2225 ``subrepos``
2225 2226 ------------
2226 2227
2227 2228 This section contains options that control the behavior of the
2228 2229 subrepositories feature. See also :hg:`help subrepos`.
2229 2230
2230 2231 Security note: auditing in Mercurial is known to be insufficient to
2231 2232 prevent clone-time code execution with carefully constructed Git
2231 2232 subrepos. It is unknown if a similar defect is present in Subversion
2233 2234 subrepos. Both Git and Subversion subrepos are disabled by default
2234 2235 out of security concerns. These subrepo types can be enabled using
2235 2236 the respective options below.
2236 2237
2237 2238 ``allowed``
2238 2239 Whether subrepositories are allowed in the working directory.
2239 2240
2240 2241 When false, commands involving subrepositories (like :hg:`update`)
2241 2242 will fail for all subrepository types.
2242 2243 (default: true)
2243 2244
2244 2245 ``hg:allowed``
2245 2246 Whether Mercurial subrepositories are allowed in the working
2246 2247 directory. This option only has an effect if ``subrepos.allowed``
2247 2248 is true.
2248 2249 (default: true)
2249 2250
2250 2251 ``git:allowed``
2251 2252 Whether Git subrepositories are allowed in the working directory.
2252 2253 This option only has an effect if ``subrepos.allowed`` is true.
2253 2254
2254 2255 See the security note above before enabling Git subrepos.
2255 2256 (default: false)
2256 2257
2257 2258 ``svn:allowed``
2258 2259 Whether Subversion subrepositories are allowed in the working
2259 2260 directory. This option only has an effect if ``subrepos.allowed``
2260 2261 is true.
2261 2262
2262 2263 See the security note above before enabling Subversion subrepos.
2263 2264 (default: false)
2264 2265
2265 2266 ``templatealias``
2266 2267 -----------------
2267 2268
2268 2269 Alias definitions for templates. See :hg:`help templates` for details.
2269 2270
2270 2271 ``templates``
2271 2272 -------------
2272 2273
2273 2274 Use the ``[templates]`` section to define template strings.
2274 2275 See :hg:`help templates` for details.
2275 2276
2276 2277 ``trusted``
2277 2278 -----------
2278 2279
2279 2280 Mercurial will not use the settings in the
2280 2281 ``.hg/hgrc`` file from a repository if it doesn't belong to a trusted
2281 2282 user or to a trusted group, as various hgrc features allow arbitrary
2282 2283 commands to be run. This issue is often encountered when configuring
2283 2284 hooks or extensions for shared repositories or servers. However,
2284 2285 the web interface will use some safe settings from the ``[web]``
2285 2286 section.
2286 2287
2287 2288 This section specifies what users and groups are trusted. The
2288 2289 current user is always trusted. To trust everybody, list a user or a
2289 2290 group with name ``*``. These settings must be placed in an
2290 2291 *already-trusted file* to take effect, such as ``$HOME/.hgrc`` of the
2291 2292 user or service running Mercurial.
2292 2293
2293 2294 ``users``
2294 2295 Comma-separated list of trusted users.
2295 2296
2296 2297 ``groups``
2297 2298 Comma-separated list of trusted groups.
2298 2299
2299 2300
2300 2301 ``ui``
2301 2302 ------
2302 2303
2303 2304 User interface controls.
2304 2305
2305 2306 ``archivemeta``
2306 2307 Whether to include the .hg_archival.txt file containing meta data
2307 2308 (hashes for the repository base and for tip) in archives created
2308 2309 by the :hg:`archive` command or downloaded via hgweb.
2309 2310 (default: True)
2310 2311
2311 2312 ``askusername``
2312 2313 Whether to prompt for a username when committing. If True, and
2313 2314 neither ``$HGUSER`` nor ``$EMAIL`` has been specified, then the user will
2314 2315 be prompted to enter a username. If no username is entered, the
2315 2316 default ``USER@HOST`` is used instead.
2316 2317 (default: False)
2317 2318
2318 2319 ``clonebundles``
2319 2320 Whether the "clone bundles" feature is enabled.
2320 2321
2321 2322 When enabled, :hg:`clone` may download and apply a server-advertised
2322 2323 bundle file from a URL instead of using the normal exchange mechanism.
2323 2324
2324 2325 This can likely result in faster and more reliable clones.
2325 2326
2326 2327 (default: True)
2327 2328
2328 2329 ``clonebundlefallback``
2329 2330 Whether failure to apply an advertised "clone bundle" from a server
2330 2331 should result in fallback to a regular clone.
2331 2332
2332 2333 This is disabled by default because servers advertising "clone
2333 2334 bundles" often do so to reduce server load. If advertised bundles
2334 2335 start mass failing and clients automatically fall back to a regular
2335 2336 clone, this would add significant and unexpected load to the server
2336 2337 since the server is expecting clone operations to be offloaded to
2337 2338 pre-generated bundles. Failing fast (the default behavior) ensures
2338 2339 clients don't overwhelm the server when "clone bundle" application
2339 2340 fails.
2340 2341
2341 2342 (default: False)
2342 2343
2343 2344 ``clonebundleprefers``
2344 2345 Defines preferences for which "clone bundles" to use.
2345 2346
2346 2347 Servers advertising "clone bundles" may advertise multiple available
2347 2348 bundles. Each bundle may have different attributes, such as the bundle
2348 2349 type and compression format. This option is used to prefer a particular
2349 2350 bundle over another.
2350 2351
2351 2352 The following keys are defined by Mercurial:
2352 2353
2353 2354 BUNDLESPEC
2354 2355 A bundle type specifier. These are strings passed to :hg:`bundle -t`.
2355 2356 e.g. ``gzip-v2`` or ``bzip2-v1``.
2356 2357
2357 2358 COMPRESSION
2358 2359 The compression format of the bundle. e.g. ``gzip`` and ``bzip2``.
2359 2360
2360 2361 Server operators may define custom keys.
2361 2362
2362 2363 Example values: ``COMPRESSION=bzip2``,
2363 2364 ``BUNDLESPEC=gzip-v2, COMPRESSION=gzip``.
2364 2365
2365 2366 By default, the first bundle advertised by the server is used.
2366 2367
2367 2368 ``color``
2368 2369 When to colorize output. Possible values are Boolean ("yes" or "no"), or
2369 2370 "debug", or "always". (default: "yes"). "yes" will use color whenever it
2370 2371 seems possible. See :hg:`help color` for details.
2371 2372
2372 2373 ``commitsubrepos``
2373 2374 Whether to commit modified subrepositories when committing the
2374 2375 parent repository. If False and one subrepository has uncommitted
2375 2376 changes, abort the commit.
2376 2377 (default: False)
2377 2378
2378 2379 ``debug``
2379 2380 Print debugging information. (default: False)
2380 2381
2381 2382 ``editor``
2382 2383 The editor to use during a commit. (default: ``$EDITOR`` or ``vi``)
2383 2384
2384 2385 ``fallbackencoding``
2385 2386 Encoding to try if it's not possible to decode the changelog using
2386 2387 UTF-8. (default: ISO-8859-1)
2387 2388
2388 2389 ``graphnodetemplate``
2389 2390 (DEPRECATED) Use ``command-templates.graphnode`` instead.
2390 2391
2391 2392 ``ignore``
2392 2393 A file to read per-user ignore patterns from. This file should be
2393 2394 in the same format as a repository-wide .hgignore file. Filenames
2394 2395 are relative to the repository root. This option supports hook syntax,
2395 2396 so if you want to specify multiple ignore files, you can do so by
2396 2397 setting something like ``ignore.other = ~/.hgignore2``. For details
2397 2398 of the ignore file format, see the ``hgignore(5)`` man page.
2398 2399
2399 2400 ``interactive``
2400 2401 Allow prompting the user. (default: True)
2401 2402
2402 2403 ``interface``
2403 2404 Select the default interface for interactive features (default: text).
2404 2405 Possible values are 'text' and 'curses'.
2405 2406
2406 2407 ``interface.chunkselector``
2407 2408 Select the interface for change recording (e.g. :hg:`commit -i`).
2408 2409 Possible values are 'text' and 'curses'.
2409 2410 This config overrides the interface specified by ui.interface.
2410 2411
2411 2412 ``large-file-limit``
2412 2413 Largest file size that gives no memory use warning.
2413 2414 Possible values are integers or 0 to disable the check.
2414 2415 (default: 10000000)
2415 2416
2416 2417 ``logtemplate``
2417 2418 (DEPRECATED) Use ``command-templates.log`` instead.
2418 2419
2419 2420 ``merge``
2420 2421 The conflict resolution program to use during a manual merge.
2421 2422 For more information on merge tools see :hg:`help merge-tools`.
2422 2423 For configuring merge tools see the ``[merge-tools]`` section.
2423 2424
2424 2425 ``mergemarkers``
2425 2426 Sets the merge conflict marker label styling. The ``detailed`` style
2426 2427 uses the ``command-templates.mergemarker`` setting to style the labels.
2427 2428 The ``basic`` style just uses 'local' and 'other' as the marker label.
2428 2429 One of ``basic`` or ``detailed``.
2429 2430 (default: ``basic``)
2430 2431
2431 2432 ``mergemarkertemplate``
2432 2433 (DEPRECATED) Use ``command-templates.mergemarker`` instead.
2433 2434
2434 2435 ``message-output``
2435 2436 Where to write status and error messages. (default: ``stdio``)
2436 2437
2437 2438 ``channel``
2438 2439 Use separate channel for structured output. (Command-server only)
2439 2440 ``stderr``
2440 2441 Everything to stderr.
2441 2442 ``stdio``
2442 2443 Status to stdout, and error to stderr.
2443 2444
2444 2445 ``origbackuppath``
2445 2446 The path to a directory used to store generated .orig files. If the path is
2446 2447 not a directory, one will be created. If set, files stored in this
2447 2448 directory have the same name as the original file and do not have a .orig
2448 2449 suffix.
2449 2450
2450 2451 ``paginate``
2451 2452 Control the pagination of command output (default: True). See :hg:`help pager`
2452 2453 for details.
2453 2454
2454 2455 ``patch``
2455 2456 An optional external tool that ``hg import`` and some extensions
2456 2457 will use for applying patches. By default Mercurial uses an
2457 2458 internal patch utility. The external tool must work as the common
2458 2459 Unix ``patch`` program. In particular, it must accept a ``-p``
2459 2460 argument to strip patch headers, a ``-d`` argument to specify the
2460 2461 current directory, a file name to patch, and a patch file to take
2461 2462 from stdin.
2462 2463
2463 2464 It is possible to specify a patch tool together with extra
2464 2465 arguments. For example, setting this option to ``patch --merge``
2465 2466 will use the ``patch`` program with its 2-way merge option.
2466 2467
2467 2468 ``portablefilenames``
2468 2469 Check for portable filenames. Can be ``warn``, ``ignore`` or ``abort``.
2469 2470 (default: ``warn``)
2470 2471
2471 2472 ``warn``
2472 2473 Print a warning message on POSIX platforms, if a file with a non-portable
2473 2474 filename is added (e.g. a file with a name that can't be created on
2474 2475 Windows because it contains reserved parts like ``AUX``, reserved
2475 2476 characters like ``:``, or would cause a case collision with an existing
2476 2477 file).
2477 2478
2478 2479 ``ignore``
2479 2480 Don't print a warning.
2480 2481
2481 2482 ``abort``
2482 2483 The command is aborted.
2483 2484
2484 2485 ``true``
2485 2486 Alias for ``warn``.
2486 2487
2487 2488 ``false``
2488 2489 Alias for ``ignore``.
2489 2490
2490 2491 .. container:: windows
2491 2492
2492 2493 On Windows, this configuration option is ignored and the command aborted.
2493 2494
2494 2495 ``pre-merge-tool-output-template``
2495 2496 (DEPRECATED) Use ``command-templates.pre-merge-tool-output`` instead.
2496 2497
2497 2498 ``quiet``
2498 2499 Reduce the amount of output printed.
2499 2500 (default: False)
2500 2501
2501 2502 ``relative-paths``
2502 2503 Prefer relative paths in the UI.
2503 2504
2504 2505 ``remotecmd``
2505 2506 Remote command to use for clone/push/pull operations.
2506 2507 (default: ``hg``)
2507 2508
2508 2509 ``report_untrusted``
2509 2510 Warn if a ``.hg/hgrc`` file is ignored due to not being owned by a
2510 2511 trusted user or group.
2511 2512 (default: True)
2512 2513
2513 2514 ``slash``
2514 2515 (Deprecated. Use ``slashpath`` template filter instead.)
2515 2516
2516 2517 Display paths using a slash (``/``) as the path separator. This
2517 2518 only makes a difference on systems where the default path
2518 2519 separator is not the slash character (e.g. Windows uses the
2519 2520 backslash character (``\``)).
2520 2521 (default: False)
2521 2522
2522 2523 ``statuscopies``
2523 2524 Display copies in the status command.
2524 2525
2525 2526 ``ssh``
2526 2527 Command to use for SSH connections. (default: ``ssh``)
2527 2528
2528 2529 ``ssherrorhint``
2529 2530 A hint shown to the user in the case of SSH error (e.g.
2530 2531 ``Please see http://company/internalwiki/ssh.html``)
2531 2532
2532 2533 ``strict``
2533 2534 Require exact command names, instead of allowing unambiguous
2534 2535 abbreviations. (default: False)
2535 2536
2536 2537 ``style``
2537 2538 Name of style to use for command output.
2538 2539
2539 2540 ``supportcontact``
2540 2541 A URL where users should report a Mercurial traceback. Use this if you are a
2541 2542 large organisation with its own Mercurial deployment process and crash
2542 2543 reports should be addressed to your internal support.
2543 2544
2544 2545 ``textwidth``
2545 2546 Maximum width of help text. A longer line generated by ``hg help`` or
2546 2547 ``hg subcommand --help`` will be broken after white space to get this
2547 2548 width or the terminal width, whichever comes first.
2548 2549 A non-positive value will disable this and the terminal width will be
2549 2550 used. (default: 78)
2550 2551
2551 2552 ``timeout``
2552 2553 The timeout used when a lock is held (in seconds), a negative value
2553 2554 means no timeout. (default: 600)
2554 2555
2555 2556 ``timeout.warn``
2556 2557 Time (in seconds) before a warning is printed about held lock. A negative
2557 2558 value means no warning. (default: 0)
2558 2559
2559 2560 ``traceback``
2560 2561 Mercurial always prints a traceback when an unknown exception
2561 2562 occurs. Setting this to True will make Mercurial print a traceback
2562 2563 on all exceptions, even those recognized by Mercurial (such as
2563 2564 IOError or MemoryError). (default: False)
2564 2565
2565 2566 ``tweakdefaults``
2566 2567
2567 2568 By default Mercurial's behavior changes very little from release
2568 2569 to release, but over time the recommended config settings
2569 2570 shift. Enable this config to opt in to get automatic tweaks to
2570 2571 Mercurial's behavior over time. This config setting will have no
2571 2572 effect if ``HGPLAIN`` is set or ``HGPLAINEXCEPT`` is set and does
2572 2573 not include ``tweakdefaults``. (default: False)
2573 2574
2574 2575 It currently means::
2575 2576
2576 2577 .. tweakdefaultsmarker
2577 2578
2578 2579 ``username``
2579 2580 The committer of a changeset created when running "commit".
2580 2581 Typically a person's name and email address, e.g. ``Fred Widget
2581 2582 <fred@example.com>``. Environment variables in the
2582 2583 username are expanded.
2583 2584
2584 2585 (default: ``$EMAIL`` or ``username@hostname``. If the username in
2585 2586 hgrc is empty, e.g. if the system admin set ``username =`` in the
2586 2587 system hgrc, it has to be specified manually or in a different
2587 2588 hgrc file)
2588 2589
2589 2590 ``verbose``
2590 2591 Increase the amount of output printed. (default: False)
2591 2592
2592 2593
2593 2594 ``command-templates``
2594 2595 ---------------------
2595 2596
2596 2597 Templates used for customizing the output of commands.
2597 2598
2598 2599 ``graphnode``
2599 2600 The template used to print changeset nodes in an ASCII revision graph.
2600 2601 (default: ``{graphnode}``)
2601 2602
2602 2603 ``log``
2603 2604 Template string for commands that print changesets.
2604 2605
2605 2606 ``mergemarker``
2606 2607 The template used to print the commit description next to each conflict
2607 2608 marker during merge conflicts. See :hg:`help templates` for the template
2608 2609 format.
2609 2610
2610 2611 Defaults to showing the hash, tags, branches, bookmarks, author, and
2611 2612 the first line of the commit description.
2612 2613
2613 2614 If you use non-ASCII characters in names for tags, branches, bookmarks,
2614 2615 authors, and/or commit descriptions, you must pay attention to encodings of
2615 2616 managed files. At template expansion, non-ASCII characters use the encoding
2616 2617 specified by the ``--encoding`` global option, ``HGENCODING`` or other
2617 2618 environment variables that govern your locale. If the encoding of the merge
2618 2619 markers is different from the encoding of the merged files,
2619 2620 serious problems may occur.
2620 2621
2621 2622 Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
2622 2623
2623 2624 ``oneline-summary``
2624 2625 A template used by `hg rebase` and other commands for showing a one-line
2625 2626 summary of a commit. If the template configured here is longer than one
2626 2627 line, then only the first line is used.
2627 2628
2628 2629 The template can be overridden per command by defining a template in
2629 2630 `oneline-summary.<command>`, where `<command>` can be e.g. "rebase".
2630 2631
2631 2632 ``pre-merge-tool-output``
2632 2633 A template that is printed before executing an external merge tool. This can
2633 2634 be used to print out additional context that might be useful to have during
2634 2635 the conflict resolution, such as the description of the various commits
2635 2636 involved or bookmarks/tags.
2636 2637
2637 2638 Additional information is available in the ``local``, ``base``, and ``other``
2638 2639 dicts. For example: ``{local.label}``, ``{base.name}``, or
2639 2640 ``{other.islink}``.
2640 2641
2641 2642
2642 2643 ``web``
2643 2644 -------
2644 2645
2645 2646 Web interface configuration. The settings in this section apply to
2646 2647 both the builtin webserver (started by :hg:`serve`) and the script you
2647 2648 run through a webserver (``hgweb.cgi`` and the derivatives for FastCGI
2648 2649 and WSGI).
2649 2650
2650 2651 The Mercurial webserver does no authentication (it does not prompt for
2651 2652 usernames and passwords to validate *who* users are), but it does do
2652 2653 authorization (it grants or denies access for *authenticated users*
2653 2654 based on settings in this section). You must either configure your
2654 2655 webserver to do authentication for you, or disable the authorization
2655 2656 checks.
2656 2657
2657 2658 For a quick setup in a trusted environment, e.g., a private LAN, where
2658 2659 you want it to accept pushes from anybody, you can use the following
2659 2660 command line::
2660 2661
2661 2662 $ hg --config web.allow-push=* --config web.push_ssl=False serve
2662 2663
2663 2664 Note that this will allow anybody to push anything to the server and
2664 2665 that this should not be used for public servers.
2665 2666
2666 2667 The full set of options is:
2667 2668
2668 2669 ``accesslog``
2669 2670 Where to output the access log. (default: stdout)
2670 2671
2671 2672 ``address``
2672 2673 Interface address to bind to. (default: all)
2673 2674
2674 2675 ``allow-archive``
2675 2676 List of archive format (bz2, gz, zip) allowed for downloading.
2676 2677 (default: empty)
2677 2678
2678 2679 ``allowbz2``
2679 2680 (DEPRECATED) Whether to allow .tar.bz2 downloading of repository
2680 2681 revisions.
2681 2682 (default: False)
2682 2683
2683 2684 ``allowgz``
2684 2685 (DEPRECATED) Whether to allow .tar.gz downloading of repository
2685 2686 revisions.
2686 2687 (default: False)
2687 2688
2688 2689 ``allow-pull``
2689 2690 Whether to allow pulling from the repository. (default: True)
2690 2691
2691 2692 ``allow-push``
2692 2693 Whether to allow pushing to the repository. If empty or not set,
2693 2694 pushing is not allowed. If the special value ``*``, any remote
2694 2695 user can push, including unauthenticated users. Otherwise, the
2695 2696 remote user must have been authenticated, and the authenticated
2696 2697 user name must be present in this list. The contents of the
2697 2698 allow-push list are examined after the deny_push list.
2698 2699
2699 2700 ``allow_read``
2700 2701 If the user has not already been denied repository access due to
2701 2702 the contents of deny_read, this list determines whether to grant
2702 2703 repository access to the user. If this list is not empty, and the
2703 2704 user is unauthenticated or not present in the list, then access is
2704 2705 denied for the user. If the list is empty or not set, then access
2705 2706 is permitted to all users by default. Setting allow_read to the
2706 2707 special value ``*`` is equivalent to it not being set (i.e. access
2707 2708 is permitted to all users). The contents of the allow_read list are
2708 2709 examined after the deny_read list.
2709 2710
2710 2711 ``allowzip``
2711 2712 (DEPRECATED) Whether to allow .zip downloading of repository
2712 2713 revisions. This feature creates temporary files.
2713 2714 (default: False)
2714 2715
2715 2716 ``archivesubrepos``
2716 2717 Whether to recurse into subrepositories when archiving.
2717 2718 (default: False)
2718 2719
2719 2720 ``baseurl``
2720 2721 Base URL to use when publishing URLs in other locations, so
2721 2722 third-party tools like email notification hooks can construct
2722 2723 URLs. Example: ``http://hgserver/repos/``.
2723 2724
2724 2725 ``cacerts``
2725 2726 Path to file containing a list of PEM encoded certificate
2726 2727 authority certificates. Environment variables and ``~user``
2727 2728 constructs are expanded in the filename. If specified on the
2728 2729 client, then it will verify the identity of remote HTTPS servers
2729 2730 with these certificates.
2730 2731
2731 2732 To disable SSL verification temporarily, specify ``--insecure`` from
2732 2733 command line.
2733 2734
2734 2735 You can use OpenSSL's CA certificate file if your platform has
2735 2736 one. On most Linux systems this will be
2736 2737 ``/etc/ssl/certs/ca-certificates.crt``. Otherwise you will have to
2737 2738 generate this file manually. The form must be as follows::
2738 2739
2739 2740 -----BEGIN CERTIFICATE-----
2740 2741 ... (certificate in base64 PEM encoding) ...
2741 2742 -----END CERTIFICATE-----
2742 2743 -----BEGIN CERTIFICATE-----
2743 2744 ... (certificate in base64 PEM encoding) ...
2744 2745 -----END CERTIFICATE-----
2745 2746
2746 2747 ``cache``
2747 2748 Whether to support caching in hgweb. (default: True)
2748 2749
2749 2750 ``certificate``
2750 2751 Certificate to use when running :hg:`serve`.
2751 2752
2752 2753 ``collapse``
2753 2754 With ``descend`` enabled, repositories in subdirectories are shown at
2754 2755 a single level alongside repositories in the current path. With
2755 2756 ``collapse`` also enabled, repositories residing at a deeper level than
2756 2757 the current path are grouped behind navigable directory entries that
2757 2758 lead to the locations of these repositories. In effect, this setting
2758 2759 collapses each collection of repositories found within a subdirectory
2759 2760 into a single entry for that subdirectory. (default: False)
2760 2761
2761 2762 ``comparisoncontext``
2762 2763 Number of lines of context to show in side-by-side file comparison. If
2763 2764 negative or the value ``full``, whole files are shown. (default: 5)
2764 2765
2765 2766 This setting can be overridden by a ``context`` request parameter to the
2766 2767 ``comparison`` command, taking the same values.
2767 2768
2768 2769 ``contact``
2769 2770 Name or email address of the person in charge of the repository.
2770 2771 (default: ui.username or ``$EMAIL`` or "unknown" if unset or empty)
2771 2772
2772 2773 ``csp``
2773 2774 Send a ``Content-Security-Policy`` HTTP header with this value.
2774 2775
2775 2776 The value may contain a special string ``%nonce%``, which will be replaced
2776 2777 by a randomly-generated one-time use value. If the value contains
2777 2778 ``%nonce%``, ``web.cache`` will be disabled, as caching undermines the
2778 2779 one-time property of the nonce. This nonce will also be inserted into
2779 2780 ``<script>`` elements containing inline JavaScript.
2780 2781
2781 2782 Note: lots of HTML content sent by the server is derived from repository
2782 2783 data. Please consider the potential for malicious repository data to
2783 2784 "inject" itself into generated HTML content as part of your security
2784 2785 threat model.
2785 2786
2786 2787 ``deny_push``
2787 2788 Whether to deny pushing to the repository. If empty or not set,
2788 2789 push is not denied. If the special value ``*``, all remote users are
2789 2790 denied push. Otherwise, unauthenticated users are all denied, and
2790 2791 any authenticated user name present in this list is also denied. The
2791 2792 contents of the deny_push list are examined before the allow-push list.
2792 2793
2793 2794 ``deny_read``
2794 2795 Whether to deny reading/viewing of the repository. If this list is
2795 2796 not empty, unauthenticated users are all denied, and any
2796 2797 authenticated user name present in this list is also denied access to
2797 2798 the repository. If set to the special value ``*``, all remote users
2798 2799 are denied access (rarely needed ;). If deny_read is empty or not set,
2799 2800 the determination of repository access depends on the presence and
2800 2801 content of the allow_read list (see description). If both
2801 2802 deny_read and allow_read are empty or not set, then access is
2802 2803 permitted to all users by default. If the repository is being
2803 2804 served via hgwebdir, denied users will not be able to see it in
2804 2805 the list of repositories. The contents of the deny_read list have
2805 2806 priority over (are examined before) the contents of the allow_read
2806 2807 list.
2807 2808
2808 2809 ``descend``
2809 2810 hgwebdir indexes will not descend into subdirectories. Only repositories
2810 2811 directly in the current path will be shown (other repositories are still
2811 2812 available from the index corresponding to their containing path).
2812 2813
2813 2814 ``description``
2814 2815 Textual description of the repository's purpose or contents.
2815 2816 (default: "unknown")
2816 2817
2817 2818 ``encoding``
2818 2819 Character encoding name. (default: the current locale charset)
2819 2820 Example: "UTF-8".
2820 2821
2821 2822 ``errorlog``
2822 2823 Where to output the error log. (default: stderr)
2823 2824
2824 2825 ``guessmime``
2825 2826 Control MIME types for raw download of file content.
2826 2827 Set to True to let hgweb guess the content type from the file
2827 2828 extension. This will serve HTML files as ``text/html`` and might
2828 2829 allow cross-site scripting attacks when serving untrusted
2829 2830 repositories. (default: False)
2830 2831
2831 2832 ``hidden``
2832 2833 Whether to hide the repository in the hgwebdir index.
2833 2834 (default: False)
2834 2835
2835 2836 ``ipv6``
2836 2837 Whether to use IPv6. (default: False)
2837 2838
2838 2839 ``labels``
2839 2840 List of string *labels* associated with the repository.
2840 2841
2841 2842 Labels are exposed as a template keyword and can be used to customize
2842 2843 output. e.g. the ``index`` template can group or filter repositories
2843 2844 by labels and the ``summary`` template can display additional content
2844 2845 if a specific label is present.
2845 2846
2846 2847 ``logoimg``
2847 2848 File name of the logo image that some templates display on each page.
2848 2849 The file name is relative to ``staticurl``. That is, the full path to
2849 2850 the logo image is "staticurl/logoimg".
2850 2851 If unset, ``hglogo.png`` will be used.
2851 2852
2852 2853 ``logourl``
2853 2854 Base URL to use for logos. If unset, ``https://mercurial-scm.org/``
2854 2855 will be used.
2855 2856
2856 2857 ``maxchanges``
2857 2858 Maximum number of changes to list on the changelog. (default: 10)
2858 2859
2859 2860 ``maxfiles``
2860 2861 Maximum number of files to list per changeset. (default: 10)
2861 2862
2862 2863 ``maxshortchanges``
2863 2864 Maximum number of changes to list on the shortlog, graph or filelog
2864 2865 pages. (default: 60)
2865 2866
2866 2867 ``name``
2867 2868 Repository name to use in the web interface.
2868 2869 (default: current working directory)
2869 2870
2870 2871 ``port``
2871 2872 Port to listen on. (default: 8000)
2872 2873
2873 2874 ``prefix``
2874 2875 Prefix path to serve from. (default: '' (server root))
2875 2876
2876 2877 ``push_ssl``
2877 2878 Whether to require that inbound pushes be transported over SSL to
2878 2879 prevent password sniffing. (default: True)
2879 2880
2880 2881 ``refreshinterval``
2881 2882 How frequently directory listings re-scan the filesystem for new
2882 2883 repositories, in seconds. This is relevant when wildcards are used
2883 2884 to define paths. Depending on how much filesystem traversal is
2884 2885 required, refreshing may negatively impact performance.
2885 2886
2886 2887 Values less than or equal to 0 always refresh.
2887 2888 (default: 20)
2888 2889
2889 2890 ``server-header``
2890 2891 Value for HTTP ``Server`` response header.
2891 2892
2892 2893 ``static``
2893 2894 Directory where static files are served from.
2894 2895
2895 2896 ``staticurl``
2896 2897 Base URL to use for static files. If unset, static files (e.g. the
2897 2898 hgicon.png favicon) will be served by the CGI script itself. Use
2898 2899 this setting to serve them directly with the HTTP server.
2899 2900 Example: ``http://hgserver/static/``.
2900 2901
2901 2902 ``stripes``
2902 2903 How many lines a "zebra stripe" should span in multi-line output.
2903 2904 Set to 0 to disable. (default: 1)
2904 2905
2905 2906 ``style``
2906 2907 Which template map style to use. The available options are the names of
2907 2908 subdirectories in the HTML templates path. (default: ``paper``)
2908 2909 Example: ``monoblue``.
2909 2910
2910 2911 ``templates``
2911 2912 Where to find the HTML templates. The default path to the HTML templates
2912 2913 can be obtained from ``hg debuginstall``.
2913 2914
2914 2915 ``websub``
2915 2916 ----------
2916 2917
2917 2918 Web substitution filter definition. You can use this section to
2918 2919 define a set of regular expression substitution patterns which
2919 2920 let you automatically modify the hgweb server output.
2920 2921
2921 2922 The default hgweb templates only apply these substitution patterns
2922 2923 on the revision description fields. You can apply them anywhere
2923 2924 you want when you create your own templates by adding calls to the
2924 2925 "websub" filter (usually after calling the "escape" filter).
2925 2926
2926 2927 This can be used, for example, to convert issue references to links
2927 2928 to your issue tracker, or to convert "markdown-like" syntax into
2928 2929 HTML (see the examples below).
2929 2930
2930 2931 Each entry in this section names a substitution filter.
2931 2932 The value of each entry defines the substitution expression itself.
2932 2933 The websub expressions follow the old interhg extension syntax,
2933 2934 which in turn imitates the Unix sed replacement syntax::
2934 2935
2935 2936 patternname = s/SEARCH_REGEX/REPLACE_EXPRESSION/[i]
2936 2937
2937 2938 You can use any separator other than "/". The final "i" is optional
2938 2939 and indicates that the search must be case insensitive.
2939 2940
2940 2941 Examples::
2941 2942
2942 2943 [websub]
2943 2944 issues = s|issue(\d+)|<a href="http://bts.example.org/issue\1">issue\1</a>|i
2944 2945 italic = s/\b_(\S+)_\b/<i>\1<\/i>/
2945 2946 bold = s/\*\b(\S+)\b\*/<b>\1<\/b>/
2946 2947
2947 2948 ``worker``
2948 2949 ----------
2949 2950
2950 2951 Parallel master/worker configuration. We currently perform working
2951 2952 directory updates in parallel on Unix-like systems, which greatly
2952 2953 helps performance.
2953 2954
2954 2955 ``enabled``
2955 2956 Whether to enable workers code to be used.
2956 2957 (default: true)
2957 2958
2958 2959 ``numcpus``
2959 2960 Number of CPUs to use for parallel operations. A zero or
2960 2961 negative value is treated as ``use the default``.
2961 2962 (default: 4 or the number of CPUs on the system, whichever is larger)
2962 2963
2963 2964 ``backgroundclose``
2964 2965 Whether to enable closing file handles on background threads during certain
2965 2966 operations. Some platforms aren't very efficient at closing file
2966 2967 handles that have been written or appended to. By performing file closing
2967 2968 on background threads, file write rate can increase substantially.
2968 2969 (default: true on Windows, false elsewhere)
2969 2970
2970 2971 ``backgroundcloseminfilecount``
2971 2972 Minimum number of files required to trigger background file closing.
2972 2973 Operations not writing this many files won't start background close
2973 2974 threads.
2974 2975 (default: 2048)
2975 2976
2976 2977 ``backgroundclosemaxqueue``
2977 2978 The maximum number of opened file handles waiting to be closed in the
2978 2979 background. This option only has an effect if ``backgroundclose`` is
2979 2980 enabled.
2980 2981 (default: 384)
2981 2982
2982 2983 ``backgroundclosethreadcount``
2983 2984 Number of threads to process background file closes. Only relevant if
2984 2985 ``backgroundclose`` is enabled.
2985 2986 (default: 4)
@@ -1,3634 +1,3650 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from .pycompat import (
27 27 delattr,
28 28 getattr,
29 29 )
30 30 from . import (
31 31 bookmarks,
32 32 branchmap,
33 33 bundle2,
34 34 bundlecaches,
35 35 changegroup,
36 36 color,
37 37 commit,
38 38 context,
39 39 dirstate,
40 40 dirstateguard,
41 41 discovery,
42 42 encoding,
43 43 error,
44 44 exchange,
45 45 extensions,
46 46 filelog,
47 47 hook,
48 48 lock as lockmod,
49 49 match as matchmod,
50 50 mergestate as mergestatemod,
51 51 mergeutil,
52 52 namespaces,
53 53 narrowspec,
54 54 obsolete,
55 55 pathutil,
56 56 phases,
57 57 pushkey,
58 58 pycompat,
59 59 rcutil,
60 60 repoview,
61 61 requirements as requirementsmod,
62 revlog,
62 63 revset,
63 64 revsetlang,
64 65 scmutil,
65 66 sparse,
66 67 store as storemod,
67 68 subrepoutil,
68 69 tags as tagsmod,
69 70 transaction,
70 71 txnutil,
71 72 util,
72 73 vfs as vfsmod,
73 74 )
74 75
75 76 from .interfaces import (
76 77 repository,
77 78 util as interfaceutil,
78 79 )
79 80
80 81 from .utils import (
81 82 hashutil,
82 83 procutil,
83 84 stringutil,
84 85 )
85 86
86 87 from .revlogutils import constants as revlogconst
87 88
88 89 release = lockmod.release
89 90 urlerr = util.urlerr
90 91 urlreq = util.urlreq
91 92
92 93 # set of (path, vfs-location) tuples. vfs-location is:
93 94 # - 'plain' for vfs relative paths
94 95 # - '' for svfs relative paths
95 96 _cachedfiles = set()
96 97
97 98
class _basefilecache(scmutil.filecache):
    """Base filecache for repositories.

    All filecache usage on a repo is done for logic that should see the
    unfiltered view, so both lookup and storage are redirected there.
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # A filtered repo keeps no cache entry of its own: proxy the
        # lookup through the unfiltered repo's __dict__.
        unfi = repo.unfiltered()
        if self.sname in unfi.__dict__:
            return unfi.__dict__[self.sname]
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        # Always store on the unfiltered repo.
        unfi = repo.unfiltered()
        return super(_basefilecache, self).set(unfi, value)
114 115
115 116
class repofilecache(_basefilecache):
    """Filecache for files in ``.hg`` but outside of ``.hg/store``."""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        # Register every tracked path as a vfs-relative ('plain') entry.
        _cachedfiles.update((p, b'plain') for p in paths)

    def join(self, obj, fname):
        # Paths resolve against the repository's main (non-store) vfs.
        return obj.vfs.join(fname)
126 127
127 128
class storecache(_basefilecache):
    """Filecache for files that live inside the store."""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        # Register every tracked path as a store-relative ('') entry.
        _cachedfiles.update((p, b'') for p in paths)

    def join(self, obj, fname):
        # Store paths resolve through the repository's sjoin helper.
        return obj.sjoin(fname)
138 139
139 140
class mixedrepostorecache(_basefilecache):
    """Filecache for a mix of files inside ``.hg/store`` and outside it."""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass (path, location) tuples.
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        # 'plain' entries resolve against the main vfs; '' entries are
        # store-relative. Anything else is a programming error.
        if location == b'plain':
            return obj.vfs.join(fname)
        if location != b'':
            raise error.ProgrammingError(
                b'unexpected location: %s' % location
            )
        return obj.sjoin(fname)
159 160
160 161
def isfilecached(repo, name):
    """Check whether a repo has already cached the filecache-ed property.

    Returns a ``(cachedobj-or-None, iscached)`` tuple.
    """
    entry = repo.unfiltered()._filecache.get(name, None)
    if entry:
        return entry.obj, True
    return None, False
170 171
171 172
class unfilteredpropertycache(util.propertycache):
    """Propertycache that applies to the unfiltered repo only."""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # On a filtered view, delegate the attribute access to the
            # unfiltered repo so the cached value lives there.
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
180 181
181 182
class filteredpropertycache(util.propertycache):
    """Propertycache that must take repo filtering into account."""

    def cachevalue(self, obj, value):
        # Store directly on the instance; object.__setattr__ bypasses
        # any __setattr__ override on the repo class.
        object.__setattr__(obj, self.name, value)
187 188
188 189
def hasunfilteredcache(repo, name):
    """Check if a repo has an unfilteredpropertycache value for <name>."""
    unfi = repo.unfiltered()
    return name in vars(unfi)
192 193
193 194
def unfilteredmethod(orig):
    """Decorate a method so it always runs on the unfiltered repo."""

    @functools.wraps(orig)
    def inner(repo, *args, **kwargs):
        # Swap the (possibly filtered) repo for its unfiltered view
        # before invoking the wrapped method.
        return orig(repo.unfiltered(), *args, **kwargs)

    return inner
202 203
203 204
# Wire-protocol capabilities advertised by a modern local peer.
moderncaps = {
    b'branchmap',
    b'getbundle',
    b'known',
    b'lookup',
    b'pushkey',
    b'unbundle',
}
# Legacy peers additionally understand changegroupsubset.
legacycaps = moderncaps | {b'changegroupsubset'}
213 214
214 215
@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    """Command executor that runs commands directly against a local peer.

    Commands execute synchronously; results are returned as
    already-resolved futures.
    """

    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        # Guard against use after the executor has been finalized.
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )
        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # Nothing fancy is needed: invoke the named method on the peer
        # synchronously and hand back a resolved future.
        method = getattr(self._peer, pycompat.sysstr(command))

        fut = pycompat.futures.Future()
        try:
            outcome = method(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(fut, sys.exc_info()[1:])
        else:
            fut.set_result(outcome)

        return fut

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
259 260
260 261
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API

    Wraps a local repository (through its ``served`` filtered view) behind
    the peer interface so local and remote repos can be handled uniformly.
    '''

    def __init__(self, repo, caps=None):
        # repo: local repository to wrap
        # caps: optional capability set; defaults to a copy of moderncaps
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        # All peer operations go through the 'served' filtered view.
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        # A local peer exposes the underlying repo directly.
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        # Returns the raw clone-bundles manifest (empty bytes if absent).
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        # Delegate bundle generation to exchange; [1] drops the info dict
        # and keeps only the chunk iterator.
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        # Stream clones only make sense over the wire.
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # Translate a push race into a wire-style response error.
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.
398 399
399 400
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        # Advertise the legacy capability set instead of the modern one.
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        # Everything reachable from the given roots up to the repo heads.
        missing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, missing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        # Same as changegroup(), but bounded by explicit heads.
        missing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, missing, b'01', source)

    # End of baselegacywirecommands interface.
429 430
430 431
431 432 # Functions receiving (ui, features) that extensions can register to impact
432 433 # the ability to load repositories with custom requirements. Only
433 434 # functions defined in loaded extensions are called.
434 435 #
435 436 # The function receives a set of requirement strings that the repository
436 437 # is capable of opening. Functions will typically add elements to the
437 438 # set to reflect that the extension knows how to handle that requirements.
438 439 featuresetupfuncs = set()
439 440
440 441
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to root of shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)

    Raises ``error.RepoError`` when the referenced share source does not
    exist on disk.
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = hgvfs.join(sharedpath)

    # NOTE(review): realpath=True presumably canonicalizes the path
    # (resolving symlinks) — confirm against vfsmod.vfs.
    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs
464 465
465 466
466 467 def _readrequires(vfs, allowmissing):
467 468 """reads the require file present at root of this vfs
468 469 and return a set of requirements
469 470
470 471 If allowmissing is True, we suppress ENOENT if raised"""
471 472 # requires file contains a newline-delimited list of
472 473 # features/capabilities the opener (us) must have in order to use
473 474 # the repository. This file was introduced in Mercurial 0.9.2,
474 475 # which means very old repositories may not have one. We assume
475 476 # a missing file translates to no requirements.
476 477 try:
477 478 requirements = set(vfs.read(b'requires').splitlines())
478 479 except IOError as e:
479 480 if not (allowmissing and e.errno == errno.ENOENT):
480 481 raise
481 482 requirements = set()
482 483 return requirements
483 484
484 485
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is shared one or not
    shared = False
    # If this repository is shared, vfs pointing to shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
    # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
    # is not present, refer checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            # Share source lost share-safe: either auto-downgrade this
            # share (if configured) or refuse to open it.
            if ui.configbool(
                b'experimental', b'sharesafe-auto-downgrade-shares'
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                )
            else:
                raise error.Abort(
                    _(
                        b"share source does not support exp-sharesafe requirement"
                    )
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            # Source became share-safe after this share was created:
            # auto-upgrade, warn, or silently continue per config.
            if ui.configbool(b'experimental', b'sharesafe-auto-upgrade-shares'):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                )
            elif ui.configbool(
                b'experimental', b'sharesafe-warn-outdated-shares'
            ):
                ui.warn(
                    _(
                        b'warning: source repository supports share-safe functionality.'
                        b' Reshare to upgrade.\n'
                    )
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates current repository
    # is a share and store exists in path mentioned in `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produces types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )
749 750
750 751
def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is vfs object pointing to source repo if the current one is a
    shared one
    """
    if not rcutil.use_repo_hgrc():
        # Per-repo hgrc loading is disabled.
        return False

    loaded = False

    # Config from the share source is read first, if we have to.
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            loaded = True
        except IOError:
            pass

    # Then the local repo's own config files; a missing file is fine.
    for fname in (b'hgrc', b'hgrc-not-shared'):
        try:
            ui.readconfig(hgvfs.join(fname), root=wdirvfs.base)
            loaded = True
        except IOError:
            pass

    return loaded
791 792
792 793
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """
    # Extensions to enable automatically when a requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement in requirements:
            for name in names:
                # Don't override an explicit user setting for the extension.
                if not ui.hasconfig(b'extensions', name):
                    ui.setconfig(b'extensions', name, b'', source=b'autoload')
818 819
819 820
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start from the requirements this module itself supports.
    supported = set(localrepository._basesupported)

    # Run ``featuresetupfuncs`` entries registered by extensions that are
    # actually loaded for this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}
    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Derive additional requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported
842 843
843 844
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    unknown = set()

    for req in requirements:
        if req in supported:
            continue

        # An empty requirement or one not starting with an alphanumeric
        # character indicates a corrupt requires file.
        if not req or not req[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        unknown.add(req)

    if unknown:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(unknown)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
873 874
874 875
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    wants_sparse = requirementsmod.SPARSE_REQUIREMENT in requirements
    if wants_sparse and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )
899 900
900 901
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    # Pre-'store' repositories get the basic flat layout.
    if b'store' not in requirements:
        return storemod.basicstore(path, vfstype)

    # With fncache, optionally dotencode-d, use the fncache store.
    if b'fncache' in requirements:
        return storemod.fncachestore(
            path, vfstype, b'dotencode' in requirements
        )

    return storemod.encodedstore(path, vfstype)
912 913
913 914
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    has_revlog = (
        b'revlogv1' in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    )
    if has_revlog:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:
        # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        if writecopiesto in (b'changeset-only', b'compatibility'):
            options[b'copies-storage'] = b'extra'

    return options
951 952
952 953
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs.

    Returns a dict of options derived from the repository requirements
    and various ``ui`` configuration values; the dict is later merged
    into the store vfs options by ``resolvestorevfsoptions``.
    """

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    # Reusing an external delta only makes sense when deltas are reused at
    # all; the parent-reuse option is only consulted in that case.
    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        # sparse revlogs imply general delta storage
        options[b'generaldelta'] = True

    sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirement to co-exist because
        # strictly speaking, revlog seems to support mixed compression style.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn'):
            # Unknown value: warn and fall back to the declared default.
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        # With slow-path=warn, emit a warning (and a hint unless quiet) when
        # no fast (native) persistent-nodemap implementation is available.
        if slow_path == b'warn' and not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            msg = b"warning: " + msg + b'\n'
            ui.warn(msg)
            if not ui.quiet:
                hint = b'(' + hint + b')\n'
                ui.warn(hint)
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        epnm = ui.config(b'storage', b'revlog.nodemap.mode')
        options[b'persistent-nodemap.mode'] = epnm
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options
1071 1087
1072 1088
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``.

    Extra keyword arguments are accepted and ignored.
    """
    return localrepository
1076 1092
1077 1093
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        """Return the filelog for ``path`` (leading '/' stripped).

        Note: ``path[0] == b'/'`` would compare an int with bytes on
        Python 3 and always be False; slicing behaves identically on
        Python 2 and 3 (and is safe on an empty path).
        """
        if path[:1] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)
1087 1103
1088 1104
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        """Return the narrow filelog for ``path`` (leading '/' stripped).

        Note: ``path[0] == b'/'`` would compare an int with bytes on
        Python 3 and always be False; slicing behaves identically on
        Python 2 and 3 (and is safe on an empty path).
        """
        if path[:1] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1098 1114
1099 1115
def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``.

    Also records the storage-related features this repository supports.
    """
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    return revlogfilestorage
1109 1125
1110 1126
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped (the lambda re-resolves the name at call time).
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
1120 1136
1121 1137
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.

    # Requirements that affect how on-disk data is stored.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SIDEDATA_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    # Full set of requirements this class knows how to open.
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        b'dotencode',
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefix for file which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assume
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }
1184 1200
    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        # In developer mode, wrap the vfs audit hook to warn about writes
        # performed without holding the appropriate lock.
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'
1333 1349
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs

        Wraps the vfs ``audit`` callable so every write access under .hg/
        is checked against the locks currently held; violations are
        reported through ``ui.develwarn``.
        """
        # weakref so the ward does not keep the repo alive
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            # Bail out early if the repo is gone or not fully constructed yet.
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            # Read accesses never need a lock.
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs
1384 1400
1385 1401 def _getsvfsward(self, origfunc):
1386 1402 """build a ward for self.svfs"""
1387 1403 rref = weakref.ref(self)
1388 1404
1389 1405 def checksvfs(path, mode=None):
1390 1406 ret = origfunc(path, mode=mode)
1391 1407 repo = rref()
1392 1408 if repo is None or not util.safehasattr(repo, b'_lockref'):
1393 1409 return
1394 1410 if mode in (None, b'r', b'rb'):
1395 1411 return
1396 1412 if path.startswith(repo.sharedpath):
1397 1413 # truncate name relative to the repository (.hg)
1398 1414 path = path[len(repo.sharedpath) + 1 :]
1399 1415 if repo._currentlock(repo._lockref) is None:
1400 1416 repo.ui.develwarn(
1401 1417 b'write with no lock: "%s"' % path, stacklevel=4
1402 1418 )
1403 1419 return ret
1404 1420
1405 1421 return checksvfs
1406 1422
    def close(self):
        """Flush pending cache writes when the repository is closed."""
        self._writecaches()
1409 1425
1410 1426 def _writecaches(self):
1411 1427 if self._revbranchcache:
1412 1428 self._revbranchcache.write()
1413 1429
1414 1430 def _restrictcapabilities(self, caps):
1415 1431 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1416 1432 caps = set(caps)
1417 1433 capsblob = bundle2.encodecaps(
1418 1434 bundle2.getrepocaps(self, role=b'client')
1419 1435 )
1420 1436 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1421 1437 return caps
1422 1438
1423 1439 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1424 1440 # self -> auditor -> self._checknested -> self
1425 1441
1426 1442 @property
1427 1443 def auditor(self):
1428 1444 # This is only used by context.workingctx.match in order to
1429 1445 # detect files in subrepos.
1430 1446 return pathutil.pathauditor(self.root, callback=self._checknested)
1431 1447
1432 1448 @property
1433 1449 def nofsauditor(self):
1434 1450 # This is only used by context.basectx.match in order to detect
1435 1451 # files in subrepos.
1436 1452 return pathutil.pathauditor(
1437 1453 self.root, callback=self._checknested, realfs=False, cached=True
1438 1454 )
1439 1455
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        ``path`` is an absolute path; returns True only when it falls
        under a subrepository recorded in the working copy's ``.hgsub``
        state.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        # Walk path components from the deepest prefix outwards, looking
        # for a registered subrepository.
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # path is inside the subrepo; delegate the check to it
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False
1477 1493
    def peer(self):
        """Return a ``localpeer`` view of this repository."""
        return localpeer(self)  # not cached to avoid reference cycle
1480 1496
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo.
        """
        return self
1486 1502
1487 1503 def filtered(self, name, visibilityexceptions=None):
1488 1504 """Return a filtered version of a repository
1489 1505
1490 1506 The `name` parameter is the identifier of the requested view. This
1491 1507 will return a repoview object set "exactly" to the specified view.
1492 1508
1493 1509 This function does not apply recursive filtering to a repository. For
1494 1510 example calling `repo.filtered("served")` will return a repoview using
1495 1511 the "served" view, regardless of the initial view used by `repo`.
1496 1512
1497 1513 In other word, there is always only one level of `repoview` "filtering".
1498 1514 """
1499 1515 if self._extrafilterid is not None and b'%' not in name:
1500 1516 name = name + b'%' + self._extrafilterid
1501 1517
1502 1518 cls = repoview.newtype(self.unfiltered().__class__)
1503 1519 return cls(self, name, visibilityexceptions)
1504 1520
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        """Return the bookmark store, kept consistent with the changelog."""
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happening during read is not great, but it becomes worse
        # when this happens during write because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race free consistent read.
        # For this purpose data read from disc before locking are
        # "invalidated" right after the locks are taken. These invalidations
        # are "light", the `filecache` mechanism keeps the data in memory and
        # will reuse them if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked file is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows
        #
        # 1) filecache logic detect that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensure we have a changelog at least as recent as the
        # cache stat computed in (1). As a result at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)
1562 1578
1563 1579 def _refreshchangelog(self):
1564 1580 """make sure the in memory changelog match the on-disk one"""
1565 1581 if 'changelog' in vars(self) and self.currenttransaction() is None:
1566 1582 del self.changelog
1567 1583
    @property
    def _activebookmark(self):
        # the active bookmark as tracked by the bookmark store
        return self._bookmarks.active
1571 1587
    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        # Cached phase information for this repository.
        return phases.phasecache(self, self._phasedefaults)
1578 1594
    @storecache(b'obsstore')
    def obsstore(self):
        # Obsolescence marker store, cached on the 'obsstore' file.
        return obsolete.makestore(self.ui, self)
1582 1598
    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid race see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))
1588 1604
    @storecache(b'00manifest.i')
    def manifestlog(self):
        # Manifest log, restricted by the store-level narrow matcher.
        return self.store.manifestlog(self, self._storenarrowmatch)
1592 1608
    @repofilecache(b'dirstate')
    def dirstate(self):
        # Construction is delegated so extensions can wrap _makedirstate.
        return self._makedirstate()
1596 1612
1597 1613 def _makedirstate(self):
1598 1614 """Extension point for wrapping the dirstate per-repo."""
1599 1615 sparsematchfn = lambda: sparse.matcher(self)
1600 1616
1601 1617 return dirstate.dirstate(
1602 1618 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1603 1619 )
1604 1620
1605 1621 def _dirstatevalidate(self, node):
1606 1622 try:
1607 1623 self.changelog.rev(node)
1608 1624 return node
1609 1625 except error.LookupError:
1610 1626 if not self._dirstatevalidatewarned:
1611 1627 self._dirstatevalidatewarned = True
1612 1628 self.ui.warn(
1613 1629 _(b"warning: ignoring unknown working parent %s!\n")
1614 1630 % short(node)
1615 1631 )
1616 1632 return nullid
1617 1633
    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)
1625 1641
1626 1642 @storecache(narrowspec.FILENAME)
1627 1643 def _storenarrowmatch(self):
1628 1644 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1629 1645 return matchmod.always()
1630 1646 include, exclude = self.narrowpats
1631 1647 return narrowspec.match(self.root, include=include, exclude=exclude)
1632 1648
1633 1649 @storecache(narrowspec.FILENAME)
1634 1650 def _narrowmatch(self):
1635 1651 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1636 1652 return matchmod.always()
1637 1653 narrowspec.checkworkingcopynarrowspec(self)
1638 1654 include, exclude = self.narrowpats
1639 1655 return narrowspec.match(self.root, include=include, exclude=exclude)
1640 1656
    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
1659 1675
1660 1676 def setnarrowpats(self, newincludes, newexcludes):
1661 1677 narrowspec.save(self, newincludes, newexcludes)
1662 1678 self.invalidate(clearfilecache=True)
1663 1679
1664 1680 @unfilteredpropertycache
1665 1681 def _quick_access_changeid_null(self):
1666 1682 return {
1667 1683 b'null': (nullrev, nullid),
1668 1684 nullrev: (nullrev, nullid),
1669 1685 nullid: (nullrev, nullid),
1670 1686 }
1671 1687
    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filter that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            # index the pair under both its rev and its node
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        # '.' resolves to the first working-copy parent when it exists
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick
1702 1718
1703 1719 @unfilteredmethod
1704 1720 def _quick_access_changeid_invalidate(self):
1705 1721 if '_quick_access_changeid_wc' in vars(self):
1706 1722 del self.__dict__['_quick_access_changeid_wc']
1707 1723
    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains the symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null
1718 1734
    def __getitem__(self, changeid):
        """Return a context for ``changeid``.

        ``changeid`` may be ``None`` (working copy), an existing context
        (returned as-is), a ``slice`` (list of contexts), an integer
        revision, a 20-byte binary node, a 40-byte hex node, or a
        recognized symbol such as b'null', b'tip' or b'.'.

        Raises ``error.FilteredRepoLookupError`` for filtered revisions and
        ``error.RepoLookupError`` for unknown ones.
        """
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
1797 1813
1798 1814 def __contains__(self, changeid):
1799 1815 """True if the given changeid exists"""
1800 1816 try:
1801 1817 self[changeid]
1802 1818 return True
1803 1819 except error.RepoLookupError:
1804 1820 return False
1805 1821
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no commits
        return True

    __bool__ = __nonzero__  # py3 spelling of __nonzero__
1810 1826
1811 1827 def __len__(self):
1812 1828 # no need to pay the cost of repoview.changelog
1813 1829 unfi = self.unfiltered()
1814 1830 return len(unfi.changelog)
1815 1831
    def __iter__(self):
        # iterate the changelog (yields revision numbers, per ``revs``)
        return iter(self.changelog)
1818 1834
1819 1835 def revs(self, expr, *args):
1820 1836 """Find revisions matching a revset.
1821 1837
1822 1838 The revset is specified as a string ``expr`` that may contain
1823 1839 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1824 1840
1825 1841 Revset aliases from the configuration are not expanded. To expand
1826 1842 user aliases, consider calling ``scmutil.revrange()`` or
1827 1843 ``repo.anyrevs([expr], user=True)``.
1828 1844
1829 1845 Returns a smartset.abstractsmartset, which is a list-like interface
1830 1846 that contains integer revisions.
1831 1847 """
1832 1848 tree = revsetlang.spectree(expr, *args)
1833 1849 return revset.makematcher(tree)(self)
1834 1850
1835 1851 def set(self, expr, *args):
1836 1852 """Find revisions matching a revset and emit changectx instances.
1837 1853
1838 1854 This is a convenience wrapper around ``revs()`` that iterates the
1839 1855 result and is a generator of changectx instances.
1840 1856
1841 1857 Revset aliases from the configuration are not expanded. To expand
1842 1858 user aliases, consider calling ``scmutil.revrange()``.
1843 1859 """
1844 1860 for r in self.revs(expr, *args):
1845 1861 yield self[r]
1846 1862
1847 1863 def anyrevs(self, specs, user=False, localalias=None):
1848 1864 """Find revisions matching one of the given revsets.
1849 1865
1850 1866 Revset aliases from the configuration are not expanded by default. To
1851 1867 expand user aliases, specify ``user=True``. To provide some local
1852 1868 definitions overriding user aliases, set ``localalias`` to
1853 1869 ``{name: definitionstring}``.
1854 1870 """
1855 1871 if specs == [b'null']:
1856 1872 return revset.baseset([nullrev])
1857 1873 if specs == [b'.']:
1858 1874 quick_data = self._quick_access_changeid.get(b'.')
1859 1875 if quick_data is not None:
1860 1876 return revset.baseset([quick_data[0]])
1861 1877 if user:
1862 1878 m = revset.matchany(
1863 1879 self.ui,
1864 1880 specs,
1865 1881 lookup=revset.lookupfn(self),
1866 1882 localalias=localalias,
1867 1883 )
1868 1884 else:
1869 1885 m = revset.matchany(None, specs, localalias=localalias)
1870 1886 return m(self)
1871 1887
1872 1888 def url(self):
1873 1889 return b'file:' + self.root
1874 1890
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.

        ``name`` is the hook name; ``throw`` makes hook failures raise.
        Extra keyword ``args`` are forwarded to the hook environment.
        Returns whatever ``hook.hook`` returns.
        """
        return hook.hook(self.ui, self, name, throw, **args)
1883 1899
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # nodetagscache (node -> sorted tag names) and tagslist
                # ((tag, node) pairs ordered by revision) are filled lazily
                # by ``nodetags`` and ``tagslist`` respectively.
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        # populate eagerly; the lazy fields above stay None until needed
        cache.tags, cache.tagtypes = self._findtags()

        return cache
1906 1922
1907 1923 def tags(self):
1908 1924 '''return a mapping of tag to node'''
1909 1925 t = {}
1910 1926 if self.changelog.filteredrevs:
1911 1927 tags, tt = self._findtags()
1912 1928 else:
1913 1929 tags = self._tagscache.tags
1914 1930 rev = self.changelog.rev
1915 1931 for k, v in pycompat.iteritems(tags):
1916 1932 try:
1917 1933 # ignore tags to unknown nodes
1918 1934 rev(v)
1919 1935 t[k] = v
1920 1936 except (error.LookupError, ValueError):
1921 1937 pass
1922 1938 return t
1923 1939
    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        # local tags (.hg/localtags) are merged in, updating tagtypes too
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            # tags explicitly deleted (pointing at nullid) are skipped
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # b'tip' is always present and always the changelog tip
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)
1959 1975
1960 1976 def tagtype(self, tagname):
1961 1977 """
1962 1978 return the type of the given tag. result can be:
1963 1979
1964 1980 'local' : a local tag
1965 1981 'global' : a global tag
1966 1982 None : tag does not exist
1967 1983 """
1968 1984
1969 1985 return self._tagscache.tagtypes.get(tagname)
1970 1986
1971 1987 def tagslist(self):
1972 1988 '''return a list of tags ordered by revision'''
1973 1989 if not self._tagscache.tagslist:
1974 1990 l = []
1975 1991 for t, n in pycompat.iteritems(self.tags()):
1976 1992 l.append((self.changelog.rev(n), t, n))
1977 1993 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1978 1994
1979 1995 return self._tagscache.tagslist
1980 1996
1981 1997 def nodetags(self, node):
1982 1998 '''return the tags associated with a node'''
1983 1999 if not self._tagscache.nodetagscache:
1984 2000 nodetagscache = {}
1985 2001 for t, n in pycompat.iteritems(self._tagscache.tags):
1986 2002 nodetagscache.setdefault(n, []).append(t)
1987 2003 for tags in pycompat.itervalues(nodetagscache):
1988 2004 tags.sort()
1989 2005 self._tagscache.nodetagscache = nodetagscache
1990 2006 return self._tagscache.nodetagscache.get(node, [])
1991 2007
1992 2008 def nodebookmarks(self, node):
1993 2009 """return the list of bookmarks pointing to the specified node"""
1994 2010 return self._bookmarks.names(node)
1995 2011
1996 2012 def branchmap(self):
1997 2013 """returns a dictionary {branch: [branchheads]} with branchheads
1998 2014 ordered by increasing revision number"""
1999 2015 return self._branchcaches[self]
2000 2016
    @unfilteredmethod
    def revbranchcache(self):
        """Return the rev-to-branch cache, creating it on first use.

        The cache is built against the unfiltered repository and reused for
        subsequent calls.
        """
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
2006 2022
2007 2023 def branchtip(self, branch, ignoremissing=False):
2008 2024 """return the tip node for a given branch
2009 2025
2010 2026 If ignoremissing is True, then this method will not raise an error.
2011 2027 This is helpful for callers that only expect None for a missing branch
2012 2028 (e.g. namespace).
2013 2029
2014 2030 """
2015 2031 try:
2016 2032 return self.branchmap().branchtip(branch)
2017 2033 except KeyError:
2018 2034 if not ignoremissing:
2019 2035 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2020 2036 else:
2021 2037 pass
2022 2038
2023 2039 def lookup(self, key):
2024 2040 node = scmutil.revsymbol(self, key).node()
2025 2041 if node is None:
2026 2042 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2027 2043 return node
2028 2044
2029 2045 def lookupbranch(self, key):
2030 2046 if self.branchmap().hasbranch(key):
2031 2047 return key
2032 2048
2033 2049 return scmutil.revsymbol(self, key).branch()
2034 2050
2035 2051 def known(self, nodes):
2036 2052 cl = self.changelog
2037 2053 get_rev = cl.index.get_rev
2038 2054 filtered = cl.filteredrevs
2039 2055 result = []
2040 2056 for n in nodes:
2041 2057 r = get_rev(n)
2042 2058 resp = not (r is None or r in filtered)
2043 2059 result.append(resp)
2044 2060 return result
2045 2061
    def local(self):
        """Return self, marking this repository as locally accessible.

        Callers test the truthiness of the return value (see ``cancopy``);
        non-local repo classes presumably override this to return a falsy
        value -- see the statichttprepo note in ``cancopy``.
        """
        return self
2048 2064
2049 2065 def publishing(self):
2050 2066 # it's safe (and desirable) to trust the publish flag unconditionally
2051 2067 # so that we don't finalize changes shared between users via ssh or nfs
2052 2068 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2053 2069
2054 2070 def cancopy(self):
2055 2071 # so statichttprepo's override of local() works
2056 2072 if not self.local():
2057 2073 return False
2058 2074 if not self.publishing():
2059 2075 return True
2060 2076 # if publishing we can't copy if there is filtered content
2061 2077 return not self.filtered(b'visible').changelog.filteredrevs
2062 2078
2063 2079 def shared(self):
2064 2080 '''the type of shared repository (None if not shared)'''
2065 2081 if self.sharedpath != self.path:
2066 2082 return b'store'
2067 2083 return None
2068 2084
2069 2085 def wjoin(self, f, *insidef):
2070 2086 return self.vfs.reljoin(self.root, f, *insidef)
2071 2087
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents.

        Also drops the quick-access changeid cache, which includes the old
        parents.
        """
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()
2075 2091
    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """Return a filectx for ``path``.

        changeid must be a changeset revision, if specified.
        fileid can be a file revision or node.
        ``changectx`` may be given to avoid a redundant lookup.
        """
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )
2082 2098
2083 2099 def getcwd(self):
2084 2100 return self.dirstate.getcwd()
2085 2101
2086 2102 def pathto(self, f, cwd=None):
2087 2103 return self.dirstate.pathto(f, cwd)
2088 2104
    def _loadfilter(self, filter):
        """Parse and cache the filter list for config section ``filter``
        (e.g. b'encode' / b'decode').

        Returns a list of (matcher, function, params) triples, one per
        configured pattern; the result is memoized in ``self._filterpats``.
        """
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                # b'!' disables a pattern inherited from another config level
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                # prefer a registered in-process data filter whose name
                # prefixes the command; the remainder becomes its params
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    # no data filter matched: run the command as an external
                    # shell filter via procutil.filter
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
2114 2130
2115 2131 def _filter(self, filterpats, filename, data):
2116 2132 for mf, fn, cmd in filterpats:
2117 2133 if mf(filename):
2118 2134 self.ui.debug(
2119 2135 b"filtering %s through %s\n"
2120 2136 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2121 2137 )
2122 2138 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2123 2139 break
2124 2140
2125 2141 return data
2126 2142
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # cached [encode] filter list (see ``_loadfilter``)
        return self._loadfilter(b'encode')
2130 2146
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # cached [decode] filter list (see ``_loadfilter``)
        return self._loadfilter(b'decode')
2134 2150
2135 2151 def adddatafilter(self, name, filter):
2136 2152 self._datafilters[name] = filter
2137 2153
2138 2154 def wread(self, filename):
2139 2155 if self.wvfs.islink(filename):
2140 2156 data = self.wvfs.readlink(filename)
2141 2157 else:
2142 2158 data = self.wvfs.read(filename)
2143 2159 return self._filter(self._encodefilterpats, filename, data)
2144 2160
2145 2161 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2146 2162 """write ``data`` into ``filename`` in the working directory
2147 2163
2148 2164 This returns length of written (maybe decoded) data.
2149 2165 """
2150 2166 data = self._filter(self._decodefilterpats, filename, data)
2151 2167 if b'l' in flags:
2152 2168 self.wvfs.symlink(data, filename)
2153 2169 else:
2154 2170 self.wvfs.write(
2155 2171 filename, data, backgroundclose=backgroundclose, **kwargs
2156 2172 )
2157 2173 if b'x' in flags:
2158 2174 self.wvfs.setflags(filename, False, True)
2159 2175 else:
2160 2176 self.wvfs.setflags(filename, False, False)
2161 2177 return len(data)
2162 2178
2163 2179 def wwritedata(self, filename, data):
2164 2180 return self._filter(self._decodefilterpats, filename, data)
2165 2181
2166 2182 def currenttransaction(self):
2167 2183 """return the current transaction or None if non exists"""
2168 2184 if self._transref:
2169 2185 tr = self._transref()
2170 2186 else:
2171 2187 tr = None
2172 2188
2173 2189 if tr and tr.running():
2174 2190 return tr
2175 2191 return None
2176 2192
    def transaction(self, desc, report=None):
        """Open a transaction (or nest into the currently running one).

        ``desc`` names the transaction (journal files, hooks); ``report``
        optionally replaces ``ui.warn`` for rollback reporting.  Requires
        the repo lock (enforced under devel warnings).  Returns the
        transaction object; closures below wire up tag tracking, the
        pre-close validator, release/rollback behavior and the
        txnclose/txnabort hooks.
        """
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            # a transaction is already running: nest into it
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movement from a code perspective. So we fallback to a
        # tracking at the repository level. One could envision to track changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with case where transaction expose new heads without changegroup
        # being involved (eg: phase movement).
        #
        # For now, We gate the feature behind a flag since this likely comes
        # with performance impacts. The current code run more often than needed
        # and do not use caches as much as it could. The current focus is on
        # the behavior of the feature so we disable it by default. The flag
        # will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tag were touched
        # in this one. Changes are recorded in a line base format::
        #
        # <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follow:
        # "-R": tag is removed,
        # "+A": tag is added,
        # "-M": tag is moved (old value),
        # "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once buiding set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we needs it to still exist on the transaction
                        # is close (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hooks since the 'tr.hookargs'
            # dict is copied before these run. In addition we needs the data
            # available to in memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
2443 2459
    def _journalfiles(self):
        # (vfs, filename) pairs for every file that gets a "journal.*"
        # backup at transaction start (see ``_writejournal``/``undofiles``)
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )
2455 2471
2456 2472 def undofiles(self):
2457 2473 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2458 2474
    @unfilteredmethod
    def _writejournal(self, desc):
        """Write the "journal.*" backup files before a transaction starts.

        Backs up the dirstate, narrowspec, branch, a description line
        (repo length + ``desc``), bookmarks and phaseroots so an
        interrupted transaction can be rolled back.
        """
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2473 2489
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True when a journal was found and rolled back, False
        (after a warning) when there was nothing to recover.
        """
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                # b'' -> store files, b'plain' -> .hg/ files
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False
2494 2510
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction using the "undo.*" files.

        Takes the wlock then the store lock, guards the dirstate while
        rolling back, and delegates the real work to ``_rollback``.
        Returns 1 (after a warning) when no rollback information exists,
        otherwise whatever ``_rollback`` returns.
        """
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            # release in reverse acquisition order; tolerates None entries
            release(dsguard, lock, wlock)
2509 2525
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Perform the actual rollback of the last transaction.

        Reads .hg/undo.desc to describe what is being rolled back, refuses
        to roll back a commit that is not the working-directory parent
        unless ``force`` is set, then restores the journaled files and any
        undo.* backups (bookmarks, phaseroots, narrowspec, dirstate,
        branch).  Returns 0 on success (including dry runs).
        """
        ui = self.ui
        try:
            # undo.desc format: "<old changelog length>\n<desc>[\n<detail>]"
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            # undo.desc is missing or unreadable; proceed without metadata
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        # rolling back a commit that is not checked out would silently
        # discard working-directory changes based on it
        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        # if a dirstate parent was stripped, the working copy state must be
        # restored from the undo.* backups as well
        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                # no undo.branch backup: keep whatever branch is current
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
2602 2618
2603 2619 def _buildcacheupdater(self, newtransaction):
2604 2620 """called during transaction to build the callback updating cache
2605 2621
2606 2622 Lives on the repository to help extension who might want to augment
2607 2623 this logic. For this purpose, the created transaction is passed to the
2608 2624 method.
2609 2625 """
2610 2626 # we must avoid cyclic reference between repo and transaction.
2611 2627 reporef = weakref.ref(self)
2612 2628
2613 2629 def updater(tr):
2614 2630 repo = reporef()
2615 2631 repo.updatecaches(tr)
2616 2632
2617 2633 return updater
2618 2634
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        # warm branchmaps only when revisions were actually added
        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            # warm the rev-branch cache for every revision
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warm the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
2671 2687
2672 2688 def invalidatecaches(self):
2673 2689
2674 2690 if '_tagscache' in vars(self):
2675 2691 # can't use delattr on proxy
2676 2692 del self.__dict__['_tagscache']
2677 2693
2678 2694 self._branchcaches.clear()
2679 2695 self.invalidatevolatilesets()
2680 2696 self._sparsesignaturecache.clear()
2681 2697
    def invalidatevolatilesets(self):
        """Drop caches that depend on filtering/obsolescence state.

        Clears the filtered-revision cache, the obsolescence caches and the
        quick-access changeid lookup cache.
        """
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()
2686 2702
2687 2703 def invalidatedirstate(self):
2688 2704 """Invalidates the dirstate, causing the next call to dirstate
2689 2705 to check if it was modified since the last time it was read,
2690 2706 rereading it if it has.
2691 2707
2692 2708 This is different to dirstate.invalidate() that it doesn't always
2693 2709 rereads the dirstate. Use dirstate.invalidate() if you want to
2694 2710 explicitly read the dirstate again (i.e. restoring it to a previous
2695 2711 known good state)."""
2696 2712 if hasunfilteredcache(self, 'dirstate'):
2697 2713 for k in self.dirstate._filecache:
2698 2714 try:
2699 2715 delattr(self.dirstate, k)
2700 2716 except AttributeError:
2701 2717 pass
2702 2718 delattr(self.unfiltered(), 'dirstate')
2703 2719
    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).

        If ``clearfilecache`` is true, the filecache entries themselves are
        removed in addition to the cached attributes.
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            # drop the cached attribute; next access reloads it from disk
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
2739 2755
2740 2756 def invalidateall(self):
2741 2757 """Fully invalidates both store and non-store parts, causing the
2742 2758 subsequent operation to reread any outside changes."""
2743 2759 # extension should hook this to invalidate its caches
2744 2760 self.invalidate()
2745 2761 self.invalidatedirstate()
2746 2762
2747 2763 @unfilteredmethod
2748 2764 def _refreshfilecachestats(self, tr):
2749 2765 """Reload stats of cached files so that they are flagged as valid"""
2750 2766 for k, ce in self._filecache.items():
2751 2767 k = pycompat.sysstr(k)
2752 2768 if k == 'dirstate' or k not in self.__dict__:
2753 2769 continue
2754 2770 ce.refresh()
2755 2771
2756 2772 def _lock(
2757 2773 self,
2758 2774 vfs,
2759 2775 lockname,
2760 2776 wait,
2761 2777 releasefn,
2762 2778 acquirefn,
2763 2779 desc,
2764 2780 ):
2765 2781 timeout = 0
2766 2782 warntimeout = 0
2767 2783 if wait:
2768 2784 timeout = self.ui.configint(b"ui", b"timeout")
2769 2785 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2770 2786 # internal config: ui.signal-safe-lock
2771 2787 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2772 2788
2773 2789 l = lockmod.trylock(
2774 2790 self.ui,
2775 2791 vfs,
2776 2792 lockname,
2777 2793 timeout,
2778 2794 warntimeout,
2779 2795 releasefn=releasefn,
2780 2796 acquirefn=acquirefn,
2781 2797 desc=desc,
2782 2798 signalsafe=signalsafe,
2783 2799 )
2784 2800 return l
2785 2801
2786 2802 def _afterlock(self, callback):
2787 2803 """add a callback to be run when the repository is fully unlocked
2788 2804
2789 2805 The callback will be executed when the outermost lock is released
2790 2806 (with wlock being higher level than 'lock')."""
2791 2807 for ref in (self._wlockref, self._lockref):
2792 2808 l = ref and ref()
2793 2809 if l and l.held:
2794 2810 l.postrelease.append(callback)
2795 2811 break
2796 2812 else: # no lock have been found.
2797 2813 callback(True)
2798 2814
2799 2815 def lock(self, wait=True):
2800 2816 """Lock the repository store (.hg/store) and return a weak reference
2801 2817 to the lock. Use this before modifying the store (e.g. committing or
2802 2818 stripping). If you are opening a transaction, get a lock as well.)
2803 2819
2804 2820 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2805 2821 'wlock' first to avoid a dead-lock hazard."""
2806 2822 l = self._currentlock(self._lockref)
2807 2823 if l is not None:
2808 2824 l.lock()
2809 2825 return l
2810 2826
2811 2827 l = self._lock(
2812 2828 vfs=self.svfs,
2813 2829 lockname=b"lock",
2814 2830 wait=wait,
2815 2831 releasefn=None,
2816 2832 acquirefn=self.invalidate,
2817 2833 desc=_(b'repository %s') % self.origroot,
2818 2834 )
2819 2835 self._lockref = weakref.ref(l)
2820 2836 return l
2821 2837
    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard."""
        # re-enter if the wlock is already held
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                # lock-ordering violation: wlock must come before lock
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            # on release: persist the dirstate unless a parent change is
            # pending, in which case discard the in-memory state instead
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            # mark the just-written dirstate file as valid in the filecache
            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l
2862 2878
2863 2879 def _currentlock(self, lockref):
2864 2880 """Returns the lock if it's held, or None if it's not."""
2865 2881 if lockref is None:
2866 2882 return None
2867 2883 l = lockref()
2868 2884 if l is None or not l.held:
2869 2885 return None
2870 2886 return l
2871 2887
2872 2888 def currentwlock(self):
2873 2889 """Returns the wlock if it's held, or None if it's not."""
2874 2890 return self._currentlock(self._wlockref)
2875 2891
    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable

        For exact/prefix matchers, every explicitly named file must be
        modified/added/removed, be a directory containing such a file, or be
        a subrepo; anything else is reported through ``fail(path, message)``.
        """
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    # require at least one matched file under the directory
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))
2897 2913
    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node returned by commitctx(), or None when there is
        nothing to commit (and ui.allowemptycommit is not set).
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            # reject explicitly named paths that cannot be committed
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            # committing with unresolved merge conflicts is an error
            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                # point the user at the saved message before propagating
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                self.ui.write(
                    _(
                        b"note: use 'hg commit --logfile "
                        b".hg/last-message.txt --edit' to reuse it\n"
                    )
                )
                raise

        def commithook(unused_success):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret
3030 3046
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Commit the commit context *ctx*; thin wrapper around the commit
        module's commitctx(), forwarding *error* and *origctx* unchanged."""
        return commit.commitctx(self, ctx, error=error, origctx=origctx)
3034 3050
    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            # flush the dirty phasecache to disk before invalidation
            self._phasecache.write()
3052 3068
    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
3084 3100
3085 3101 def status(
3086 3102 self,
3087 3103 node1=b'.',
3088 3104 node2=None,
3089 3105 match=None,
3090 3106 ignored=False,
3091 3107 clean=False,
3092 3108 unknown=False,
3093 3109 listsubrepos=False,
3094 3110 ):
3095 3111 '''a convenience method that calls node1.status(node2)'''
3096 3112 return self[node1].status(
3097 3113 node2, match, ignored, clean, unknown, listsubrepos
3098 3114 )
3099 3115
3100 3116 def addpostdsstatus(self, ps):
3101 3117 """Add a callback to run within the wlock, at the point at which status
3102 3118 fixups happen.
3103 3119
3104 3120 On status completion, callback(wctx, status) will be called with the
3105 3121 wlock held, unless the dirstate has changed from underneath or the wlock
3106 3122 couldn't be grabbed.
3107 3123
3108 3124 Callbacks should not capture and use a cached copy of the dirstate --
3109 3125 it might change in the meanwhile. Instead, they should access the
3110 3126 dirstate via wctx.repo().dirstate.
3111 3127
3112 3128 This list is emptied out after each status run -- extensions should
3113 3129 make sure it adds to this list each time dirstate.status is called.
3114 3130 Extensions should also make sure they don't call this for statuses
3115 3131 that don't involve the dirstate.
3116 3132 """
3117 3133
3118 3134 # The list is located here for uniqueness reasons -- it is actually
3119 3135 # managed by the workingctx, but that isn't unique per-repo.
3120 3136 self._postdsstatus.append(ps)
3121 3137
3122 3138 def postdsstatus(self):
3123 3139 """Used by workingctx to get the list of post-dirstate-status hooks."""
3124 3140 return self._postdsstatus
3125 3141
3126 3142 def clearpostdsstatus(self):
3127 3143 """Used by workingctx to clear post-dirstate-status hooks."""
3128 3144 del self._postdsstatus[:]
3129 3145
3130 3146 def heads(self, start=None):
3131 3147 if start is None:
3132 3148 cl = self.changelog
3133 3149 headrevs = reversed(cl.headrevs())
3134 3150 return [cl.node(rev) for rev in headrevs]
3135 3151
3136 3152 heads = self.changelog.heads(start)
3137 3153 # sort the output in rev descending order
3138 3154 return sorted(heads, key=self.changelog.rev, reverse=True)
3139 3155
3140 3156 def branchheads(self, branch=None, start=None, closed=False):
3141 3157 """return a (possibly filtered) list of heads for the given branch
3142 3158
3143 3159 Heads are returned in topological order, from newest to oldest.
3144 3160 If branch is None, use the dirstate branch.
3145 3161 If start is not None, return only heads reachable from start.
3146 3162 If closed is True, return heads that are marked as closed as well.
3147 3163 """
3148 3164 if branch is None:
3149 3165 branch = self[None].branch()
3150 3166 branches = self.branchmap()
3151 3167 if not branches.hasbranch(branch):
3152 3168 return []
3153 3169 # the cache returns heads ordered lowest to highest
3154 3170 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3155 3171 if start is not None:
3156 3172 # filter out the heads that cannot be reached from startrev
3157 3173 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3158 3174 bheads = [h for h in bheads if h in fbheads]
3159 3175 return bheads
3160 3176
3161 3177 def branches(self, nodes):
3162 3178 if not nodes:
3163 3179 nodes = [self.changelog.tip()]
3164 3180 b = []
3165 3181 for n in nodes:
3166 3182 t = n
3167 3183 while True:
3168 3184 p = self.changelog.parents(n)
3169 3185 if p[1] != nullid or p[0] == nullid:
3170 3186 b.append((t, n, p[0], p[1]))
3171 3187 break
3172 3188 n = p[0]
3173 3189 return b
3174 3190
3175 3191 def between(self, pairs):
3176 3192 r = []
3177 3193
3178 3194 for top, bottom in pairs:
3179 3195 n, l, i = top, [], 0
3180 3196 f = 1
3181 3197
3182 3198 while n != bottom and n != nullid:
3183 3199 p = self.changelog.parents(n)[0]
3184 3200 if i == f:
3185 3201 l.append(n)
3186 3202 f = f * 2
3187 3203 n = p
3188 3204 i += 1
3189 3205
3190 3206 r.append(l)
3191 3207
3192 3208 return r
3193 3209
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.

        The default implementation performs no checks and returns nothing.
        """
3199 3215
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        # lazily created, cached on the unfiltered repo
        return util.hooks()
3206 3222
    def pushkey(self, namespace, key, old, new):
        """Update *key* from *old* to *new* in the pushkey *namespace*.

        Fires the ``prepushkey`` hook first (a HookAbort is reported to the
        user and makes this return False), performs the update, and
        schedules the ``pushkey`` hook to run once the repo is fully
        unlocked.  Returns the result of pushkey.push().
        """
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                # expose transaction hook args (e.g. txnid) to prepushkey
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        # defer the pushkey hook until all locks are released
        self._afterlock(runhook)
        return ret
3239 3255
    def listkeys(self, namespace):
        """Return the pushkey values of *namespace* as a dict-like object.

        Fires the ``prelistkeys`` hook (which may abort) before listing and
        the ``listkeys`` hook afterwards.
        """
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values
3246 3262
3247 3263 def debugwireargs(self, one, two, three=None, four=None, five=None):
3248 3264 '''used to test argument passing over the wire'''
3249 3265 return b"%s %s %s %s %s" % (
3250 3266 one,
3251 3267 two,
3252 3268 pycompat.bytestr(three),
3253 3269 pycompat.bytestr(four),
3254 3270 pycompat.bytestr(five),
3255 3271 )
3256 3272
3257 3273 def savecommitmessage(self, text):
3258 3274 fp = self.vfs(b'last-message.txt', b'wb')
3259 3275 try:
3260 3276 fp.write(text)
3261 3277 finally:
3262 3278 fp.close()
3263 3279 return self.pathto(fp.name[len(self.root) + 1 :])
3264 3280
3265 3281
# used to avoid circular references so destructors work
def aftertrans(files):
    """Build a callback that renames journal files to their final names.

    *files* is an iterable of (vfs, src, dest) triples; the returned
    zero-argument callable performs the renames.  Deliberately holds no
    repository reference (see module comment about circular references).
    """
    renamefiles = [tuple(entry) for entry in files]

    def runrenames():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return runrenames
3282 3298
3283 3299
def undoname(fn):
    """Map a journal file path to the corresponding undo file path.

    Only the leading 'journal' in the basename is replaced; the directory
    part is preserved.  The basename must start with b'journal'.
    """
    directory, basename = os.path.split(fn)
    assert basename.startswith(b'journal')
    undobase = basename.replace(b'journal', b'undo', 1)
    return os.path.join(directory, undobase)
3288 3304
3289 3305
def instance(ui, path, create, intents=None, createopts=None):
    """Open (and optionally first create) the local repository at *path*.

    *path* may be a local URL; it is converted to a filesystem path first.
    Returns the repository object from makelocalrepository().
    """
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)
3296 3312
3297 3313
def islocal(path):
    """Return True: repositories handled by this module are always local."""
    return True
3300 3316
3301 3317
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    opts = dict(createopts) if createopts else {}

    if b'backend' not in opts:
        # experimental config: storage.new-repo-backend
        opts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return opts
3315 3331
3316 3332
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.

    Returns a set of requirement strings derived from *createopts* and the
    ui configuration.  Raises ProgrammingError when *createopts* lacks a
    backend key and Abort for an unsupported backend.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    # only the revlogv1 backend is currently supported here
    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    # fncache requires store; dotencode requires fncache
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    # pick the first configured compression engine that is available
    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'exp-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements
3423 3439
3424 3440
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it"""

    dropped = set()

    # all of the checks below only matter when the 'store' requirement is
    # absent (i.e. 'format.usestore' was disabled)
    if b'store' not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            # fixed typo: "beacuse" -> "because" in the user-facing warning
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            # sharing depends on the store; this cannot be fixed by simply
            # dropping a requirement, so abort repository creation outright
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b"ignoring enabled 'format.exp-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped
3466 3482
3467 3483
def filterknowncreateopts(ui, createopts):
    """Filter repo creation options down to the ones nobody understands.

    Given a dict of repo creation options, return the subset of entries
    that no loaded code knows how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    handled = frozenset(
        (
            b'backend',
            b'lfs',
            b'narrowfiles',
            b'sharedrepo',
            b'sharedrelative',
            b'shareditems',
            b'shallowfilestore',
        )
    )

    unknown = {}
    for opt, value in createopts.items():
        if opt not in handled:
            unknown[opt] = value
    return unknown
3493 3509
3494 3510
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    Raises ``error.Abort`` for unknown creation options and
    ``error.RepoError`` if a repository already exists at ``path``.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    # reject any creation option that neither core nor a loaded extension
    # knows how to handle, before touching the filesystem
    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    # drop requirements whose dependencies are disabled (may also abort)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write stored requirements
    # For new shared repository, we don't need to write the store
    # requirements as they are already present in store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
3608 3624
3609 3625
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""

    # Replacement type: every attribute access blows up, except close(),
    # because some repo constructors call close() on repo references they
    # still hold.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # Let the instance perform any cleanup before it is neutered.
    repo.close()

    # A repoview may intercept __setattr__, so swap the type at the
    # lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,3079 +1,3089 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import contextlib
18 18 import errno
19 19 import io
20 20 import os
21 21 import struct
22 22 import zlib
23 23
24 24 # import stuff from node for others to import from revlog
25 25 from .node import (
26 26 bin,
27 27 hex,
28 28 nullhex,
29 29 nullid,
30 30 nullrev,
31 31 short,
32 32 wdirfilenodeids,
33 33 wdirhex,
34 34 wdirid,
35 35 wdirrev,
36 36 )
37 37 from .i18n import _
38 38 from .pycompat import getattr
39 39 from .revlogutils.constants import (
40 40 FLAG_GENERALDELTA,
41 41 FLAG_INLINE_DATA,
42 42 REVLOGV0,
43 43 REVLOGV1,
44 44 REVLOGV1_FLAGS,
45 45 REVLOGV2,
46 46 REVLOGV2_FLAGS,
47 47 REVLOG_DEFAULT_FLAGS,
48 48 REVLOG_DEFAULT_FORMAT,
49 49 REVLOG_DEFAULT_VERSION,
50 50 )
51 51 from .revlogutils.flagutil import (
52 52 REVIDX_DEFAULT_FLAGS,
53 53 REVIDX_ELLIPSIS,
54 54 REVIDX_EXTSTORED,
55 55 REVIDX_FLAGS_ORDER,
56 56 REVIDX_HASCOPIESINFO,
57 57 REVIDX_ISCENSORED,
58 58 REVIDX_RAWTEXT_CHANGING_FLAGS,
59 59 REVIDX_SIDEDATA,
60 60 )
61 61 from .thirdparty import attr
62 62 from . import (
63 63 ancestor,
64 64 dagop,
65 65 error,
66 66 mdiff,
67 67 policy,
68 68 pycompat,
69 69 templatefilters,
70 70 util,
71 71 )
72 72 from .interfaces import (
73 73 repository,
74 74 util as interfaceutil,
75 75 )
76 76 from .revlogutils import (
77 77 deltas as deltautil,
78 78 flagutil,
79 79 nodemap as nodemaputil,
80 80 sidedata as sidedatautil,
81 81 )
82 82 from .utils import (
83 83 storageutil,
84 84 stringutil,
85 85 )
86 86
87 87 # blanked usage of all the name to prevent pyflakes constraints
88 88 # We need these name available in the module for extensions.
89 89 REVLOGV0
90 90 REVLOGV1
91 91 REVLOGV2
92 92 FLAG_INLINE_DATA
93 93 FLAG_GENERALDELTA
94 94 REVLOG_DEFAULT_FLAGS
95 95 REVLOG_DEFAULT_FORMAT
96 96 REVLOG_DEFAULT_VERSION
97 97 REVLOGV1_FLAGS
98 98 REVLOGV2_FLAGS
99 99 REVIDX_ISCENSORED
100 100 REVIDX_ELLIPSIS
101 101 REVIDX_SIDEDATA
102 102 REVIDX_HASCOPIESINFO
103 103 REVIDX_EXTSTORED
104 104 REVIDX_DEFAULT_FLAGS
105 105 REVIDX_FLAGS_ORDER
106 106 REVIDX_RAWTEXT_CHANGING_FLAGS
107 107
108 108 parsers = policy.importmod('parsers')
109 109 rustancestor = policy.importrust('ancestor')
110 110 rustdagop = policy.importrust('dagop')
111 111 rustrevlog = policy.importrust('revlog')
112 112
113 113 # Aliased for performance.
114 114 _zlibdecompress = zlib.decompress
115 115
116 116 # max size of revlog with inline data
117 117 _maxinline = 131072
118 118 _chunksize = 1048576
119 119
120 120 # Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    """Read transform for REVIDX_ELLIPSIS: pass the text through unchanged."""
    return (text, False, {})


def ellipsiswriteprocessor(rl, text, sidedata):
    """Write transform for REVIDX_ELLIPSIS: pass the text through unchanged."""
    return (text, False)


def ellipsisrawprocessor(rl, text):
    """Raw hook for REVIDX_ELLIPSIS: always reports False."""
    return False


# (read, write, raw) processor triple registered for the ellipsis flag
ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)
138 138
139 139
def getoffset(q):
    """Return the high bits (data offset) of a packed ``offset << 16 | flags``
    index value."""
    offset = q >> 16
    return int(offset)
142 142
143 143
def gettype(q):
    """Return the low 16 bits (flag field) of a packed
    ``offset << 16 | flags`` index value."""
    flags = q & 0xFFFF
    return int(flags)
146 146
147 147
def offset_type(offset, type):
    """Pack ``offset`` and flag bits ``type`` into a single index value.

    Raises ValueError if ``type`` carries bits outside the known revlog
    index flags.
    """
    if type & ~flagutil.REVIDX_KNOWN_FLAGS:
        raise ValueError(b'unknown revlog index flags')
    return (int(offset) << 16) | type
152 152
153 153
def _verify_revision(rl, skipflags, state, node):
    """Verify the integrity of the given revlog ``node`` while providing a hook
    point for extensions to influence the operation."""
    if not skipflags:
        # Side-effect: read content and verify hash.
        rl.revision(node)
    else:
        # flagged revisions are recorded for the caller to skip instead
        state[b'skipread'].add(node)
162 162
163 163
164 # True if a fast implementation for persistent-nodemap is available
165 #
166 # We also consider we have a "fast" implementation in "pure" python because
167 # people using pure don't really have performance consideration (and a
168 # wheelbarrow of other slowness source)
169 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
170 parsers, 'BaseIndexObject'
171 )
172
173
@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext
    node: expected hash of the revision
    p1, p2: parent revs of the revision
    btext: built text cache consisting of a one-element list
    cachedelta: (baserev, uncompressed_delta) or None
    flags: flags associated to the revision storage

    One of btext[0] or cachedelta must be set.
    """

    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    # not covered by the docstring above; presumably the fulltext length
    # (NOTE(review): meaning inferred from the name -- confirm at call sites)
    textlen = attr.ib()
    cachedelta = attr.ib()
    flags = attr.ib()
183 193
184 194
# Concrete carrier of the repository.irevisiondelta interface for revlog
# storage; see that interface for the field contracts.
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    # optional; defaults to None
    linknode = attr.ib(default=None)
197 207
198 208
# Problem report emitted during verification, implementing the
# repository.iverifyproblem interface; carries a warning and/or error
# message and optionally the affected node.
@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)
205 215
206 216
207 217 # index v0:
208 218 # 4 bytes: offset
209 219 # 4 bytes: compressed length
210 220 # 4 bytes: base rev
211 221 # 4 bytes: link rev
212 222 # 20 bytes: parent 1 nodeid
213 223 # 20 bytes: parent 2 nodeid
214 224 # 20 bytes: nodeid
215 225 indexformatv0 = struct.Struct(b">4l20s20s20s")
216 226 indexformatv0_pack = indexformatv0.pack
217 227 indexformatv0_unpack = indexformatv0.unpack
218 228
219 229
class revlogoldindex(list):
    """In-memory index for version 0 revlogs.

    A plain list of v1-format entry tuples, augmented with a lazily built
    node -> rev mapping (``_nodemap``) that is kept in sync on append and
    truncation.
    """

    @property
    def nodemap(self):
        # deprecated accessor kept for compatibility; warns and forwards
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # built on first access by scanning every entry's nodeid (slot 7)
        nodemap = nodemaputil.NodeMap({nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def append(self, tup):
        # register the new entry's nodeid before appending
        self._nodemap[tup[7]] = len(self)
        super(revlogoldindex, self).append(tup)

    def __delitem__(self, i):
        # only tail truncation (del index[start:-1]) is supported; drop the
        # removed entries from the nodemap as well
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        for r in pycompat.xrange(i.start, len(self)):
            del self._nodemap[self[r][7]]
        super(revlogoldindex, self).__delitem__(i)

    def clearcaches(self):
        # forget the cached nodemap; it will be rebuilt on next access
        self.__dict__.pop('_nodemap', None)

    def __getitem__(self, i):
        # rev -1 (nullrev) resolves to a synthetic null entry
        if i == -1:
            return (0, 0, 0, -1, -1, -1, -1, nullid)
        return list.__getitem__(self, i)
269 279
270 280
class revlogoldio(object):
    """Parser/packer for the original (version 0) revlog index format."""

    def __init__(self):
        # fixed on-disk size of one v0 index record
        self.size = indexformatv0.size

    def parseindex(self, data, inline):
        """Parse raw v0 index ``data`` into ``(index, cache)``.

        ``inline`` is accepted for interface parity with revlogio but is
        ignored here; the returned cache is always None.
        """
        s = self.size
        index = []
        nodemap = nodemaputil.NodeMap({nullid: nullrev})
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off : off + s]
            off += s
            e = indexformatv0_unpack(cur)
            # transform to revlogv1 format
            e2 = (
                offset_type(e[0], 0),
                e[1],
                -1,
                e[2],
                e[3],
                # v0 stores parent nodeids; map them back to revs
                nodemap.get(e[4], nullrev),
                nodemap.get(e[5], nullrev),
                e[6],
            )
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        index = revlogoldindex(index)
        return index, None

    def packentry(self, entry, node, version, rev):
        """Serialize a v1-shaped index ``entry`` back to the v0 layout.

        ``node`` is a callable mapping a revision number to its nodeid
        (v0 stores parent nodeids rather than parent revs). Raises
        RevlogError if the entry carries flags, which v0 cannot encode.
        """
        if gettype(entry[0]):
            raise error.RevlogError(
                _(b'index entry flags need revlog version 1')
            )
        e2 = (
            getoffset(entry[0]),
            entry[1],
            entry[3],
            entry[4],
            node(entry[5]),
            node(entry[6]),
            entry[7],
        )
        return indexformatv0_pack(*e2)
318 328
319 329
320 330 # index ng:
321 331 # 6 bytes: offset
322 332 # 2 bytes: flags
323 333 # 4 bytes: compressed length
324 334 # 4 bytes: uncompressed length
325 335 # 4 bytes: base rev
326 336 # 4 bytes: link rev
327 337 # 4 bytes: parent 1 rev
328 338 # 4 bytes: parent 2 rev
329 339 # 32 bytes: nodeid
330 340 indexformatng = struct.Struct(b">Qiiiiii20s12x")
331 341 indexformatng_pack = indexformatng.pack
332 342 versionformat = struct.Struct(b">I")
333 343 versionformat_pack = versionformat.pack
334 344 versionformat_unpack = versionformat.unpack
335 345
336 346 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
337 347 # signed integer)
338 348 _maxentrysize = 0x7FFFFFFF
339 349
340 350
class revlogio(object):
    """I/O logic for version 1 ("ng") revlog index entries."""

    def __init__(self):
        # fixed on-disk size of one index record
        self.size = indexformatng.size

    def parseindex(self, data, inline):
        # the C implementation does the heavy lifting
        return parsers.parse_index2(data, inline)

    def packentry(self, entry, node, version, rev):
        packed = indexformatng_pack(*entry)
        if rev == 0:
            # rev 0 carries the revlog version header in its first 4 bytes
            packed = versionformat_pack(version) + packed[4:]
        return packed
355 365
356 366
# Defined below only when the C parser exposes the devel nodemap entry
# point; stays None otherwise (callers must check for that).
NodemapRevlogIO = None

if util.safehasattr(parsers, 'parse_index_devel_nodemap'):

    class NodemapRevlogIO(revlogio):
        """A debug oriented IO class that return a PersistentNodeMapIndexObject

        The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
        """

        def parseindex(self, data, inline):
            index, cache = parsers.parse_index_devel_nodemap(data, inline)
            return index, cache
370 380
371 381
class rustrevlogio(revlogio):
    """revlogio variant that wraps the parsed index in a Rust MixedIndex."""

    def parseindex(self, data, inline):
        parsed = super(rustrevlogio, self).parseindex(data, inline)
        index, cache = parsed
        return rustrevlog.MixedIndex(index), cache
376 386
377 387
378 388 class revlog(object):
379 389 """
380 390 the underlying revision storage object
381 391
382 392 A revlog consists of two parts, an index and the revision data.
383 393
384 394 The index is a file with a fixed record size containing
385 395 information on each revision, including its nodeid (hash), the
386 396 nodeids of its parents, the position and offset of its data within
387 397 the data file, and the revision it's based on. Finally, each entry
388 398 contains a linkrev entry that can serve as a pointer to external
389 399 data.
390 400
391 401 The revision data itself is a linear collection of data chunks.
392 402 Each chunk represents a revision and is usually represented as a
393 403 delta against the previous chunk. To bound lookup time, runs of
394 404 deltas are limited to about 2 times the length of the original
395 405 version data. This makes retrieval of a version proportional to
396 406 its size, or O(1) relative to the number of revisions.
397 407
398 408 Both pieces of the revlog are written to in an append-only
399 409 fashion, which means we never need to rewrite a file to insert or
400 410 remove data, and can use some simple techniques to avoid the need
401 411 for locking while reading.
402 412
403 413 If checkambig, indexfile is opened with checkambig=True at
404 414 writing, to avoid file stat ambiguity.
405 415
406 416 If mmaplargeindex is True, and an mmapindexthreshold is set, the
407 417 index will be mmapped rather than read if it is larger than the
408 418 configured threshold.
409 419
410 420 If censorable is True, the revlog can have censored revisions.
411 421
412 422 If `upperboundcomp` is not None, this is the expected maximal gain from
413 423 compression for the data content.
414 424 """
415 425
416 426 _flagserrorclass = error.RevlogError
417 427
    def __init__(
        self,
        opener,
        indexfile,
        datafile=None,
        checkambig=False,
        mmaplargeindex=False,
        censorable=False,
        upperboundcomp=None,
        persistentnodemap=False,
    ):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        indexfile is the path of the index file; datafile defaults to the
        same path with a ".d" suffix.  If persistentnodemap is true, a
        ".n" sibling file is used for the persistent nodemap (preferring
        an existing ".n.a" file when the index itself is a ".a" file).
        """
        self.upperboundcomp = upperboundcomp
        self.indexfile = indexfile
        self.datafile = datafile or (indexfile[:-2] + b".d")
        self.nodemap_file = None
        if persistentnodemap:
            if indexfile.endswith(b'.a'):
                # ".a" index: prefer a pending nodemap file when it exists
                pending_path = indexfile[:-4] + b".n.a"
                if opener.exists(pending_path):
                    self.nodemap_file = pending_path
                else:
                    self.nodemap_file = indexfile[:-4] + b".n"
            else:
                self.nodemap_file = indexfile[:-2] + b".n"

        self.opener = opener
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        self._mmaplargeindex = mmaplargeindex
        self._censorable = censorable
        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, b'')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._deltabothparents = True
        self.index = None
        self._nodemap_docket = None
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Compression engine and options; defaults below may be overridden
        # from opener options in _loadindex().
        self._compengine = b'zlib'
        self._compengineopts = {}
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._sparserevlog = False
        self._srdensitythreshold = 0.50
        self._srmingapsize = 262144

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)

        # 2-tuple of file handles being used for active writing.
        self._writinghandles = None

        self._loadindex()
487 497
    def _loadindex(self):
        """Read and parse the index file, configuring this revlog from
        ``self.opener.options`` along the way.

        Sets (among others) ``self.version``, ``self._inline``,
        ``self._generaldelta``, ``self._io``, ``self.index`` and
        ``self._chunkcache``.  Raises ``error.RevlogError`` on invalid
        options, unknown on-disk versions/flags or a corrupted index.
        """
        mmapindexthreshold = None
        opts = self.opener.options

        # Version flags a *new* revlog would be created with; only used
        # below when no index data exists yet on disk.
        if b'revlogv2' in opts:
            newversionflags = REVLOGV2 | FLAG_INLINE_DATA
        elif b'revlogv1' in opts:
            newversionflags = REVLOGV1 | FLAG_INLINE_DATA
            if b'generaldelta' in opts:
                newversionflags |= FLAG_GENERALDELTA
        elif b'revlogv0' in self.opener.options:
            newversionflags = REVLOGV0
        else:
            newversionflags = REVLOG_DEFAULT_VERSION

        # Apply tuning options provided through the opener.
        if b'chunkcachesize' in opts:
            self._chunkcachesize = opts[b'chunkcachesize']
        if b'maxchainlen' in opts:
            self._maxchainlen = opts[b'maxchainlen']
        if b'deltabothparents' in opts:
            self._deltabothparents = opts[b'deltabothparents']
        self._lazydelta = bool(opts.get(b'lazydelta', True))
        self._lazydeltabase = False
        if self._lazydelta:
            self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
        if b'compengine' in opts:
            self._compengine = opts[b'compengine']
        if b'zlib.level' in opts:
            self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
        if b'zstd.level' in opts:
            self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
        if b'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts[b'maxdeltachainspan']
        if self._mmaplargeindex and b'mmapindexthreshold' in opts:
            mmapindexthreshold = opts[b'mmapindexthreshold']
        self.hassidedata = bool(opts.get(b'side-data', False))
        if self.hassidedata:
            self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
        self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
        withsparseread = bool(opts.get(b'with-sparse-read', False))
        # sparse-revlog forces sparse-read
        self._withsparseread = self._sparserevlog or withsparseread
        if b'sparse-read-density-threshold' in opts:
            self._srdensitythreshold = opts[b'sparse-read-density-threshold']
        if b'sparse-read-min-gap-size' in opts:
            self._srmingapsize = opts[b'sparse-read-min-gap-size']
        if opts.get(b'enableellipsis'):
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in pycompat.iteritems(
            opts.get(b'flagprocessors', {})
        ):
            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

        # the chunk cache size must be a positive power of two
        if self._chunkcachesize <= 0:
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not greater than 0')
                % self._chunkcachesize
            )
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(
                _(b'revlog chunk cache size %r is not a power of 2')
                % self._chunkcachesize
            )

        # Read the raw index data; a missing file means an empty revlog.
        indexdata = b''
        self._initempty = True
        try:
            with self._indexfp() as f:
                if (
                    mmapindexthreshold is not None
                    and self.opener.fstat(f).st_size >= mmapindexthreshold
                ):
                    # TODO: should .close() to release resources without
                    # relying on Python GC
                    indexdata = util.buffer(util.mmapread(f))
                else:
                    indexdata = f.read()
            if len(indexdata) > 0:
                # first 4 bytes of an existing index hold version + flags
                versionflags = versionformat_unpack(indexdata[:4])[0]
                self._initempty = False
            else:
                versionflags = newversionflags
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

            versionflags = newversionflags

        self.version = versionflags

        flags = versionflags & ~0xFFFF
        fmt = versionflags & 0xFFFF

        # Validate per-version flags and derive inline/generaldelta settings.
        if fmt == REVLOGV0:
            if flags:
                raise error.RevlogError(
                    _(b'unknown flags (%#04x) in version %d revlog %s')
                    % (flags >> 16, fmt, self.indexfile)
                )

            self._inline = False
            self._generaldelta = False

        elif fmt == REVLOGV1:
            if flags & ~REVLOGV1_FLAGS:
                raise error.RevlogError(
                    _(b'unknown flags (%#04x) in version %d revlog %s')
                    % (flags >> 16, fmt, self.indexfile)
                )

            self._inline = versionflags & FLAG_INLINE_DATA
            self._generaldelta = versionflags & FLAG_GENERALDELTA

        elif fmt == REVLOGV2:
            if flags & ~REVLOGV2_FLAGS:
                raise error.RevlogError(
                    _(b'unknown flags (%#04x) in version %d revlog %s')
                    % (flags >> 16, fmt, self.indexfile)
                )

            self._inline = versionflags & FLAG_INLINE_DATA
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True

        else:
            raise error.RevlogError(
                _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
            )
        # sparse-revlog can't be on without general-delta (issue6056)
        if not self._generaldelta:
            self._sparserevlog = False

        self._storedeltachains = True

        # Select the index parser implementation.
        devel_nodemap = (
            self.nodemap_file
            and opts.get(b'devel-force-nodemap', False)
            and NodemapRevlogIO is not None
        )

        use_rust_index = False
        if rustrevlog is not None:
            if self.nodemap_file is not None:
                use_rust_index = True
            else:
                use_rust_index = self.opener.options.get(b'rust.index')

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        elif devel_nodemap:
            self._io = NodemapRevlogIO()
        elif use_rust_index:
            self._io = rustrevlogio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
            index, _chunkcache = d
            use_nodemap = (
                not self._inline
                and self.nodemap_file is not None
                and util.safehasattr(index, 'update_nodemap_data')
            )
            if use_nodemap:
                nodemap_data = nodemaputil.persisted_data(self)
                if nodemap_data is not None:
                    docket = nodemap_data[0]
                    # only reuse the persisted nodemap if its recorded tip
                    # still matches the parsed index
                    if (
                        len(d[0]) > docket.tip_rev
                        and d[0][docket.tip_rev][7] == docket.tip_node
                    ):
                        # no changelog tampering
                        self._nodemap_docket = docket
                        index.update_nodemap_data(*nodemap_data)
        except (ValueError, IndexError):
            raise error.RevlogError(
                _(b"index %s is corrupted") % self.indexfile
            )
        self.index, self._chunkcache = d
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = util.lrucachedict(500)
        # revlog header -> revlog compressor
        self._decompressors = {}
674 684
    @util.propertycache
    def _compressor(self):
        # Compressor for this revlog's configured engine/options, created
        # lazily and cached on the instance by util.propertycache.
        engine = util.compengines[self._compengine]
        return engine.revlogcompressor(self._compengineopts)
679 689
680 690 def _indexfp(self, mode=b'r'):
681 691 """file object for the revlog's index file"""
682 692 args = {'mode': mode}
683 693 if mode != b'r':
684 694 args['checkambig'] = self._checkambig
685 695 if mode == b'w':
686 696 args['atomictemp'] = True
687 697 return self.opener(self.indexfile, **args)
688 698
689 699 def _datafp(self, mode=b'r'):
690 700 """file object for the revlog's data file"""
691 701 return self.opener(self.datafile, mode=mode)
692 702
    @contextlib.contextmanager
    def _datareadfp(self, existingfp=None):
        """file object suitable to read data

        Yields, in order of preference: the caller-provided handle, one of
        the handles currently used for writing, or a freshly opened one.
        """
        # Use explicit file handle, if given.
        if existingfp is not None:
            yield existingfp

        # Use a file handle being actively used for writes, if available.
        # There is some danger to doing this because reads will seek the
        # file. However, _writeentry() performs a SEEK_END before all writes,
        # so we should be safe.
        elif self._writinghandles:
            if self._inline:
                # inline revlogs store data in the index file itself
                yield self._writinghandles[0]
            else:
                yield self._writinghandles[1]

        # Otherwise open a new file handle.
        else:
            if self._inline:
                func = self._indexfp
            else:
                func = self._datafp
            with func() as fp:
                yield fp
718 728
719 729 def tiprev(self):
720 730 return len(self.index) - 1
721 731
722 732 def tip(self):
723 733 return self.node(self.tiprev())
724 734
725 735 def __contains__(self, rev):
726 736 return 0 <= rev < len(self)
727 737
    def __len__(self):
        # one index entry per revision, so the index length is the
        # revision count
        return len(self.index)
730 740
    def __iter__(self):
        # iterate over every revision number, in ascending order
        return iter(pycompat.xrange(len(self)))
733 743
    def revs(self, start=0, stop=None):
        """iterate over all rev in this revlog (from start to stop)"""
        # range semantics (inclusive/exclusive stop) are defined by
        # storageutil.iterrevs -- see that helper for details
        return storageutil.iterrevs(len(self), start=start, stop=stop)
737 747
    @property
    def nodemap(self):
        # Deprecated compatibility shim: warns, then forwards to the
        # index's nodemap.
        msg = (
            b"revlog.nodemap is deprecated, "
            b"use revlog.index.[has_node|rev|get_rev]"
        )
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap
746 756
    @property
    def _nodecache(self):
        # Deprecated compatibility shim: warns, then forwards to the
        # index's nodemap.
        msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap
752 762
753 763 def hasnode(self, node):
754 764 try:
755 765 self.rev(node)
756 766 return True
757 767 except KeyError:
758 768 return False
759 769
760 770 def candelta(self, baserev, rev):
761 771 """whether two revisions (baserev, rev) can be delta-ed or not"""
762 772 # Disable delta if either rev requires a content-changing flag
763 773 # processor (ex. LFS). This is because such flag processor can alter
764 774 # the rawtext content that the delta will be based on, and two clients
765 775 # could have a same revlog node with different flags (i.e. different
766 776 # rawtext contents) and the delta could be incompatible.
767 777 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
768 778 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
769 779 ):
770 780 return False
771 781 return True
772 782
    def update_caches(self, transaction):
        """Update the persisted caches (on-disk nodemap) of this revlog.

        With a transaction, the update is registered on it through
        setup_persistent_nodemap; without one, the nodemap is written
        immediately.
        """
        if self.nodemap_file is not None:
            if transaction is None:
                nodemaputil.update_persistent_nodemap(self)
            else:
                nodemaputil.setup_persistent_nodemap(transaction, self)
779 789
    def clearcaches(self):
        """Reset all in-memory caches, then re-load persisted nodemap data."""
        self._revisioncache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, b'')
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The python code is the one responsible for validating the docket, we
        # end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self.nodemap_file is not None
            and util.safehasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                # nodemap_data[0] is the docket describing the on-disk data
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)
799 809
    def rev(self, node):
        """Return the revision number for *node*.

        Raises WdirUnsupported for working-directory pseudo-nodes and
        LookupError when the node is unknown.
        """
        try:
            return self.index.rev(node)
        except TypeError:
            # invalid input type: propagate unchanged
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise error.LookupError(node, self.indexfile, _(b'no node'))
810 820
811 821 # Accessors for index entries.
812 822
813 823 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
814 824 # are flags.
    def start(self, rev):
        # Index field 0 packs the offset in the upper 48 bits and the flags
        # in the lower 16; shift the flags away to recover the offset.
        return int(self.index[rev][0] >> 16)
817 827
    def flags(self, rev):
        # the lower 16 bits of index field 0 hold the revision flags
        return self.index[rev][0] & 0xFFFF
820 830
    def length(self, rev):
        # index field 1: stored (compressed) length of the revision's data
        return self.index[rev][1]
823 833
824 834 def rawsize(self, rev):
825 835 """return the length of the uncompressed text for a given revision"""
826 836 l = self.index[rev][2]
827 837 if l >= 0:
828 838 return l
829 839
830 840 t = self.rawdata(rev)
831 841 return len(t)
832 842
833 843 def size(self, rev):
834 844 """length of non-raw text (processed by a "read" flag processor)"""
835 845 # fast path: if no "read" flag processor could change the content,
836 846 # size is rawsize. note: ELLIPSIS is known to not change the content.
837 847 flags = self.flags(rev)
838 848 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
839 849 return self.rawsize(rev)
840 850
841 851 return len(self.revision(rev, raw=False))
842 852
843 853 def chainbase(self, rev):
844 854 base = self._chainbasecache.get(rev)
845 855 if base is not None:
846 856 return base
847 857
848 858 index = self.index
849 859 iterrev = rev
850 860 base = index[iterrev][3]
851 861 while base != iterrev:
852 862 iterrev = base
853 863 base = index[iterrev][3]
854 864
855 865 self._chainbasecache[rev] = base
856 866 return base
857 867
    def linkrev(self, rev):
        # index field 4 stores the linkrev for this revision
        return self.index[rev][4]
860 870
861 871 def parentrevs(self, rev):
862 872 try:
863 873 entry = self.index[rev]
864 874 except IndexError:
865 875 if rev == wdirrev:
866 876 raise error.WdirUnsupported
867 877 raise
868 878
869 879 return entry[5], entry[6]
870 880
    # fast parentrevs(rev) where rev isn't filtered
    # (alias bound at class-definition time, so it keeps the unfiltered
    # implementation reachable even if a subclass overrides parentrevs)
    _uncheckedparentrevs = parentrevs
873 883
874 884 def node(self, rev):
875 885 try:
876 886 return self.index[rev][7]
877 887 except IndexError:
878 888 if rev == wdirrev:
879 889 raise error.WdirUnsupported
880 890 raise
881 891
882 892 # Derived from index values.
883 893
    def end(self, rev):
        # one past the last byte of rev's stored data: offset + length
        return self.start(rev) + self.length(rev)
886 896
887 897 def parents(self, node):
888 898 i = self.index
889 899 d = i[self.rev(node)]
890 900 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
891 901
    def chainlen(self, rev):
        # first element of _chaininfo's result is the delta-chain length
        return self._chaininfo(rev)[0]
894 904
    def _chaininfo(self, rev):
        """Return (chain-length, sum-of-compressed-delta-lengths) for rev.

        Results are memoized in self._chaininfocache; hitting a cached
        intermediate revision short-circuits the walk.
        """
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        # a revision whose delta base (field 3) is itself ends the chain
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                # delta base is stored explicitly in the index
                iterrev = e[3]
            else:
                # legacy mode: each revision deltas against its predecessor
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r
925 935
    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        # a revision whose delta base (field 3) is itself marks the chain root
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            # the chain root itself belongs to the chain
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped
966 976
967 977 def ancestors(self, revs, stoprev=0, inclusive=False):
968 978 """Generate the ancestors of 'revs' in reverse revision order.
969 979 Does not generate revs lower than stoprev.
970 980
971 981 See the documentation for ancestor.lazyancestors for more details."""
972 982
973 983 # first, make sure start revisions aren't filtered
974 984 revs = list(revs)
975 985 checkrev = self.node
976 986 for r in revs:
977 987 checkrev(r)
978 988 # and we're sure ancestors aren't filtered as well
979 989
980 990 if rustancestor is not None:
981 991 lazyancestors = rustancestor.LazyAncestors
982 992 arg = self.index
983 993 else:
984 994 lazyancestors = ancestor.lazyancestors
985 995 arg = self._uncheckedparentrevs
986 996 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
987 997
    def descendants(self, revs):
        # all revisions reachable from *revs* through child edges
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)
990 1000
991 1001 def findcommonmissing(self, common=None, heads=None):
992 1002 """Return a tuple of the ancestors of common and the ancestors of heads
993 1003 that are not ancestors of common. In revset terminology, we return the
994 1004 tuple:
995 1005
996 1006 ::common, (::heads) - (::common)
997 1007
998 1008 The list is sorted by revision number, meaning it is
999 1009 topologically sorted.
1000 1010
1001 1011 'heads' and 'common' are both lists of node IDs. If heads is
1002 1012 not supplied, uses all of the revlog's heads. If common is not
1003 1013 supplied, uses nullid."""
1004 1014 if common is None:
1005 1015 common = [nullid]
1006 1016 if heads is None:
1007 1017 heads = self.heads()
1008 1018
1009 1019 common = [self.rev(n) for n in common]
1010 1020 heads = [self.rev(n) for n in heads]
1011 1021
1012 1022 # we want the ancestors, but inclusive
1013 1023 class lazyset(object):
1014 1024 def __init__(self, lazyvalues):
1015 1025 self.addedvalues = set()
1016 1026 self.lazyvalues = lazyvalues
1017 1027
1018 1028 def __contains__(self, value):
1019 1029 return value in self.addedvalues or value in self.lazyvalues
1020 1030
1021 1031 def __iter__(self):
1022 1032 added = self.addedvalues
1023 1033 for r in added:
1024 1034 yield r
1025 1035 for r in self.lazyvalues:
1026 1036 if not r in added:
1027 1037 yield r
1028 1038
1029 1039 def add(self, value):
1030 1040 self.addedvalues.add(value)
1031 1041
1032 1042 def update(self, values):
1033 1043 self.addedvalues.update(values)
1034 1044
1035 1045 has = lazyset(self.ancestors(common))
1036 1046 has.add(nullrev)
1037 1047 has.update(common)
1038 1048
1039 1049 # take all ancestors from heads that aren't in has
1040 1050 missing = set()
1041 1051 visit = collections.deque(r for r in heads if r not in has)
1042 1052 while visit:
1043 1053 r = visit.popleft()
1044 1054 if r in missing:
1045 1055 continue
1046 1056 else:
1047 1057 missing.add(r)
1048 1058 for p in self.parentrevs(r):
1049 1059 if p not in has:
1050 1060 visit.append(p)
1051 1061 missing = list(missing)
1052 1062 missing.sort()
1053 1063 return has, [self.node(miss) for miss in missing]
1054 1064
1055 1065 def incrementalmissingrevs(self, common=None):
1056 1066 """Return an object that can be used to incrementally compute the
1057 1067 revision numbers of the ancestors of arbitrary sets that are not
1058 1068 ancestors of common. This is an ancestor.incrementalmissingancestors
1059 1069 object.
1060 1070
1061 1071 'common' is a list of revision numbers. If common is not supplied, uses
1062 1072 nullrev.
1063 1073 """
1064 1074 if common is None:
1065 1075 common = [nullrev]
1066 1076
1067 1077 if rustancestor is not None:
1068 1078 return rustancestor.MissingAncestors(self.index, common)
1069 1079 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1070 1080
1071 1081 def findmissingrevs(self, common=None, heads=None):
1072 1082 """Return the revision numbers of the ancestors of heads that
1073 1083 are not ancestors of common.
1074 1084
1075 1085 More specifically, return a list of revision numbers corresponding to
1076 1086 nodes N such that every N satisfies the following constraints:
1077 1087
1078 1088 1. N is an ancestor of some node in 'heads'
1079 1089 2. N is not an ancestor of any node in 'common'
1080 1090
1081 1091 The list is sorted by revision number, meaning it is
1082 1092 topologically sorted.
1083 1093
1084 1094 'heads' and 'common' are both lists of revision numbers. If heads is
1085 1095 not supplied, uses all of the revlog's heads. If common is not
1086 1096 supplied, uses nullid."""
1087 1097 if common is None:
1088 1098 common = [nullrev]
1089 1099 if heads is None:
1090 1100 heads = self.headrevs()
1091 1101
1092 1102 inc = self.incrementalmissingrevs(common=common)
1093 1103 return inc.missingancestors(heads)
1094 1104
1095 1105 def findmissing(self, common=None, heads=None):
1096 1106 """Return the ancestors of heads that are not ancestors of common.
1097 1107
1098 1108 More specifically, return a list of nodes N such that every N
1099 1109 satisfies the following constraints:
1100 1110
1101 1111 1. N is an ancestor of some node in 'heads'
1102 1112 2. N is not an ancestor of any node in 'common'
1103 1113
1104 1114 The list is sorted by revision number, meaning it is
1105 1115 topologically sorted.
1106 1116
1107 1117 'heads' and 'common' are both lists of node IDs. If heads is
1108 1118 not supplied, uses all of the revlog's heads. If common is not
1109 1119 supplied, uses nullid."""
1110 1120 if common is None:
1111 1121 common = [nullid]
1112 1122 if heads is None:
1113 1123 heads = self.heads()
1114 1124
1115 1125 common = [self.rev(n) for n in common]
1116 1126 heads = [self.rev(n) for n in heads]
1117 1127
1118 1128 inc = self.incrementalmissingrevs(common=common)
1119 1129 return [self.node(r) for r in inc.missingancestors(heads)]
1120 1130
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

        1. N is a descendant of some node in 'roots'
        2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked is descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants.  (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        # keep only heads that were actually reached during the scan
        heads = [head for head, flag in pycompat.iteritems(heads) if flag]
        roots = list(roots)
        # by construction, a non-empty result has nodes, roots and heads
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
1276 1286
    def headrevs(self, revs=None):
        """Return head revision numbers (restricted to *revs* when given)."""
        if revs is None:
            try:
                # native (C) implementation over the whole index
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        if rustdagop is not None:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)
1286 1296
    def computephases(self, roots):
        # delegate phase computation to the index's native helper
        return self.index.computephasesmapsets(roots)
1289 1299
    def _headrevs(self):
        """Pure-python computation of all head revisions."""
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iter over filtered rev so nobody is a head at start.
        # The extra slot absorbs writes at index -1 (nullrev parents).
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]
1302 1312
1303 1313 def heads(self, start=None, stop=None):
1304 1314 """return the list of all nodes that have no children
1305 1315
1306 1316 if start is specified, only heads that are descendants of
1307 1317 start will be returned
1308 1318 if stop is specified, it will consider all the revs from stop
1309 1319 as if they had no children
1310 1320 """
1311 1321 if start is None and stop is None:
1312 1322 if not len(self):
1313 1323 return [nullid]
1314 1324 return [self.node(r) for r in self.headrevs()]
1315 1325
1316 1326 if start is None:
1317 1327 start = nullrev
1318 1328 else:
1319 1329 start = self.rev(start)
1320 1330
1321 1331 stoprevs = {self.rev(n) for n in stop or []}
1322 1332
1323 1333 revs = dagop.headrevssubset(
1324 1334 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1325 1335 )
1326 1336
1327 1337 return [self.node(rev) for rev in revs]
1328 1338
1329 1339 def children(self, node):
1330 1340 """find the children of a given node"""
1331 1341 c = []
1332 1342 p = self.rev(node)
1333 1343 for r in self.revs(start=p + 1):
1334 1344 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1335 1345 if prevs:
1336 1346 for pr in prevs:
1337 1347 if pr == p:
1338 1348 c.append(self.node(r))
1339 1349 elif p == nullrev:
1340 1350 c.append(self.node(r))
1341 1351 return c
1342 1352
1343 1353 def commonancestorsheads(self, a, b):
1344 1354 """calculate all the heads of the common ancestors of nodes a and b"""
1345 1355 a, b = self.rev(a), self.rev(b)
1346 1356 ancs = self._commonancestorsheads(a, b)
1347 1357 return pycompat.maplist(self.node, ancs)
1348 1358
1349 1359 def _commonancestorsheads(self, *revs):
1350 1360 """calculate all the heads of the common ancestors of revs"""
1351 1361 try:
1352 1362 ancs = self.index.commonancestorsheads(*revs)
1353 1363 except (AttributeError, OverflowError): # C implementation failed
1354 1364 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1355 1365 return ancs
1356 1366
1357 1367 def isancestor(self, a, b):
1358 1368 """return True if node a is an ancestor of node b
1359 1369
1360 1370 A revision is considered an ancestor of itself."""
1361 1371 a, b = self.rev(a), self.rev(b)
1362 1372 return self.isancestorrev(a, b)
1363 1373
1364 1374 def isancestorrev(self, a, b):
1365 1375 """return True if revision a is an ancestor of revision b
1366 1376
1367 1377 A revision is considered an ancestor of itself.
1368 1378
1369 1379 The implementation of this is trivial but the use of
1370 1380 reachableroots is not."""
1371 1381 if a == nullrev:
1372 1382 return True
1373 1383 elif a == b:
1374 1384 return True
1375 1385 elif a > b:
1376 1386 return False
1377 1387 return bool(self.reachableroots(a, [b], [a], includepath=False))
1378 1388
    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            # native implementation; note the AttributeError handler also
            # covers failures raised from inside the call itself
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            # pure-python fallback (argument order differs: roots before heads)
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )
1391 1401
1392 1402 def ancestor(self, a, b):
1393 1403 """calculate the "best" common ancestor of nodes a and b"""
1394 1404
1395 1405 a, b = self.rev(a), self.rev(b)
1396 1406 try:
1397 1407 ancs = self.index.ancestors(a, b)
1398 1408 except (AttributeError, OverflowError):
1399 1409 ancs = ancestor.ancestors(self.parentrevs, a, b)
1400 1410 if ancs:
1401 1411 # choose a consistent winner when there's a tie
1402 1412 return min(map(self.node, ancs))
1403 1413 return nullid
1404 1414
    def _match(self, id):
        """Resolve *id* when it identifies a revision exactly.

        Accepts a revision number, a binary node, a bytestring of a
        revision number, or a full 40-char hex node.  Returns the binary
        node, or None (implicitly) when nothing matched exactly.
        """
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass
1438 1448
    def _partialmatch(self, id):
        """Resolve hex prefix *id* to a node, or None when unknown.

        Raises WdirUnsupported when the prefix can only denote the working
        directory, and AmbiguousPrefixLookupError for ambiguous prefixes.
        """
        # we don't care wdirfilenodeids as they should be always full hash
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.indexfile, _(b'ambiguous identifier')
                )
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            # cached result from a previous slow-path lookup
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[: l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if nullhex.startswith(id):
                    nl.append(nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.indexfile, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass
1491 1501
1492 1502 def lookup(self, id):
1493 1503 """locate a node based on:
1494 1504 - revision number or str(revision number)
1495 1505 - nodeid or subset of hex nodeid
1496 1506 """
1497 1507 n = self._match(id)
1498 1508 if n is not None:
1499 1509 return n
1500 1510 n = self._partialmatch(id)
1501 1511 if n:
1502 1512 return n
1503 1513
1504 1514 raise error.LookupError(id, self.indexfile, _(b'no match found'))
1505 1515
    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            # does this prefix resolve unambiguously within the revlog?
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.indexfile, _(b'no node'))
            return True

        def maybewdir(prefix):
            # all-'f' prefixes could also denote the working directory
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            # fast path for unfiltered revlogs: let the index compute it
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != wdirid:
                    raise error.LookupError(node, self.indexfile, _(b'no node'))
            except AttributeError:
                # Fall through to pure code
                pass

        if node == wdirid:
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)
1554 1564
1555 1565 def cmp(self, node, text):
1556 1566 """compare text with a given file revision
1557 1567
1558 1568 returns True if text is different than what is stored.
1559 1569 """
1560 1570 p1, p2 = self.parents(node)
1561 1571 return storageutil.hashrevisionsha1(text, p1, p2) != node
1562 1572
    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache: only when the new data is contiguous
        # with the cached segment and the combined size stays under
        # _chunksize; otherwise the new segment replaces the cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data
1574 1584
    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        # Round the window down (realoffset) and up (reallength) to
        # cachesize boundaries. The mask arithmetic assumes cachesize is a
        # power of two — TODO confirm this invariant is enforced at config
        # validation time.
        realoffset = offset & ~(cachesize - 1)
        reallength = (
            (offset + length + cachesize) & ~(cachesize - 1)
        ) - realoffset
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        # Remember the enlarged window so neighboring reads hit the cache.
        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            # The window was enlarged: carve the requested span out of it.
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _(
                        b'partial read of revlog %s; expected %d bytes from '
                        b'offset %d, got %d'
                    )
                    % (
                        self.indexfile if self._inline else self.datafile,
                        length,
                        realoffset,
                        len(d) - startoffset,
                    )
                )

            # Zero-copy view into the cached window.
            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _(
                    b'partial read of revlog %s; expected %d bytes from offset '
                    b'%d, got %d'
                )
                % (
                    self.indexfile if self._inline else self.datafile,
                    length,
                    offset,
                    len(d),
                )
            )

        return d
1634 1644
1635 1645 def _getsegment(self, offset, length, df=None):
1636 1646 """Obtain a segment of raw data from the revlog.
1637 1647
1638 1648 Accepts an absolute offset, length of bytes to obtain, and an
1639 1649 optional file handle to the already-opened revlog. If the file
1640 1650 handle is used, it's original seek position will not be preserved.
1641 1651
1642 1652 Requests for data may be returned from a cache.
1643 1653
1644 1654 Returns a str or a buffer instance of raw byte data.
1645 1655 """
1646 1656 o, d = self._chunkcache
1647 1657 l = len(d)
1648 1658
1649 1659 # is it in the cache?
1650 1660 cachestart = offset - o
1651 1661 cacheend = cachestart + length
1652 1662 if cachestart >= 0 and cacheend <= l:
1653 1663 if cachestart == 0 and cacheend == l:
1654 1664 return d # avoid a copy
1655 1665 return util.buffer(d, cachestart, cacheend - cachestart)
1656 1666
1657 1667 return self._readsegment(offset, length, df=df)
1658 1668
    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        # Index field 0 packs the byte offset shifted left by 16 bits with
        # the revision flags in the low bits (see offset_type usage in
        # _addrevision); >> 16 recovers the raw offset.
        start = int(istart[0] >> 16)
        if startrev == endrev:
            # field 1 is the on-disk (compressed) length of the revision
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            # Inline revlogs interleave data with index records, so account
            # for one index entry per revision before the data.
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)
1692 1702
1693 1703 def _chunk(self, rev, df=None):
1694 1704 """Obtain a single decompressed chunk for a revision.
1695 1705
1696 1706 Accepts an integer revision and an optional already-open file handle
1697 1707 to be used for reading. If used, the seek position of the file will not
1698 1708 be preserved.
1699 1709
1700 1710 Returns a str holding uncompressed data for the requested revision.
1701 1711 """
1702 1712 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1703 1713
    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        # Bind frequently-used attributes to locals for speed in the loops
        # below.
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            # read all requested revisions as one contiguous segment
            slicedchunks = (revs,)
        else:
            # split the revisions into denser slices so we do not read large
            # unrelated spans of the data file
            slicedchunks = deltautil.slicechunk(
                self, revs, targetsize=targetsize
            )

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    # inline data is interleaved with index entries
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                # decompress each revision's slice of the shared segment
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l
1758 1768
1759 1769 def _chunkclear(self):
1760 1770 """Clear the raw chunk cache."""
1761 1771 self._chunkcache = (0, b'')
1762 1772
1763 1773 def deltaparent(self, rev):
1764 1774 """return deltaparent of the given revision"""
1765 1775 base = self.index[rev][3]
1766 1776 if base == rev:
1767 1777 return nullrev
1768 1778 elif self._generaldelta:
1769 1779 return base
1770 1780 else:
1771 1781 return rev - 1
1772 1782
    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        if not self._sparserevlog:
            # without sparse-revlog, only revisions whose delta parent is
            # null are considered snapshots
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, b'issnapshot'):
            # directly assign the method to cache the testing and access
            # (the instance attribute shadows this method for later calls)
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        # entry[3] is the delta base revision; entry[5]/entry[6] are the
        # parent revisions (see the tuple built in _addrevision)
        base = entry[3]
        if base == rev:
            # stored as a full text
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        p2 = entry[6]
        if base == p1 or base == p2:
            # a delta against a parent is a plain delta, not a snapshot
            return False
        # recurse: rev is an intermediate snapshot only if its base is a
        # snapshot itself
        return self.issnapshot(base)
1794 1804
1795 1805 def snapshotdepth(self, rev):
1796 1806 """number of snapshot in the chain before this one"""
1797 1807 if not self.issnapshot(rev):
1798 1808 raise error.ProgrammingError(b'revision %d not a snapshot')
1799 1809 return len(self._deltachain(rev)[0]) - 1
1800 1810
1801 1811 def revdiff(self, rev1, rev2):
1802 1812 """return or calculate a delta between two revisions
1803 1813
1804 1814 The delta calculated is in binary form and is intended to be written to
1805 1815 revlog data directly. So this function needs raw revision data.
1806 1816 """
1807 1817 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1808 1818 return bytes(self._chunk(rev2))
1809 1819
1810 1820 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1811 1821
1812 1822 def _processflags(self, text, flags, operation, raw=False):
1813 1823 """deprecated entry point to access flag processors"""
1814 1824 msg = b'_processflag(...) use the specialized variant'
1815 1825 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1816 1826 if raw:
1817 1827 return text, flagutil.processflagsraw(self, text, flags)
1818 1828 elif operation == b'read':
1819 1829 return flagutil.processflagsread(self, text, flags)
1820 1830 else: # write operation
1821 1831 return flagutil.processflagswrite(self, text, flags, None)
1822 1832
1823 1833 def revision(self, nodeorrev, _df=None, raw=False):
1824 1834 """return an uncompressed revision of a given node or revision
1825 1835 number.
1826 1836
1827 1837 _df - an existing file handle to read from. (internal-only)
1828 1838 raw - an optional argument specifying if the revision data is to be
1829 1839 treated as raw data when applying flag transforms. 'raw' should be set
1830 1840 to True when generating changegroups or in debug commands.
1831 1841 """
1832 1842 if raw:
1833 1843 msg = (
1834 1844 b'revlog.revision(..., raw=True) is deprecated, '
1835 1845 b'use revlog.rawdata(...)'
1836 1846 )
1837 1847 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1838 1848 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1839 1849
1840 1850 def sidedata(self, nodeorrev, _df=None):
1841 1851 """a map of extra data related to the changeset but not part of the hash
1842 1852
1843 1853 This function currently return a dictionary. However, more advanced
1844 1854 mapping object will likely be used in the future for a more
1845 1855 efficient/lazy code.
1846 1856 """
1847 1857 return self._revisiondata(nodeorrev, _df)[1]
1848 1858
    def _revisiondata(self, nodeorrev, _df=None, raw=False):
        """Return ``(text, sidedata)`` for a node or revision number.

        ``raw`` skips flag processing and returns the stored bytes.
        """
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == nullid:
            return b"", {}

        # ``rawtext`` is the text as stored inside the revlog. Might be the
        # revision or might need to be processed to retrieve the revision.
        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)

        if raw and validated:
            # if we don't want to process the raw text and that raw
            # text is cached, we can exit early.
            return rawtext, {}
        if rev is None:
            rev = self.rev(node)
        # the revlog's flag for this revision
        # (usually alter its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext, {}

        sidedata = {}
        if raw:
            # raw access only validates; no transformation is applied
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            try:
                r = flagutil.processflagsread(self, rawtext, flags)
            except error.SidedataHashError as exc:
                msg = _(b"integrity check failed on %s:%s sidedata key %d")
                msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
                raise error.RevlogError(msg)
            text, validatehash, sidedata = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            # cache the raw text for later reads (see _rawtext)
            self._revisioncache = (node, rev, rawtext)

        return text, sidedata
1898 1908
    def _rawtext(self, node, rev, _df=None):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            if self._revisioncache[0] == node:
                # exact hit: the cached text was already validated
                return (rev, self._revisioncache[2], True)
            cachedrev = self._revisioncache[1]

        if rev is None:
            rev = self.rev(node)

        # walk the delta chain, stopping early if the cached revision is hit
        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            # chain stopped at the cached revision: use the cached rawtext
            # as the base instead of re-reading it from disk
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._revisioncache after validating the text
        self._revisioncache = None

        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            # bound read-ahead relative to the expected text size
            targetsize = 4 * rawsize

        bins = self._chunks(chain, df=_df, targetsize=targetsize)
        if basetext is None:
            # no cached base: the first chunk of the chain is the full text
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext  # let us have a chance to free memory early
        return (rev, rawtext, False)
1941 1951
1942 1952 def rawdata(self, nodeorrev, _df=None):
1943 1953 """return an uncompressed raw data of a given node or revision number.
1944 1954
1945 1955 _df - an existing file handle to read from. (internal-only)
1946 1956 """
1947 1957 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1948 1958
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.

        Delegates to ``storageutil.hashrevisionsha1``.
        """
        return storageutil.hashrevisionsha1(text, p1, p2)
1956 1966
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.

        ``p1``/``p2`` default to the stored parents of ``node``; ``rev`` is
        only used to build a nicer error message.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(
                    _(b"integrity check failed on %s:%s")
                    % (self.indexfile, pycompat.bytestr(revornode))
                )
        except error.RevlogError:
            # A censored stored text fails the hash check by construction;
            # report it specifically instead of a generic integrity error.
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.indexfile, node, text)
            raise
1987 1997
    def _enforceinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        if (
            not self._inline
            or (self.start(tiprev) + self.length(tiprev)) < _maxinline
        ):
            # already split, or still small enough to stay inline
            return

        troffset = tr.findoffset(self.indexfile)
        if troffset is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self.indexfile
            )
        trindex = 0
        tr.add(self.datafile, 0)

        if fp:
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        # copy every revision's data chunk from the inline file into the new
        # standalone data file
        with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
            for r in self:
                dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
                if troffset <= self.start(r):
                    # remember a revision at/after the transaction's recorded
                    # offset; used below to compute the index truncate point
                    trindex = r

        # rewrite the index without the inline flag (and without the data)
        with self._indexfp(b'w') as fp:
            self.version &= ~FLAG_INLINE_DATA
            self._inline = False
            io = self._io
            for i in self:
                e = io.packentry(self.index[i], self.node, self.version, i)
                fp.write(e)

            # the temp file replace the real index when we exit the context
            # manager

        tr.replace(self.indexfile, trindex * self._io.size)
        nodemaputil.setup_persistent_nodemap(tr, self)
        self._chunkclear()
2037 2047
2038 2048 def _nodeduplicatecallback(self, transaction, node):
2039 2049 """called when trying to add a node already stored."""
2040 2050
    def addrevision(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=REVIDX_DEFAULT_FLAGS,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use different hashing method (and override checkhash() in such case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
            multiple calls
        sidedata - optional mapping of extra data; requires a revlog with
            sidedata support (``self.hassidedata``)
        """
        if link == nullrev:
            raise error.RevlogError(
                _(b"attempted to add linkrev -1 to %s") % self.indexfile
            )

        if sidedata is None:
            sidedata = {}
            flags = flags & ~REVIDX_SIDEDATA
        elif not self.hassidedata:
            raise error.ProgrammingError(
                _(b"trying to add sidedata to a revlog who don't support them")
            )
        else:
            flags |= REVIDX_SIDEDATA

        if flags:
            # the node is part of the flag-processor input, so it must be
            # computed before processing
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(
            self, text, flags, sidedata=sidedata
        )

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _(
                    b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
                )
                % (self.indexfile, len(rawtext))
            )

        node = node or self.hash(rawtext, p1, p2)
        if self.index.has_node(node):
            # already stored; adding again would be a no-op
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(
            rawtext,
            transaction,
            link,
            p1,
            p2,
            node,
            flags,
            cachedelta=cachedelta,
            deltacomputer=deltacomputer,
        )
2121 2131
    def addrawrevision(
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        deltacomputer=None,
    ):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (ex: received
        over wire, or read from an external bundle).
        """
        dfh = None
        if not self._inline:
            # non-inline revlogs keep data in a separate file
            dfh = self._datafp(b"a+")
        ifh = self._indexfp(b"a+")
        try:
            return self._addrevision(
                node,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                flags,
                cachedelta,
                ifh,
                dfh,
                deltacomputer=deltacomputer,
            )
        finally:
            # always release the file handles, even when _addrevision raises
            if dfh:
                dfh.close()
            ifh.close()
2160 2170
2161 2171 def compress(self, data):
2162 2172 """Generate a possibly-compressed representation of data."""
2163 2173 if not data:
2164 2174 return b'', data
2165 2175
2166 2176 compressed = self._compressor.compress(data)
2167 2177
2168 2178 if compressed:
2169 2179 # The revlog compressor added the header in the returned data.
2170 2180 return b'', compressed
2171 2181
2172 2182 if data[0:1] == b'\0':
2173 2183 return b'', data
2174 2184 return b'u', data
2175 2185
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely by at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        # b'x' is the zlib header byte: fast-path it inline
        if t == b'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(
                    _(b'revlog decompress error: %s')
                    % stringutil.forcebytestr(e)
                )
        # '\0' is more common than 'u' so it goes first.
        elif t == b'\0':
            return data
        elif t == b'u':
            # stored uncompressed with an explicit marker: strip the marker
            return util.buffer(data, 1)

        # anything else is dispatched to a registered compression engine,
        # cached per header byte
        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor(self._compengineopts)
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(_(b'unknown compression type %r') % t)

        return compressor.decompress(data)
2233 2243
    def _addrevision(
        self,
        node,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        flags,
        cachedelta,
        ifh,
        dfh,
        alwayscache=False,
        deltacomputer=None,
    ):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == nullid:
            raise error.RevlogError(
                _(b"%s: attempt to add null revision") % self.indexfile
            )
        if node == wdirid or node in wdirfilenodeids:
            raise error.RevlogError(
                _(b"%s: attempt to add wdir revision") % self.indexfile
            )

        # the handle the delta will be read back from: inline revlogs keep
        # data in the index file
        if self._inline:
            fh = ifh
        else:
            fh = dfh

        # mutable container so the delta computer can publish the built text
        btext = [rawtext]

        curr = len(self)
        prev = curr - 1
        offset = self.end(prev)
        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(
                revlog.size(self, cachedelta[0]), cachedelta[1]
            )
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            deltacomputer = deltautil.deltacomputer(self)

        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        # new index entry: (packed offset+flags, compressed len, raw len,
        # delta base, linkrev, parent revs, node)
        e = (
            offset_type(offset, flags),
            deltainfo.deltalen,
            textlen,
            deltainfo.base,
            link,
            p1r,
            p2r,
            node,
        )
        self.index.append(e)

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(
            transaction, ifh, dfh, entry, deltainfo.data, link, offset
        )

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            # materialize the text so the cache below can be populated
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes:  # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return node
2328 2338
    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        """Write one packed index ``entry`` and its ``data`` to disk.

        ``ifh``/``dfh`` are the index and data file handles (``dfh`` is None
        for inline revlogs). ``data`` is a 2-element sequence whose parts are
        written back to back.
        """
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            # inline: entry and data are interleaved in the index file
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            # may split the revlog out of inline mode once it grows too big
            self._enforceinlinesize(transaction, ifh)
        nodemaputil.setup_persistent_nodemap(transaction, self)
2362 2372
    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.

        ``duplicaterevisioncb`` is similarly called for nodes that were
        already stored. Returns a boolean indicating whether at least one
        delta was processed.
        """

        if self._writinghandles:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self._indexfp(b"a+")
        isize = r * self._io.size
        # record current file sizes with the transaction for rollback
        if self._inline:
            transaction.add(self.indexfile, end + isize)
            dfh = None
        else:
            transaction.add(self.indexfile, isize)
            transaction.add(self.datafile, end)
            dfh = self._datafp(b"a+")

        def flush():
            # push pending writes to disk so reads (e.g. _peek_iscensored)
            # see up-to-date content
            if dfh:
                dfh.flush()
            ifh.flush()

        self._writinghandles = (ifh, dfh)
        empty = True

        try:
            deltacomputer = deltautil.deltacomputer(self)
            # loop through our set of deltas
            for data in deltas:
                node, p1, p2, linknode, deltabase, delta, flags = data
                link = linkmapper(linknode)
                flags = flags or REVIDX_DEFAULT_FLAGS

                if self.index.has_node(node):
                    # this can happen if two branches make the same change
                    self._nodeduplicatecallback(transaction, node)
                    if duplicaterevisioncb:
                        duplicaterevisioncb(self, node)
                    empty = False
                    continue

                for p in (p1, p2):
                    if not self.index.has_node(p):
                        raise error.LookupError(
                            p, self.indexfile, _(b'unknown parent')
                        )

                if not self.index.has_node(deltabase):
                    raise error.LookupError(
                        deltabase, self.indexfile, _(b'unknown delta base')
                    )

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(b">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(
                            self.indexfile, self.node(baserev)
                        )

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(
                    node,
                    None,
                    transaction,
                    link,
                    p1,
                    p2,
                    flags,
                    (baserev, delta),
                    ifh,
                    dfh,
                    alwayscache=bool(addrevisioncb),
                    deltacomputer=deltacomputer,
                )

                if addrevisioncb:
                    addrevisioncb(self, node)
                empty = False

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self._datafp(b"a+")
                    ifh = self._indexfp(b"a+")
                    self._writinghandles = (ifh, dfh)
        finally:
            self._writinghandles = None

            if dfh:
                dfh.close()
            ifh.close()
        return not empty
2490 2500
2491 2501 def iscensored(self, rev):
2492 2502 """Check if a file revision is censored."""
2493 2503 if not self._censorable:
2494 2504 return False
2495 2505
2496 2506 return self.flags(rev) & REVIDX_ISCENSORED
2497 2507
2498 2508 def _peek_iscensored(self, baserev, delta, flush):
2499 2509 """Quickly check if a delta produces a censored revision."""
2500 2510 if not self._censorable:
2501 2511 return False
2502 2512
2503 2513 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2504 2514
2505 2515 def getstrippoint(self, minlink):
2506 2516 """find the minimum rev that must be stripped to strip the linkrev
2507 2517
2508 2518 Returns a tuple containing the minimum rev and a set of all revs that
2509 2519 have linkrevs that will be broken by this strip.
2510 2520 """
2511 2521 return storageutil.resolvestripinfo(
2512 2522 minlink,
2513 2523 len(self) - 1,
2514 2524 self.headrevs(),
2515 2525 self.linkrev,
2516 2526 self.parentrevs,
2517 2527 )
2518 2528
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        # Nothing to do for an empty revlog.
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        # No revision has a linkrev >= minlink: nothing to strip.
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            # Separate data file: truncate it at the data offset of ``rev``,
            # and truncate the index at the fixed-size entry boundary.
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            # Inline revlog: index entries and data are interleaved in one
            # file, so the truncation point includes both.
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = util.lrucachedict(500)
        self._chunkclear()

        del self.index[rev:-1]
2556 2566
    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        # Expected end offset of the data, derived from the last revision.
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            # A missing data file simply means no extra data bytes.
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self._io.size
            # di is whatever does not fit in whole fixed-size index entries.
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                # Inline revlogs interleave data with index entries, so
                # recompute both numbers against the combined layout.
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            # A missing index file means no extra index bytes.
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
2600 2610
2601 2611 def files(self):
2602 2612 res = [self.indexfile]
2603 2613 if not self._inline:
2604 2614 res.append(self.datafile)
2605 2615 return res
2606 2616
2607 2617 def emitrevisions(
2608 2618 self,
2609 2619 nodes,
2610 2620 nodesorder=None,
2611 2621 revisiondata=False,
2612 2622 assumehaveparentrevisions=False,
2613 2623 deltamode=repository.CG_DELTAMODE_STD,
2614 2624 ):
2615 2625 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2616 2626 raise error.ProgrammingError(
2617 2627 b'unhandled value for nodesorder: %s' % nodesorder
2618 2628 )
2619 2629
2620 2630 if nodesorder is None and not self._generaldelta:
2621 2631 nodesorder = b'storage'
2622 2632
2623 2633 if (
2624 2634 not self._storedeltachains
2625 2635 and deltamode != repository.CG_DELTAMODE_PREV
2626 2636 ):
2627 2637 deltamode = repository.CG_DELTAMODE_FULL
2628 2638
2629 2639 return storageutil.emitrevisions(
2630 2640 self,
2631 2641 nodes,
2632 2642 nodesorder,
2633 2643 revlogrevisiondelta,
2634 2644 deltaparentfn=self.deltaparent,
2635 2645 candeltafn=self.candelta,
2636 2646 rawsizefn=self.rawsize,
2637 2647 revdifffn=self.revdiff,
2638 2648 flagsfn=self.flags,
2639 2649 deltamode=deltamode,
2640 2650 revisiondata=revisiondata,
2641 2651 assumehaveparentrevisions=assumehaveparentrevisions,
2642 2652 )
2643 2653
    # Delta-reuse policies accepted by clone(); see the clone() docstring
    # for the detailed semantics of each value.
    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    # Set of every valid ``deltareuse`` value, used for validation in clone().
    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2651 2661
    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedatacompanion=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is the
          fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).
        DELTAREUSEFULLADD
          Revisions will be re-added as if they were new content. This is
          slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
          e.g. large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force compute deltas against both parents
        for merges. By default, the current default is used.

        If not None, the `sidedatacompanion` is a callable that accepts two
        arguments:

            (srcrevlog, rev)

        and returns a quintet that controls changes to sidedata content from
        the old revision to the new clone result:

            (dropall, filterout, update, new_flags, dropped_flags)

        * if `dropall` is True, all sidedata should be dropped
        * `filterout` is a set of sidedata keys that should be dropped
        * `update` is a mapping of additional/new key -> value
        * new_flags is a bitfield of new flags that the revision should get
        * dropped_flags is a bitfield of flags that the revision should no
          longer have
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase controls whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            # Translate the requested policy into the destination revlog's
            # laziness flags; they are restored in the finally block below.
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedatacompanion,
            )

        finally:
            # Always restore the destination revlog's original settings.
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd
2763 2773
    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedatacompanion,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        deltacomputer = deltautil.deltacomputer(destrevlog)
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF  # low 16 bits of the first field
            linkrev = entry[4]
            p1 = index[entry[5]][7]  # parent rev -> node
            p2 = index[entry[6]][7]
            node = entry[7]

            # Default: no sidedata changes (matches the quintet documented
            # in clone()).
            sidedataactions = (False, [], {}, 0, 0)
            if sidedatacompanion is not None:
                sidedataactions = sidedatacompanion(self, rev)

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
                # Sidedata must change (or a full re-add was requested), so
                # go through the full addrevision() path with resolved text.
                dropall = sidedataactions[0]
                filterout = sidedataactions[1]
                update = sidedataactions[2]
                new_flags = sidedataactions[3]
                dropped_flags = sidedataactions[4]
                text, sidedata = self._revisiondata(rev)
                if dropall:
                    sidedata = {}
                for key in filterout:
                    sidedata.pop(key, None)
                sidedata.update(update)
                if not sidedata:
                    sidedata = None

                flags |= new_flags
                flags &= ~dropped_flags

                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.rawdata(rev)

                ifh = destrevlog.opener(
                    destrevlog.indexfile, b'a+', checkambig=False
                )
                dfh = None
                if not destrevlog._inline:
                    dfh = destrevlog.opener(destrevlog.datafile, b'a+')
                try:
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        ifh,
                        dfh,
                        deltacomputer=deltacomputer,
                    )
                finally:
                    # Close handles even if _addrevision raised.
                    if dfh:
                        dfh.close()
                    ifh.close()

            if addrevisioncb:
                addrevisioncb(self, rev, node)
2861 2871
    def censorrevision(self, tr, censornode, tombstone=b''):
        """Replace the content of ``censornode`` with ``tombstone`` data.

        The revlog is rewritten into temporary files which replace the
        originals when the transaction closes.
        """
        # Version 0 revlogs lack the features needed for censoring.
        if (self.version & 0xFFFF) == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs') % self.version
            )

        censorrev = self.rev(censornode)
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        # The tombstone must fit within the censored revision's raw size.
        if len(tombstone) > self.rawsize(censorrev):
            raise error.Abort(
                _(b'censor tombstone must be no longer than censored data')
            )

        # Rewriting the revlog in place is hard. Our strategy for censoring is
        # to create a new revlog, copy all revisions to it, then replace the
        # revlogs on transaction close.

        newindexfile = self.indexfile + b'.tmpcensored'
        newdatafile = self.datafile + b'.tmpcensored'

        # This is a bit dangerous. We could easily have a mismatch of state.
        newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
        newrl.version = self.version
        newrl._generaldelta = self._generaldelta
        newrl._io = self._io

        for rev in self.revs():
            node = self.node(rev)
            p1, p2 = self.parents(node)

            if rev == censorrev:
                # Store the tombstone in place of the censored content.
                newrl.addrawrevision(
                    tombstone,
                    tr,
                    self.linkrev(censorrev),
                    p1,
                    p2,
                    censornode,
                    REVIDX_ISCENSORED,
                )

                if newrl.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'censored revision stored as delta; '
                            b'cannot censor'
                        ),
                        hint=_(
                            b'censoring of revlogs is not '
                            b'fully implemented; please report '
                            b'this bug'
                        ),
                    )
                continue

            if self.iscensored(rev):
                # Previously-censored revisions must be stored as full
                # snapshots; copy their raw chunk verbatim.
                if self.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'cannot censor due to censored '
                            b'revision having delta stored'
                        )
                    )
                rawtext = self._chunk(rev)
            else:
                rawtext = self.rawdata(rev)

            newrl.addrawrevision(
                rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
            )

        # Back up the originals, then swap the rewritten files into place.
        tr.addbackup(self.indexfile, location=b'store')
        if not self._inline:
            tr.addbackup(self.datafile, location=b'store')

        self.opener.rename(newrl.indexfile, self.indexfile)
        if not self._inline:
            self.opener.rename(newrl.datafile, self.datafile)

        # Drop cached state and reload from the rewritten files.
        self.clearcaches()
        self._loadindex()
2944 2954
    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        # File-size level consistency check first.
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self.version & 0xFFFF

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.indexfile, version, state[b'expectedversion'])
            )

        # Nodes added to these sets are communicated back to the caller
        # through the shared ``state`` mapping.
        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta: file content starts with b'\1\n', the metadata
            #         header defined in filelog.py, but without a rename
            #   ext: content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  -------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common  | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1      | L1     | L1    | L1
            # size()       | L1      | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2      | L2     | L2    | L2
            # len(text)    | L2      | L2     | L2    | L3
            # len(read())  | L2      | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    # Only skip flags actually present on this revision.
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                # Censored data is only an error when the caller says so.
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)
3049 3059
3050 3060 def storageinfo(
3051 3061 self,
3052 3062 exclusivefiles=False,
3053 3063 sharedfiles=False,
3054 3064 revisionscount=False,
3055 3065 trackedsize=False,
3056 3066 storedsize=False,
3057 3067 ):
3058 3068 d = {}
3059 3069
3060 3070 if exclusivefiles:
3061 3071 d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
3062 3072 if not self._inline:
3063 3073 d[b'exclusivefiles'].append((self.opener, self.datafile))
3064 3074
3065 3075 if sharedfiles:
3066 3076 d[b'sharedfiles'] = []
3067 3077
3068 3078 if revisionscount:
3069 3079 d[b'revisionscount'] = len(self)
3070 3080
3071 3081 if trackedsize:
3072 3082 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3073 3083
3074 3084 if storedsize:
3075 3085 d[b'storedsize'] = sum(
3076 3086 self.opener.stat(path).st_size for path in self.files()
3077 3087 )
3078 3088
3079 3089 return d
@@ -1,637 +1,669 b''
1 1 ===================================
2 2 Test the persistent on-disk nodemap
3 3 ===================================
4 4
5 5 $ cat << EOF >> $HGRCPATH
6 6 > [format]
7 7 > use-persistent-nodemap=yes
8 8 > [devel]
9 9 > persistent-nodemap=yes
10 10 > EOF
11 $ hg init test-repo
11
12 $ hg init test-repo --config storage.revlog.persistent-nodemap.slow-path=allow
12 13 $ cd test-repo
14
15 Check handling of the default slow-path value
16
17 #if no-pure no-rust
18
19 $ hg id
20 warning: accessing `persistent-nodemap` repository without associated fast implementation.
21 (check `hg help config.format.use-persistent-nodemap` for details)
22 000000000000 tip
23
24 Unlock further check (we are here to test the feature)
25
26 $ cat << EOF >> $HGRCPATH
27 > [storage]
28 > # to avoid spamming the test
29 > revlog.persistent-nodemap.slow-path=allow
30 > EOF
31
32 #endif
33
34
13 35 $ hg debugformat
14 36 format-variant repo
15 37 fncache: yes
16 38 dotencode: yes
17 39 generaldelta: yes
18 40 exp-sharesafe: no
19 41 sparserevlog: yes
20 42 sidedata: no
21 43 persistent-nodemap: yes
22 44 copies-sdc: no
23 45 plain-cl-delta: yes
24 46 compression: zlib
25 47 compression-level: default
26 $ hg debugbuilddag .+5000 --new-file --config "storage.revlog.nodemap.mode=warn"
27 persistent nodemap in strict mode without efficient method (no-rust no-pure !)
28 persistent nodemap in strict mode without efficient method (no-rust no-pure !)
48 $ hg debugbuilddag .+5000 --new-file
49
29 50 $ hg debugnodemap --metadata
30 51 uid: ???????????????? (glob)
31 52 tip-rev: 5000
32 53 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
33 54 data-length: 121088
34 55 data-unused: 0
35 56 data-unused: 0.000%
36 57 $ f --size .hg/store/00changelog.n
37 58 .hg/store/00changelog.n: size=70
38 59
39 60 Simple lookup works
40 61
41 62 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
42 63 $ hg log -r "$ANYNODE" --template '{rev}\n'
43 64 5000
44 65
45 66
46 67 #if rust
47 68
48 69 $ f --sha256 .hg/store/00changelog-*.nd
49 70 .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
50 71
51 72 $ f --sha256 .hg/store/00manifest-*.nd
52 73 .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
53 74 $ hg debugnodemap --dump-new | f --sha256 --size
54 75 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
55 76 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
56 77 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
57 78 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
58 79 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
59 80 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
60 81 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
61 82 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
62 83 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
63 84 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
64 85 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
65 86 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
66 87 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
67 88 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
68 89 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
69 90 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
70 91 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
71 92 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
72 93 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
73 94
74 95
75 96 #else
76 97
77 98 $ f --sha256 .hg/store/00changelog-*.nd
78 99 .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
79 100 $ hg debugnodemap --dump-new | f --sha256 --size
80 101 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
81 102 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
82 103 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
83 104 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
84 105 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
85 106 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
86 107 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
87 108 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
88 109 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
89 110 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
90 111 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
91 112 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
92 113 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
93 114 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
94 115 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
95 116 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
96 117 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
97 118 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
98 119 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
99 120
100 121 #endif
101 122
102 123 $ hg debugnodemap --check
103 124 revision in index: 5001
104 125 revision in nodemap: 5001
105 126
106 127 add a new commit
107 128
108 129 $ hg up
109 130 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
110 131 $ echo foo > foo
111 132 $ hg add foo
112 133
113 134
114 135 Check slow-path config value handling
115 136 -------------------------------------
116 137
117 138 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
118 139 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
119 falling back to default value: allow
140 falling back to default value: warn
141 warning: accessing `persistent-nodemap` repository without associated fast implementation. (no-pure no-rust !)
142 (check `hg help config.format.use-persistent-nodemap` for details) (no-pure no-rust !)
120 143 6b02b8c7b966+ tip
121 144
122 145 #if no-pure no-rust
123 146
147 $ hg log -r . --config "storage.revlog.persistent-nodemap.slow-path=warn"
148 warning: accessing `persistent-nodemap` repository without associated fast implementation.
149 (check `hg help config.format.use-persistent-nodemap` for details)
150 changeset: 5000:6b02b8c7b966
151 tag: tip
152 user: debugbuilddag
153 date: Thu Jan 01 01:23:20 1970 +0000
154 summary: r5000
155
124 156 $ hg ci -m 'foo' --config "storage.revlog.nodemap.mode=strict"
125 157 transaction abort!
126 158 rollback completed
127 159 abort: persistent nodemap in strict mode without efficient method
128 160 [255]
129 161
130 162 #endif
131 163
132 164 $ hg ci -m 'foo'
133 165
134 166 #if no-pure no-rust
135 167 $ hg debugnodemap --metadata
136 168 uid: ???????????????? (glob)
137 169 tip-rev: 5001
138 170 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
139 171 data-length: 121088
140 172 data-unused: 0
141 173 data-unused: 0.000%
142 174 #else
143 175 $ hg debugnodemap --metadata
144 176 uid: ???????????????? (glob)
145 177 tip-rev: 5001
146 178 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
147 179 data-length: 121344
148 180 data-unused: 256
149 181 data-unused: 0.211%
150 182 #endif
151 183
152 184 $ f --size .hg/store/00changelog.n
153 185 .hg/store/00changelog.n: size=70
154 186
155 187 (The pure code use the debug code that perform incremental update, the C code reencode from scratch)
156 188
157 189 #if pure
158 190 $ f --sha256 .hg/store/00changelog-*.nd --size
159 191 .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
160 192 #endif
161 193
162 194 #if rust
163 195 $ f --sha256 .hg/store/00changelog-*.nd --size
164 196 .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
165 197 #endif
166 198
167 199 #if no-pure no-rust
168 200 $ f --sha256 .hg/store/00changelog-*.nd --size
169 201 .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
170 202 #endif
171 203
172 204 $ hg debugnodemap --check
173 205 revision in index: 5002
174 206 revision in nodemap: 5002
175 207
176 208 Test code path without mmap
177 209 ---------------------------
178 210
179 211 $ echo bar > bar
180 212 $ hg add bar
181 213 $ hg ci -m 'bar' --config storage.revlog.persistent-nodemap.mmap=no
182 214
183 215 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=yes
184 216 revision in index: 5003
185 217 revision in nodemap: 5003
186 218 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=no
187 219 revision in index: 5003
188 220 revision in nodemap: 5003
189 221
190 222
191 223 #if pure
192 224 $ hg debugnodemap --metadata
193 225 uid: ???????????????? (glob)
194 226 tip-rev: 5002
195 227 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
196 228 data-length: 121600
197 229 data-unused: 512
198 230 data-unused: 0.421%
199 231 $ f --sha256 .hg/store/00changelog-*.nd --size
200 232 .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
201 233 #endif
202 234 #if rust
203 235 $ hg debugnodemap --metadata
204 236 uid: ???????????????? (glob)
205 237 tip-rev: 5002
206 238 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
207 239 data-length: 121600
208 240 data-unused: 512
209 241 data-unused: 0.421%
210 242 $ f --sha256 .hg/store/00changelog-*.nd --size
211 243 .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
212 244 #endif
213 245 #if no-pure no-rust
214 246 $ hg debugnodemap --metadata
215 247 uid: ???????????????? (glob)
216 248 tip-rev: 5002
217 249 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
218 250 data-length: 121088
219 251 data-unused: 0
220 252 data-unused: 0.000%
221 253 $ f --sha256 .hg/store/00changelog-*.nd --size
222 254 .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
223 255 #endif
224 256
225 257 Test force warming the cache
226 258
227 259 $ rm .hg/store/00changelog.n
228 260 $ hg debugnodemap --metadata
229 261 $ hg debugupdatecache
230 262 #if pure
231 263 $ hg debugnodemap --metadata
232 264 uid: ???????????????? (glob)
233 265 tip-rev: 5002
234 266 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
235 267 data-length: 121088
236 268 data-unused: 0
237 269 data-unused: 0.000%
238 270 #else
239 271 $ hg debugnodemap --metadata
240 272 uid: ???????????????? (glob)
241 273 tip-rev: 5002
242 274 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
243 275 data-length: 121088
244 276 data-unused: 0
245 277 data-unused: 0.000%
246 278 #endif
247 279
248 280 Check out of sync nodemap
249 281 =========================
250 282
251 283 First copy old data on the side.
252 284
253 285 $ mkdir ../tmp-copies
254 286 $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies
255 287
256 288 Nodemap lagging behind
257 289 ----------------------
258 290
259 291 make a new commit
260 292
261 293 $ echo bar2 > bar
262 294 $ hg ci -m 'bar2'
263 295 $ NODE=`hg log -r tip -T '{node}\n'`
264 296 $ hg log -r "$NODE" -T '{rev}\n'
265 297 5003
266 298
267 299 If the nodemap is lagging behind, it can catch up fine
268 300
269 301 $ hg debugnodemap --metadata
270 302 uid: ???????????????? (glob)
271 303 tip-rev: 5003
272 304 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
273 305 data-length: 121344 (pure !)
274 306 data-length: 121344 (rust !)
275 307 data-length: 121152 (no-rust no-pure !)
276 308 data-unused: 192 (pure !)
277 309 data-unused: 192 (rust !)
278 310 data-unused: 0 (no-rust no-pure !)
279 311 data-unused: 0.158% (pure !)
280 312 data-unused: 0.158% (rust !)
281 313 data-unused: 0.000% (no-rust no-pure !)
282 314 $ cp -f ../tmp-copies/* .hg/store/
283 315 $ hg debugnodemap --metadata
284 316 uid: ???????????????? (glob)
285 317 tip-rev: 5002
286 318 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
287 319 data-length: 121088
288 320 data-unused: 0
289 321 data-unused: 0.000%
290 322 $ hg log -r "$NODE" -T '{rev}\n'
291 323 5003
292 324
293 325 changelog altered
294 326 -----------------
295 327
296 328 If the nodemap is not gated behind a requirement, an unaware client can alter
297 329 the repository so the revlog used to generate the nodemap is no longer
298 330 compatible with the persistent nodemap. We need to detect that.
299 331
300 332 $ hg up "$NODE~5"
301 333 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
302 334 $ echo bar > babar
303 335 $ hg add babar
304 336 $ hg ci -m 'babar'
305 337 created new head
306 338 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
307 339 $ hg log -r "$OTHERNODE" -T '{rev}\n'
308 340 5004
309 341
310 342 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
311 343
312 344 the nodemap should detect that the changelog has been tampered with and recover.
313 345
314 346 $ hg debugnodemap --metadata
315 347 uid: ???????????????? (glob)
316 348 tip-rev: 5002
317 349 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
318 350 data-length: 121536 (pure !)
319 351 data-length: 121088 (rust !)
320 352 data-length: 121088 (no-pure no-rust !)
321 353 data-unused: 448 (pure !)
322 354 data-unused: 0 (rust !)
323 355 data-unused: 0 (no-pure no-rust !)
324 356 data-unused: 0.000% (rust !)
325 357 data-unused: 0.369% (pure !)
326 358 data-unused: 0.000% (no-pure no-rust !)
327 359
328 360 $ cp -f ../tmp-copies/* .hg/store/
329 361 $ hg debugnodemap --metadata
330 362 uid: ???????????????? (glob)
331 363 tip-rev: 5002
332 364 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
333 365 data-length: 121088
334 366 data-unused: 0
335 367 data-unused: 0.000%
336 368 $ hg log -r "$OTHERNODE" -T '{rev}\n'
337 369 5002
338 370
339 371 Check transaction related property
340 372 ==================================
341 373
342 374 An up to date nodemap should be available to shell hooks,
343 375
344 376 $ echo dsljfl > a
345 377 $ hg add a
346 378 $ hg ci -m a
347 379 $ hg debugnodemap --metadata
348 380 uid: ???????????????? (glob)
349 381 tip-rev: 5003
350 382 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
351 383 data-length: 121088
352 384 data-unused: 0
353 385 data-unused: 0.000%
354 386 $ echo babar2 > babar
355 387 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
356 388 uid: ???????????????? (glob)
357 389 tip-rev: 5004
358 390 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
359 391 data-length: 121280 (pure !)
360 392 data-length: 121280 (rust !)
361 393 data-length: 121088 (no-pure no-rust !)
362 394 data-unused: 192 (pure !)
363 395 data-unused: 192 (rust !)
364 396 data-unused: 0 (no-pure no-rust !)
365 397 data-unused: 0.158% (pure !)
366 398 data-unused: 0.158% (rust !)
367 399 data-unused: 0.000% (no-pure no-rust !)
368 400 $ hg debugnodemap --metadata
369 401 uid: ???????????????? (glob)
370 402 tip-rev: 5004
371 403 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
372 404 data-length: 121280 (pure !)
373 405 data-length: 121280 (rust !)
374 406 data-length: 121088 (no-pure no-rust !)
375 407 data-unused: 192 (pure !)
376 408 data-unused: 192 (rust !)
377 409 data-unused: 0 (no-pure no-rust !)
378 410 data-unused: 0.158% (pure !)
379 411 data-unused: 0.158% (rust !)
380 412 data-unused: 0.000% (no-pure no-rust !)
381 413
382 414 Another process does not see the pending nodemap content during run.
383 415
384 416 $ PATH=$RUNTESTDIR/testlib/:$PATH
385 417 $ echo qpoasp > a
386 418 $ hg ci -m a2 \
387 419 > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \
388 420 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
389 421
390 422 (read the repository while the commit transaction is pending)
391 423
392 424 $ wait-on-file 20 sync-txn-pending && \
393 425 > hg debugnodemap --metadata && \
394 426 > wait-on-file 20 sync-txn-close sync-repo-read
395 427 uid: ???????????????? (glob)
396 428 tip-rev: 5004
397 429 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
398 430 data-length: 121280 (pure !)
399 431 data-length: 121280 (rust !)
400 432 data-length: 121088 (no-pure no-rust !)
401 433 data-unused: 192 (pure !)
402 434 data-unused: 192 (rust !)
403 435 data-unused: 0 (no-pure no-rust !)
404 436 data-unused: 0.158% (pure !)
405 437 data-unused: 0.158% (rust !)
406 438 data-unused: 0.000% (no-pure no-rust !)
407 439 $ hg debugnodemap --metadata
408 440 uid: ???????????????? (glob)
409 441 tip-rev: 5005
410 442 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
411 443 data-length: 121536 (pure !)
412 444 data-length: 121536 (rust !)
413 445 data-length: 121088 (no-pure no-rust !)
414 446 data-unused: 448 (pure !)
415 447 data-unused: 448 (rust !)
416 448 data-unused: 0 (no-pure no-rust !)
417 449 data-unused: 0.369% (pure !)
418 450 data-unused: 0.369% (rust !)
419 451 data-unused: 0.000% (no-pure no-rust !)
420 452
421 453 $ cat output.txt
422 454
423 455 Check that a failing transaction will properly revert the data
424 456
425 457 $ echo plakfe > a
426 458 $ f --size --sha256 .hg/store/00changelog-*.nd
427 459 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
428 460 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
429 461 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
430 462 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
431 463 transaction abort!
432 464 rollback completed
433 465 abort: This is a late abort
434 466 [255]
435 467 $ hg debugnodemap --metadata
436 468 uid: ???????????????? (glob)
437 469 tip-rev: 5005
438 470 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
439 471 data-length: 121536 (pure !)
440 472 data-length: 121536 (rust !)
441 473 data-length: 121088 (no-pure no-rust !)
442 474 data-unused: 448 (pure !)
443 475 data-unused: 448 (rust !)
444 476 data-unused: 0 (no-pure no-rust !)
445 477 data-unused: 0.369% (pure !)
446 478 data-unused: 0.369% (rust !)
447 479 data-unused: 0.000% (no-pure no-rust !)
448 480 $ f --size --sha256 .hg/store/00changelog-*.nd
449 481 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
450 482 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
451 483 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
452 484
453 485 Check that removing content does not confuse the nodemap
454 486 --------------------------------------------------------
455 487
456 488 removing data with rollback
457 489
458 490 $ echo aso > a
459 491 $ hg ci -m a4
460 492 $ hg rollback
461 493 repository tip rolled back to revision 5005 (undo commit)
462 494 working directory now based on revision 5005
463 495 $ hg id -r .
464 496 90d5d3ba2fc4 tip
465 497
466 498 removing data with strip
467 499
468 500 $ echo aso > a
469 501 $ hg ci -m a4
470 502 $ hg --config extensions.strip= strip -r . --no-backup
471 503 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
472 504 $ hg id -r . --traceback
473 505 90d5d3ba2fc4 tip
474 506
475 507 Test upgrade / downgrade
476 508 ========================
477 509
478 510 downgrading
479 511
480 512 $ cat << EOF >> .hg/hgrc
481 513 > [format]
482 514 > use-persistent-nodemap=no
483 515 > EOF
484 516 $ hg debugformat -v
485 517 format-variant repo config default
486 518 fncache: yes yes yes
487 519 dotencode: yes yes yes
488 520 generaldelta: yes yes yes
489 521 exp-sharesafe: no no no
490 522 sparserevlog: yes yes yes
491 523 sidedata: no no no
492 524 persistent-nodemap: yes no no
493 525 copies-sdc: no no no
494 526 plain-cl-delta: yes yes yes
495 527 compression: zlib zlib zlib
496 528 compression-level: default default default
497 529 $ hg debugupgraderepo --run --no-backup --quiet
498 530 upgrade will perform the following actions:
499 531
500 532 requirements
501 533 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
502 534 removed: persistent-nodemap
503 535
504 536 processed revlogs:
505 537 - all-filelogs
506 538 - changelog
507 539 - manifest
508 540
509 541 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
510 542 [1]
511 543 $ hg debugnodemap --metadata
512 544
513 545
514 546 upgrading
515 547
516 548 $ cat << EOF >> .hg/hgrc
517 549 > [format]
518 550 > use-persistent-nodemap=yes
519 551 > EOF
520 552 $ hg debugformat -v
521 553 format-variant repo config default
522 554 fncache: yes yes yes
523 555 dotencode: yes yes yes
524 556 generaldelta: yes yes yes
525 557 exp-sharesafe: no no no
526 558 sparserevlog: yes yes yes
527 559 sidedata: no no no
528 560 persistent-nodemap: no yes no
529 561 copies-sdc: no no no
530 562 plain-cl-delta: yes yes yes
531 563 compression: zlib zlib zlib
532 564 compression-level: default default default
533 565 $ hg debugupgraderepo --run --no-backup --quiet
534 566 upgrade will perform the following actions:
535 567
536 568 requirements
537 569 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
538 570 added: persistent-nodemap
539 571
540 572 processed revlogs:
541 573 - all-filelogs
542 574 - changelog
543 575 - manifest
544 576
545 577 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
546 578 00changelog-*.nd (glob)
547 579 00changelog.n
548 580 00manifest-*.nd (glob)
549 581 00manifest.n
550 582
551 583 $ hg debugnodemap --metadata
552 584 uid: * (glob)
553 585 tip-rev: 5005
554 586 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
555 587 data-length: 121088
556 588 data-unused: 0
557 589 data-unused: 0.000%
558 590
559 591 Running unrelated upgrade
560 592
561 593 $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
562 594 upgrade will perform the following actions:
563 595
564 596 requirements
565 597 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store
566 598
567 599 optimisations: re-delta-all
568 600
569 601 processed revlogs:
570 602 - all-filelogs
571 603 - changelog
572 604 - manifest
573 605
574 606 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
575 607 00changelog-*.nd (glob)
576 608 00changelog.n
577 609 00manifest-*.nd (glob)
578 610 00manifest.n
579 611
580 612 $ hg debugnodemap --metadata
581 613 uid: * (glob)
582 614 tip-rev: 5005
583 615 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
584 616 data-length: 121088
585 617 data-unused: 0
586 618 data-unused: 0.000%
587 619
588 620 Persistent nodemap and local/streaming clone
589 621 ============================================
590 622
591 623 $ cd ..
592 624
593 625 standard clone
594 626 --------------
595 627
596 628 The persistent nodemap should exist after a standard (pull) clone
597 629
598 630 $ hg clone --pull --quiet -U test-repo standard-clone
599 631 $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
600 632 00changelog-*.nd (glob)
601 633 00changelog.n
602 634 00manifest-*.nd (glob)
603 635 00manifest.n
604 636 $ hg -R standard-clone debugnodemap --metadata
605 637 uid: * (glob)
606 638 tip-rev: 5005
607 639 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
608 640 data-length: 121088
609 641 data-unused: 0
610 642 data-unused: 0.000%
611 643
612 644
613 645 local clone
614 646 ------------
615 647
616 648 The persistent nodemap should not exist after a local clone
617 649
618 650 $ hg clone -U test-repo local-clone
619 651 $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
620 652 [1]
621 653 $ hg -R local-clone debugnodemap --metadata
622 654
623 655 stream clone
624 656 ------------
625 657
626 658 The persistent nodemap should exist after a streaming clone
627 659
628 660 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
629 661 adding [s] 00manifest.n (70 bytes)
630 662 adding [s] 00manifest.i (313 KB)
631 663 adding [s] 00manifest.d (452 KB)
632 664 adding [s] 00changelog.n (70 bytes)
633 665 adding [s] 00changelog.i (313 KB)
634 666 adding [s] 00changelog.d (360 KB)
635 667 $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
636 668 [1]
637 669 $ hg -R stream-clone debugnodemap --metadata
@@ -1,579 +1,581 b''
1 1 setup
2 2
3 3 $ cat >> $HGRCPATH <<EOF
4 4 > [extensions]
5 5 > share =
6 6 > [format]
7 7 > exp-share-safe = True
8 > [storage]
9 > revlog.persistent-nodemap.slow-path=allow
8 10 > EOF
9 11
10 12 prepare source repo
11 13
12 14 $ hg init source
13 15 $ cd source
14 16 $ cat .hg/requires
15 17 exp-sharesafe
16 18 $ cat .hg/store/requires
17 19 dotencode
18 20 fncache
19 21 generaldelta
20 22 revlogv1
21 23 sparserevlog
22 24 store
23 25 $ hg debugrequirements
24 26 dotencode
25 27 exp-sharesafe
26 28 fncache
27 29 generaldelta
28 30 revlogv1
29 31 sparserevlog
30 32 store
31 33
32 34 $ echo a > a
33 35 $ hg ci -Aqm "added a"
34 36 $ echo b > b
35 37 $ hg ci -Aqm "added b"
36 38
37 39 $ HGEDITOR=cat hg config --shared
38 40 abort: repository is not shared; can't use --shared
39 41 [10]
40 42 $ cd ..
41 43
42 44 Create a shared repo and check the requirements are shared and read correctly
43 45 $ hg share source shared1
44 46 updating working directory
45 47 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
46 48 $ cd shared1
47 49 $ cat .hg/requires
48 50 exp-sharesafe
49 51 shared
50 52
51 53 $ hg debugrequirements -R ../source
52 54 dotencode
53 55 exp-sharesafe
54 56 fncache
55 57 generaldelta
56 58 revlogv1
57 59 sparserevlog
58 60 store
59 61
60 62 $ hg debugrequirements
61 63 dotencode
62 64 exp-sharesafe
63 65 fncache
64 66 generaldelta
65 67 revlogv1
66 68 shared
67 69 sparserevlog
68 70 store
69 71
70 72 $ echo c > c
71 73 $ hg ci -Aqm "added c"
72 74
73 75 Check that config of the source repository is also loaded
74 76
75 77 $ hg showconfig ui.curses
76 78 [1]
77 79
78 80 $ echo "[ui]" >> ../source/.hg/hgrc
79 81 $ echo "curses=true" >> ../source/.hg/hgrc
80 82
81 83 $ hg showconfig ui.curses
82 84 true
83 85
84 86 Test that extensions of source repository are also loaded
85 87
86 88 $ hg debugextensions
87 89 share
88 90 $ hg extdiff -p echo
89 91 hg: unknown command 'extdiff'
90 92 'extdiff' is provided by the following extension:
91 93
92 94 extdiff command to allow external programs to compare revisions
93 95
94 96 (use 'hg help extensions' for information on enabling extensions)
95 97 [10]
96 98
97 99 $ echo "[extensions]" >> ../source/.hg/hgrc
98 100 $ echo "extdiff=" >> ../source/.hg/hgrc
99 101
100 102 $ hg debugextensions -R ../source
101 103 extdiff
102 104 share
103 105 $ hg extdiff -R ../source -p echo
104 106
105 107 BROKEN: the command below will not work if config of shared source is not loaded
106 108 on dispatch but debugextensions says that extension
107 109 is loaded
108 110 $ hg debugextensions
109 111 extdiff
110 112 share
111 113
112 114 $ hg extdiff -p echo
113 115
114 116 However, local .hg/hgrc should override the config set by share source
115 117
116 118 $ echo "[ui]" >> .hg/hgrc
117 119 $ echo "curses=false" >> .hg/hgrc
118 120
119 121 $ hg showconfig ui.curses
120 122 false
121 123
122 124 $ HGEDITOR=cat hg config --shared
123 125 [ui]
124 126 curses=true
125 127 [extensions]
126 128 extdiff=
127 129
128 130 $ HGEDITOR=cat hg config --local
129 131 [ui]
130 132 curses=false
131 133
132 134 Testing that hooks set in source repository also runs in shared repo
133 135
134 136 $ cd ../source
135 137 $ cat <<EOF >> .hg/hgrc
136 138 > [extensions]
137 139 > hooklib=
138 140 > [hooks]
139 141 > pretxnchangegroup.reject_merge_commits = \
140 142 > python:hgext.hooklib.reject_merge_commits.hook
141 143 > EOF
142 144
143 145 $ cd ..
144 146 $ hg clone source cloned
145 147 updating to branch default
146 148 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
147 149 $ cd cloned
148 150 $ hg up 0
149 151 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
150 152 $ echo bar > bar
151 153 $ hg ci -Aqm "added bar"
152 154 $ hg merge
153 155 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
154 156 (branch merge, don't forget to commit)
155 157 $ hg ci -m "merge commit"
156 158
157 159 $ hg push ../source
158 160 pushing to ../source
159 161 searching for changes
160 162 adding changesets
161 163 adding manifests
162 164 adding file changes
163 165 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
164 166 transaction abort!
165 167 rollback completed
166 168 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
167 169 [255]
168 170
169 171 $ hg push ../shared1
170 172 pushing to ../shared1
171 173 searching for changes
172 174 adding changesets
173 175 adding manifests
174 176 adding file changes
175 177 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
176 178 transaction abort!
177 179 rollback completed
178 180 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
179 181 [255]
180 182
181 183 Test that if share source config is untrusted, we dont read it
182 184
183 185 $ cd ../shared1
184 186
185 187 $ cat << EOF > $TESTTMP/untrusted.py
186 188 > from mercurial import scmutil, util
187 189 > def uisetup(ui):
188 190 > class untrustedui(ui.__class__):
189 191 > def _trusted(self, fp, f):
190 192 > if util.normpath(fp.name).endswith(b'source/.hg/hgrc'):
191 193 > return False
192 194 > return super(untrustedui, self)._trusted(fp, f)
193 195 > ui.__class__ = untrustedui
194 196 > EOF
195 197
196 198 $ hg showconfig hooks
197 199 hooks.pretxnchangegroup.reject_merge_commits=python:hgext.hooklib.reject_merge_commits.hook
198 200
199 201 $ hg showconfig hooks --config extensions.untrusted=$TESTTMP/untrusted.py
200 202 [1]
201 203
202 204 Update the source repository format and check that shared repo works
203 205
204 206 $ cd ../source
205 207
206 208 Disable zstd related tests because its not present on pure version
207 209 #if zstd
208 210 $ echo "[format]" >> .hg/hgrc
209 211 $ echo "revlog-compression=zstd" >> .hg/hgrc
210 212
211 213 $ hg debugupgraderepo --run -q
212 214 upgrade will perform the following actions:
213 215
214 216 requirements
215 217 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store
216 218 added: revlog-compression-zstd
217 219
218 220 processed revlogs:
219 221 - all-filelogs
220 222 - changelog
221 223 - manifest
222 224
223 225 $ hg log -r .
224 226 changeset: 1:5f6d8a4bf34a
225 227 user: test
226 228 date: Thu Jan 01 00:00:00 1970 +0000
227 229 summary: added b
228 230
229 231 #endif
230 232 $ echo "[format]" >> .hg/hgrc
231 233 $ echo "use-persistent-nodemap=True" >> .hg/hgrc
232 234
233 235 $ hg debugupgraderepo --run -q -R ../shared1
234 236 abort: cannot upgrade repository; unsupported source requirement: shared
235 237 [255]
236 238
237 239 $ hg debugupgraderepo --run -q
238 240 upgrade will perform the following actions:
239 241
240 242 requirements
241 243 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
242 244 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
243 245 added: persistent-nodemap
244 246
245 247 processed revlogs:
246 248 - all-filelogs
247 249 - changelog
248 250 - manifest
249 251
250 252 $ hg log -r .
251 253 changeset: 1:5f6d8a4bf34a
252 254 user: test
253 255 date: Thu Jan 01 00:00:00 1970 +0000
254 256 summary: added b
255 257
256 258
257 259 Shared one should work
258 260 $ cd ../shared1
259 261 $ hg log -r .
260 262 changeset: 2:155349b645be
261 263 tag: tip
262 264 user: test
263 265 date: Thu Jan 01 00:00:00 1970 +0000
264 266 summary: added c
265 267
266 268
267 269 Testing that nonsharedrc is loaded for source and not shared
268 270
269 271 $ cd ../source
270 272 $ touch .hg/hgrc-not-shared
271 273 $ echo "[ui]" >> .hg/hgrc-not-shared
272 274 $ echo "traceback=true" >> .hg/hgrc-not-shared
273 275
274 276 $ hg showconfig ui.traceback
275 277 true
276 278
277 279 $ HGEDITOR=cat hg config --non-shared
278 280 [ui]
279 281 traceback=true
280 282
281 283 $ cd ../shared1
282 284 $ hg showconfig ui.traceback
283 285 [1]
284 286
285 287 Unsharing works
286 288
287 289 $ hg unshare
288 290
289 291 Test that source config is added to the shared one after unshare, and the config
290 292 of current repo is still respected over the config which came from source config
291 293 $ cd ../cloned
292 294 $ hg push ../shared1
293 295 pushing to ../shared1
294 296 searching for changes
295 297 adding changesets
296 298 adding manifests
297 299 adding file changes
298 300 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
299 301 transaction abort!
300 302 rollback completed
301 303 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
302 304 [255]
303 305 $ hg showconfig ui.curses -R ../shared1
304 306 false
305 307
306 308 $ cd ../
307 309
308 310 Test that upgrading using debugupgraderepo works
309 311 =================================================
310 312
311 313 $ hg init non-share-safe --config format.exp-share-safe=false
312 314 $ cd non-share-safe
313 315 $ hg debugrequirements
314 316 dotencode
315 317 fncache
316 318 generaldelta
317 319 revlogv1
318 320 sparserevlog
319 321 store
320 322 $ echo foo > foo
321 323 $ hg ci -Aqm 'added foo'
322 324 $ echo bar > bar
323 325 $ hg ci -Aqm 'added bar'
324 326
325 327 Create a share before upgrading
326 328
327 329 $ cd ..
328 330 $ hg share non-share-safe nss-share
329 331 updating working directory
330 332 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
331 333 $ hg debugrequirements -R nss-share
332 334 dotencode
333 335 fncache
334 336 generaldelta
335 337 revlogv1
336 338 shared
337 339 sparserevlog
338 340 store
339 341 $ cd non-share-safe
340 342
341 343 Upgrade
342 344
343 345 $ hg debugupgraderepo -q
344 346 requirements
345 347 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
346 348 added: exp-sharesafe
347 349
348 350 processed revlogs:
349 351 - all-filelogs
350 352 - changelog
351 353 - manifest
352 354
353 355 $ hg debugupgraderepo --run -q
354 356 upgrade will perform the following actions:
355 357
356 358 requirements
357 359 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
358 360 added: exp-sharesafe
359 361
360 362 processed revlogs:
361 363 - all-filelogs
362 364 - changelog
363 365 - manifest
364 366
365 367 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
366 368
367 369 $ hg debugrequirements
368 370 dotencode
369 371 exp-sharesafe
370 372 fncache
371 373 generaldelta
372 374 revlogv1
373 375 sparserevlog
374 376 store
375 377
376 378 $ cat .hg/requires
377 379 exp-sharesafe
378 380
379 381 $ cat .hg/store/requires
380 382 dotencode
381 383 fncache
382 384 generaldelta
383 385 revlogv1
384 386 sparserevlog
385 387 store
386 388
387 389 $ hg log -GT "{node}: {desc}\n"
388 390 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
389 391 |
390 392 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
391 393
392 394
393 395 Make sure existing shares still works
394 396
395 397 $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-warn-outdated-shares=false
396 398 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
397 399 |
398 400 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
399 401
400 402
401 403 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
402 404 warning: source repository supports share-safe functionality. Reshare to upgrade.
403 405 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
404 406 |
405 407 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
406 408
407 409
408 410
409 411 Create a safe share from upgrade one
410 412
411 413 $ cd ..
412 414 $ hg share non-share-safe ss-share
413 415 updating working directory
414 416 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
415 417 $ cd ss-share
416 418 $ hg log -GT "{node}: {desc}\n"
417 419 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
418 420 |
419 421 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
420 422
421 423 $ cd ../non-share-safe
422 424
423 425 Test that downgrading works too
424 426
425 427 $ cat >> $HGRCPATH <<EOF
426 428 > [extensions]
427 429 > share =
428 430 > [format]
429 431 > exp-share-safe = False
430 432 > EOF
431 433
432 434 $ hg debugupgraderepo -q
433 435 requirements
434 436 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
435 437 removed: exp-sharesafe
436 438
437 439 processed revlogs:
438 440 - all-filelogs
439 441 - changelog
440 442 - manifest
441 443
442 444 $ hg debugupgraderepo -q --run
443 445 upgrade will perform the following actions:
444 446
445 447 requirements
446 448 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
447 449 removed: exp-sharesafe
448 450
449 451 processed revlogs:
450 452 - all-filelogs
451 453 - changelog
452 454 - manifest
453 455
454 456 repository downgraded to not use share safe mode, existing shares will not work and needs to be reshared.
455 457
456 458 $ hg debugrequirements
457 459 dotencode
458 460 fncache
459 461 generaldelta
460 462 revlogv1
461 463 sparserevlog
462 464 store
463 465
464 466 $ cat .hg/requires
465 467 dotencode
466 468 fncache
467 469 generaldelta
468 470 revlogv1
469 471 sparserevlog
470 472 store
471 473
472 474 $ test -f .hg/store/requires
473 475 [1]
474 476
475 477 $ hg log -GT "{node}: {desc}\n"
476 478 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
477 479 |
478 480 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
479 481
480 482
481 483 Make sure existing shares still works
482 484
483 485 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
484 486 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
485 487 |
486 488 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
487 489
488 490
489 491 $ hg log -GT "{node}: {desc}\n" -R ../ss-share
490 492 abort: share source does not support exp-sharesafe requirement
491 493 [255]
492 494
493 495 Testing automatic downgrade of shares when config is set
494 496
495 497 $ touch ../ss-share/.hg/wlock
496 498 $ hg log -GT "{node}: {desc}\n" -R ../ss-share --config experimental.sharesafe-auto-downgrade-shares=true
497 499 abort: failed to downgrade share, got error: Lock held
498 500 [255]
499 501 $ rm ../ss-share/.hg/wlock
500 502
501 503 $ hg log -GT "{node}: {desc}\n" -R ../ss-share --config experimental.sharesafe-auto-downgrade-shares=true
502 504 repository downgraded to not use share-safe mode
503 505 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
504 506 |
505 507 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
506 508
507 509
508 510 $ hg log -GT "{node}: {desc}\n" -R ../ss-share
509 511 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
510 512 |
511 513 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
512 514
513 515
514 516
515 517 Testing automatic upgrade of shares when config is set
516 518
517 519 $ hg debugupgraderepo -q --run --config format.exp-share-safe=True
518 520 upgrade will perform the following actions:
519 521
520 522 requirements
521 523 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
522 524 added: exp-sharesafe
523 525
524 526 processed revlogs:
525 527 - all-filelogs
526 528 - changelog
527 529 - manifest
528 530
529 531 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
530 532 $ hg debugrequirements
531 533 dotencode
532 534 exp-sharesafe
533 535 fncache
534 536 generaldelta
535 537 revlogv1
536 538 sparserevlog
537 539 store
538 540 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
539 541 warning: source repository supports share-safe functionality. Reshare to upgrade.
540 542 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
541 543 |
542 544 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
543 545
544 546
545 547 Check that if lock is taken, upgrade fails but read operation are successful
546 548 $ touch ../nss-share/.hg/wlock
547 549 $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true
548 550 failed to upgrade share, got error: Lock held
549 551 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
550 552 |
551 553 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
552 554
553 555
554 556 $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true --config experimental.sharesafe-warn-outdated-shares=false
555 557 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
556 558 |
557 559 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
558 560
559 561
560 562 $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true --config experimental.sharesafe-auto-upgrade-fail-error=true
561 563 abort: failed to upgrade share, got error: Lock held
562 564 [255]
563 565
564 566 $ rm ../nss-share/.hg/wlock
565 567 $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true
566 568 repository upgraded to use share-safe mode
567 569 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
568 570 |
569 571 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
570 572
571 573
572 574 Test that unshare works
573 575
574 576 $ hg unshare -R ../nss-share
575 577 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
576 578 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
577 579 |
578 580 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
579 581
General Comments 0
You need to be logged in to leave comments. Login now