configitems: use standard "dynamicdefault" approach in edge case...
Raphaël Gomès
r51652:f0ae403b default
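
This revision drops the bespoke `_persistent_nodemap_default()` callable that was registered as the default of `format.use-persistent-nodemap` and switches the item to the standard `dynamicdefault` sentinel, so the derived value is supplied at the lookup site like other dynamic defaults. A minimal sketch of the two styles (illustrative only, not Mercurial's actual lookup code):

    dynamicdefault = object()  # sentinel: "the default is derived elsewhere"

    def resolve(item, explicit_default=None):
        # sentinel style (what this change adopts): the caller computes and
        # passes the derived default explicitly
        if item.default is dynamicdefault:
            return explicit_default
        # callable style (what this change removes for this item): the
        # registered function is invoked at lookup time
        if callable(item.default):
            return item.default()
        return item.default
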
@@ -1,2984 +1,2972 @@
1 1 # configitems.py - centralized declaration of configuration options
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import functools
10 10 import re
11 11
12 12 from . import (
13 13 encoding,
14 14 error,
15 15 )
16 16
17 17
18 18 def loadconfigtable(ui, extname, configtable):
19 19 """update config item known to the ui with the extension ones"""
20 20 for section, items in sorted(configtable.items()):
21 21 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 22 knownkeys = set(knownitems)
23 23 newkeys = set(items)
24 24 for key in sorted(knownkeys & newkeys):
25 25 msg = b"extension '%s' overwrites config item '%s.%s'"
26 26 msg %= (extname, section, key)
27 27 ui.develwarn(msg, config=b'warn-config')
28 28
29 29 knownitems.update(items)
30 30
31 31
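
For context, this is the function extensions feed: an extension declares its own `configtable` through `registrar.configitem` (the extension-facing wrapper over the registration machinery defined below), and the table is merged into the ui when the extension loads. A hedged sketch with illustrative names:

    # hypothetical extension module
    from mercurial import registrar

    configtable = {}
    configitem = registrar.configitem(configtable)

    configitem(b'myext', b'level', default=3)
    configitem(b'myext', b'mode', default=configitem.dynamicdefault)

    # at load time, core then runs (roughly):
    #   loadconfigtable(ui, b'myext', configtable)
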
32 32 class configitem:
33 33 """represent a known config item
34 34
35 35 :section: the official config section where to find this item,
36 36 :name: the official name within the section,
37 37 :default: default value for this item,
38 38 :alias: optional list of tuples as alternatives,
39 39 :generic: this is a generic definition, match name using regular expression.
40 40 """
41 41
42 42 def __init__(
43 43 self,
44 44 section,
45 45 name,
46 46 default=None,
47 47 alias=(),
48 48 generic=False,
49 49 priority=0,
50 50 experimental=False,
51 51 ):
52 52 self.section = section
53 53 self.name = name
54 54 self.default = default
55 55 self.alias = list(alias)
56 56 self.generic = generic
57 57 self.priority = priority
58 58 self.experimental = experimental
59 59 self._re = None
60 60 if generic:
61 61 self._re = re.compile(self.name)
62 62
63 63
64 64 class itemregister(dict):
65 65 """A specialized dictionary that can handle wild-card selection"""
66 66
67 67 def __init__(self):
68 68 super(itemregister, self).__init__()
69 69 self._generics = set()
70 70
71 71 def update(self, other):
72 72 super(itemregister, self).update(other)
73 73 self._generics.update(other._generics)
74 74
75 75 def __setitem__(self, key, item):
76 76 super(itemregister, self).__setitem__(key, item)
77 77 if item.generic:
78 78 self._generics.add(item)
79 79
80 80 def get(self, key):
81 81 baseitem = super(itemregister, self).get(key)
82 82 if baseitem is not None and not baseitem.generic:
83 83 return baseitem
84 84
85 85 # search for a matching generic item
86 86 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
87 87 for item in generics:
88 88 # we use 'match' instead of 'search' to make the matching simpler
89 89 # for people unfamiliar with regular expressions. Having the match
90 90 # rooted at the start of the string will produce less surprising
91 91 # results for users writing simple regexes for sub-attributes.
92 92 #
93 93 # For example, using "color\..*" match produces an unsurprising
94 94 # result, while using search could suddenly match apparently
95 95 # unrelated configuration that happens to contain "color."
96 96 # anywhere. This is a tradeoff where we favor requiring ".*" on
97 97 # some matches to avoid the need to prefix most patterns with "^".
98 98 # The "^" seems more error-prone.
99 99 if item._re.match(key):
100 100 return item
101 101
102 102 return None
103 103
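
To illustrate the anchored-match behavior described in the comment above, a small usage sketch of the two classes defined in this file:

    reg = itemregister()
    item = configitem(b'color', br'color\..*', default=None, generic=True)
    reg[item.name] = item

    assert reg.get(b'color.diff.deleted') is item  # pattern matches at the start
    assert reg.get(b'ui.color') is None  # "color." mid-string does not match
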
104 104
105 105 coreitems = {}
106 106
107 107
108 108 def _register(configtable, *args, **kwargs):
109 109 item = configitem(*args, **kwargs)
110 110 section = configtable.setdefault(item.section, itemregister())
111 111 if item.name in section:
112 112 msg = b"duplicated config item registration for '%s.%s'"
113 113 raise error.ProgrammingError(msg % (item.section, item.name))
114 114 section[item.name] = item
115 115
116 116
117 117 # special value for the case where the default is derived from other values
118 118 dynamicdefault = object()
119 119
120 120 # Registering actual config items
121 121
122 122
123 123 def getitemregister(configtable):
124 124 f = functools.partial(_register, configtable)
125 125 # export pseudo enum as configitem.*
126 126 f.dynamicdefault = dynamicdefault
127 127 return f
128 128
129 129
130 130 coreconfigitem = getitemregister(coreitems)
131 131
132 132
133 133 def _registerdiffopts(section, configprefix=b''):
134 134 coreconfigitem(
135 135 section,
136 136 configprefix + b'nodates',
137 137 default=False,
138 138 )
139 139 coreconfigitem(
140 140 section,
141 141 configprefix + b'showfunc',
142 142 default=False,
143 143 )
144 144 coreconfigitem(
145 145 section,
146 146 configprefix + b'unified',
147 147 default=None,
148 148 )
149 149 coreconfigitem(
150 150 section,
151 151 configprefix + b'git',
152 152 default=False,
153 153 )
154 154 coreconfigitem(
155 155 section,
156 156 configprefix + b'ignorews',
157 157 default=False,
158 158 )
159 159 coreconfigitem(
160 160 section,
161 161 configprefix + b'ignorewsamount',
162 162 default=False,
163 163 )
164 164 coreconfigitem(
165 165 section,
166 166 configprefix + b'ignoreblanklines',
167 167 default=False,
168 168 )
169 169 coreconfigitem(
170 170 section,
171 171 configprefix + b'ignorewseol',
172 172 default=False,
173 173 )
174 174 coreconfigitem(
175 175 section,
176 176 configprefix + b'nobinary',
177 177 default=False,
178 178 )
179 179 coreconfigitem(
180 180 section,
181 181 configprefix + b'noprefix',
182 182 default=False,
183 183 )
184 184 coreconfigitem(
185 185 section,
186 186 configprefix + b'word-diff',
187 187 default=False,
188 188 )
189 189
190 190
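
For reference, each `_registerdiffopts(...)` call below is shorthand for registering the shared diff knobs under the given section, so `_registerdiffopts(section=b'annotate')` expands to the equivalent of:

    coreconfigitem(b'annotate', b'nodates', default=False)
    coreconfigitem(b'annotate', b'showfunc', default=False)
    coreconfigitem(b'annotate', b'unified', default=None)
    # ... and likewise for 'git', 'ignorews', 'ignorewsamount',
    # 'ignoreblanklines', 'ignorewseol', 'nobinary', 'noprefix',
    # and 'word-diff'.
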
191 191 coreconfigitem(
192 192 b'alias',
193 193 b'.*',
194 194 default=dynamicdefault,
195 195 generic=True,
196 196 )
197 197 coreconfigitem(
198 198 b'auth',
199 199 b'cookiefile',
200 200 default=None,
201 201 )
202 202 _registerdiffopts(section=b'annotate')
203 203 # bookmarks.pushing: internal hack for discovery
204 204 coreconfigitem(
205 205 b'bookmarks',
206 206 b'pushing',
207 207 default=list,
208 208 )
209 209 # bundle.mainreporoot: internal hack for bundlerepo
210 210 coreconfigitem(
211 211 b'bundle',
212 212 b'mainreporoot',
213 213 default=b'',
214 214 )
215 215 coreconfigitem(
216 216 b'censor',
217 217 b'policy',
218 218 default=b'abort',
219 219 experimental=True,
220 220 )
221 221 coreconfigitem(
222 222 b'chgserver',
223 223 b'idletimeout',
224 224 default=3600,
225 225 )
226 226 coreconfigitem(
227 227 b'chgserver',
228 228 b'skiphash',
229 229 default=False,
230 230 )
231 231 coreconfigitem(
232 232 b'cmdserver',
233 233 b'log',
234 234 default=None,
235 235 )
236 236 coreconfigitem(
237 237 b'cmdserver',
238 238 b'max-log-files',
239 239 default=7,
240 240 )
241 241 coreconfigitem(
242 242 b'cmdserver',
243 243 b'max-log-size',
244 244 default=b'1 MB',
245 245 )
246 246 coreconfigitem(
247 247 b'cmdserver',
248 248 b'max-repo-cache',
249 249 default=0,
250 250 experimental=True,
251 251 )
252 252 coreconfigitem(
253 253 b'cmdserver',
254 254 b'message-encodings',
255 255 default=list,
256 256 )
257 257 coreconfigitem(
258 258 b'cmdserver',
259 259 b'track-log',
260 260 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
261 261 )
262 262 coreconfigitem(
263 263 b'cmdserver',
264 264 b'shutdown-on-interrupt',
265 265 default=True,
266 266 )
267 267 coreconfigitem(
268 268 b'color',
269 269 b'.*',
270 270 default=None,
271 271 generic=True,
272 272 )
273 273 coreconfigitem(
274 274 b'color',
275 275 b'mode',
276 276 default=b'auto',
277 277 )
278 278 coreconfigitem(
279 279 b'color',
280 280 b'pagermode',
281 281 default=dynamicdefault,
282 282 )
283 283 coreconfigitem(
284 284 b'command-templates',
285 285 b'graphnode',
286 286 default=None,
287 287 alias=[(b'ui', b'graphnodetemplate')],
288 288 )
289 289 coreconfigitem(
290 290 b'command-templates',
291 291 b'log',
292 292 default=None,
293 293 alias=[(b'ui', b'logtemplate')],
294 294 )
295 295 coreconfigitem(
296 296 b'command-templates',
297 297 b'mergemarker',
298 298 default=(
299 299 b'{node|short} '
300 300 b'{ifeq(tags, "tip", "", '
301 301 b'ifeq(tags, "", "", "{tags} "))}'
302 302 b'{if(bookmarks, "{bookmarks} ")}'
303 303 b'{ifeq(branch, "default", "", "{branch} ")}'
304 304 b'- {author|user}: {desc|firstline}'
305 305 ),
306 306 alias=[(b'ui', b'mergemarkertemplate')],
307 307 )
308 308 coreconfigitem(
309 309 b'command-templates',
310 310 b'pre-merge-tool-output',
311 311 default=None,
312 312 alias=[(b'ui', b'pre-merge-tool-output-template')],
313 313 )
314 314 coreconfigitem(
315 315 b'command-templates',
316 316 b'oneline-summary',
317 317 default=None,
318 318 )
319 319 coreconfigitem(
320 320 b'command-templates',
321 321 b'oneline-summary.*',
322 322 default=dynamicdefault,
323 323 generic=True,
324 324 )
325 325 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
326 326 coreconfigitem(
327 327 b'commands',
328 328 b'commit.post-status',
329 329 default=False,
330 330 )
331 331 coreconfigitem(
332 332 b'commands',
333 333 b'grep.all-files',
334 334 default=False,
335 335 experimental=True,
336 336 )
337 337 coreconfigitem(
338 338 b'commands',
339 339 b'merge.require-rev',
340 340 default=False,
341 341 )
342 342 coreconfigitem(
343 343 b'commands',
344 344 b'push.require-revs',
345 345 default=False,
346 346 )
347 347 coreconfigitem(
348 348 b'commands',
349 349 b'resolve.confirm',
350 350 default=False,
351 351 )
352 352 coreconfigitem(
353 353 b'commands',
354 354 b'resolve.explicit-re-merge',
355 355 default=False,
356 356 )
357 357 coreconfigitem(
358 358 b'commands',
359 359 b'resolve.mark-check',
360 360 default=b'none',
361 361 )
362 362 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
363 363 coreconfigitem(
364 364 b'commands',
365 365 b'show.aliasprefix',
366 366 default=list,
367 367 )
368 368 coreconfigitem(
369 369 b'commands',
370 370 b'status.relative',
371 371 default=False,
372 372 )
373 373 coreconfigitem(
374 374 b'commands',
375 375 b'status.skipstates',
376 376 default=[],
377 377 experimental=True,
378 378 )
379 379 coreconfigitem(
380 380 b'commands',
381 381 b'status.terse',
382 382 default=b'',
383 383 )
384 384 coreconfigitem(
385 385 b'commands',
386 386 b'status.verbose',
387 387 default=False,
388 388 )
389 389 coreconfigitem(
390 390 b'commands',
391 391 b'update.check',
392 392 default=None,
393 393 )
394 394 coreconfigitem(
395 395 b'commands',
396 396 b'update.requiredest',
397 397 default=False,
398 398 )
399 399 coreconfigitem(
400 400 b'committemplate',
401 401 b'.*',
402 402 default=None,
403 403 generic=True,
404 404 )
405 405 coreconfigitem(
406 406 b'convert',
407 407 b'bzr.saverev',
408 408 default=True,
409 409 )
410 410 coreconfigitem(
411 411 b'convert',
412 412 b'cvsps.cache',
413 413 default=True,
414 414 )
415 415 coreconfigitem(
416 416 b'convert',
417 417 b'cvsps.fuzz',
418 418 default=60,
419 419 )
420 420 coreconfigitem(
421 421 b'convert',
422 422 b'cvsps.logencoding',
423 423 default=None,
424 424 )
425 425 coreconfigitem(
426 426 b'convert',
427 427 b'cvsps.mergefrom',
428 428 default=None,
429 429 )
430 430 coreconfigitem(
431 431 b'convert',
432 432 b'cvsps.mergeto',
433 433 default=None,
434 434 )
435 435 coreconfigitem(
436 436 b'convert',
437 437 b'git.committeractions',
438 438 default=lambda: [b'messagedifferent'],
439 439 )
440 440 coreconfigitem(
441 441 b'convert',
442 442 b'git.extrakeys',
443 443 default=list,
444 444 )
445 445 coreconfigitem(
446 446 b'convert',
447 447 b'git.findcopiesharder',
448 448 default=False,
449 449 )
450 450 coreconfigitem(
451 451 b'convert',
452 452 b'git.remoteprefix',
453 453 default=b'remote',
454 454 )
455 455 coreconfigitem(
456 456 b'convert',
457 457 b'git.renamelimit',
458 458 default=400,
459 459 )
460 460 coreconfigitem(
461 461 b'convert',
462 462 b'git.saverev',
463 463 default=True,
464 464 )
465 465 coreconfigitem(
466 466 b'convert',
467 467 b'git.similarity',
468 468 default=50,
469 469 )
470 470 coreconfigitem(
471 471 b'convert',
472 472 b'git.skipsubmodules',
473 473 default=False,
474 474 )
475 475 coreconfigitem(
476 476 b'convert',
477 477 b'hg.clonebranches',
478 478 default=False,
479 479 )
480 480 coreconfigitem(
481 481 b'convert',
482 482 b'hg.ignoreerrors',
483 483 default=False,
484 484 )
485 485 coreconfigitem(
486 486 b'convert',
487 487 b'hg.preserve-hash',
488 488 default=False,
489 489 )
490 490 coreconfigitem(
491 491 b'convert',
492 492 b'hg.revs',
493 493 default=None,
494 494 )
495 495 coreconfigitem(
496 496 b'convert',
497 497 b'hg.saverev',
498 498 default=False,
499 499 )
500 500 coreconfigitem(
501 501 b'convert',
502 502 b'hg.sourcename',
503 503 default=None,
504 504 )
505 505 coreconfigitem(
506 506 b'convert',
507 507 b'hg.startrev',
508 508 default=None,
509 509 )
510 510 coreconfigitem(
511 511 b'convert',
512 512 b'hg.tagsbranch',
513 513 default=b'default',
514 514 )
515 515 coreconfigitem(
516 516 b'convert',
517 517 b'hg.usebranchnames',
518 518 default=True,
519 519 )
520 520 coreconfigitem(
521 521 b'convert',
522 522 b'ignoreancestorcheck',
523 523 default=False,
524 524 experimental=True,
525 525 )
526 526 coreconfigitem(
527 527 b'convert',
528 528 b'localtimezone',
529 529 default=False,
530 530 )
531 531 coreconfigitem(
532 532 b'convert',
533 533 b'p4.encoding',
534 534 default=dynamicdefault,
535 535 )
536 536 coreconfigitem(
537 537 b'convert',
538 538 b'p4.startrev',
539 539 default=0,
540 540 )
541 541 coreconfigitem(
542 542 b'convert',
543 543 b'skiptags',
544 544 default=False,
545 545 )
546 546 coreconfigitem(
547 547 b'convert',
548 548 b'svn.debugsvnlog',
549 549 default=True,
550 550 )
551 551 coreconfigitem(
552 552 b'convert',
553 553 b'svn.trunk',
554 554 default=None,
555 555 )
556 556 coreconfigitem(
557 557 b'convert',
558 558 b'svn.tags',
559 559 default=None,
560 560 )
561 561 coreconfigitem(
562 562 b'convert',
563 563 b'svn.branches',
564 564 default=None,
565 565 )
566 566 coreconfigitem(
567 567 b'convert',
568 568 b'svn.startrev',
569 569 default=0,
570 570 )
571 571 coreconfigitem(
572 572 b'convert',
573 573 b'svn.dangerous-set-commit-dates',
574 574 default=False,
575 575 )
576 576 coreconfigitem(
577 577 b'debug',
578 578 b'dirstate.delaywrite',
579 579 default=0,
580 580 )
581 581 coreconfigitem(
582 582 b'debug',
583 583 b'revlog.verifyposition.changelog',
584 584 default=b'',
585 585 )
586 586 coreconfigitem(
587 587 b'debug',
588 588 b'revlog.debug-delta',
589 589 default=False,
590 590 )
591 591 # display extra information about the bundling process
592 592 coreconfigitem(
593 593 b'debug',
594 594 b'bundling-stats',
595 595 default=False,
596 596 )
597 597 # display extra information about the unbundling process
598 598 coreconfigitem(
599 599 b'debug',
600 600 b'unbundling-stats',
601 601 default=False,
602 602 )
603 603 coreconfigitem(
604 604 b'defaults',
605 605 b'.*',
606 606 default=None,
607 607 generic=True,
608 608 )
609 609 coreconfigitem(
610 610 b'devel',
611 611 b'all-warnings',
612 612 default=False,
613 613 )
614 614 coreconfigitem(
615 615 b'devel',
616 616 b'bundle2.debug',
617 617 default=False,
618 618 )
619 619 # which kind of delta to put in the bundled changegroup. Possible values:
620 620 # - '': use default behavior
621 621 # - p1: force to always use delta against p1
622 622 # - full: force to always use full content
623 623 coreconfigitem(
624 624 b'devel',
625 625 b'bundle.delta',
626 626 default=b'',
627 627 )
628 628 coreconfigitem(
629 629 b'devel',
630 630 b'cache-vfs',
631 631 default=None,
632 632 )
633 633 coreconfigitem(
634 634 b'devel',
635 635 b'check-locks',
636 636 default=False,
637 637 )
638 638 coreconfigitem(
639 639 b'devel',
640 640 b'check-relroot',
641 641 default=False,
642 642 )
643 643 # Track copy information for all files, not just "added" ones (very slow)
644 644 coreconfigitem(
645 645 b'devel',
646 646 b'copy-tracing.trace-all-files',
647 647 default=False,
648 648 )
649 649 coreconfigitem(
650 650 b'devel',
651 651 b'default-date',
652 652 default=None,
653 653 )
654 654 coreconfigitem(
655 655 b'devel',
656 656 b'deprec-warn',
657 657 default=False,
658 658 )
659 659 # possible values:
660 660 # - auto (the default)
661 661 # - force-append
662 662 # - force-new
663 663 coreconfigitem(
664 664 b'devel',
665 665 b'dirstate.v2.data_update_mode',
666 666 default="auto",
667 667 )
668 668 coreconfigitem(
669 669 b'devel',
670 670 b'disableloaddefaultcerts',
671 671 default=False,
672 672 )
673 673 coreconfigitem(
674 674 b'devel',
675 675 b'warn-empty-changegroup',
676 676 default=False,
677 677 )
678 678 coreconfigitem(
679 679 b'devel',
680 680 b'legacy.exchange',
681 681 default=list,
682 682 )
683 683 # When True, revlogs use a special reference version of the nodemap that is not
684 684 # performant but is "known" to behave properly.
685 685 coreconfigitem(
686 686 b'devel',
687 687 b'persistent-nodemap',
688 688 default=False,
689 689 )
690 690 coreconfigitem(
691 691 b'devel',
692 692 b'servercafile',
693 693 default=b'',
694 694 )
695 695 # This config option is intended for use in tests only. It is a giant
696 696 # footgun to kill security. Don't define it.
697 697 coreconfigitem(
698 698 b'devel',
699 699 b'server-insecure-exact-protocol',
700 700 default=b'',
701 701 )
702 702 coreconfigitem(
703 703 b'devel',
704 704 b'serverrequirecert',
705 705 default=False,
706 706 )
707 707 # Makes the status algorithm wait for the existence of this file
708 708 # (or until a timeout of `devel.sync.status.pre-dirstate-write-file-timeout`
709 709 # seconds) before taking the lock and writing the dirstate.
710 710 # Status signals that it's ready to wait by creating a file
711 711 # with the same name + `.waiting`.
712 712 # Useful when testing race conditions.
713 713 coreconfigitem(
714 714 b'devel',
715 715 b'sync.status.pre-dirstate-write-file',
716 716 default=None,
717 717 )
718 718 coreconfigitem(
719 719 b'devel',
720 720 b'sync.status.pre-dirstate-write-file-timeout',
721 721 default=2,
722 722 )
723 723 coreconfigitem(
724 724 b'devel',
725 725 b'sync.dirstate.post-docket-read-file',
726 726 default=None,
727 727 )
728 728 coreconfigitem(
729 729 b'devel',
730 730 b'sync.dirstate.post-docket-read-file-timeout',
731 731 default=2,
732 732 )
733 733 coreconfigitem(
734 734 b'devel',
735 735 b'sync.dirstate.pre-read-file',
736 736 default=None,
737 737 )
738 738 coreconfigitem(
739 739 b'devel',
740 740 b'sync.dirstate.pre-read-file-timeout',
741 741 default=2,
742 742 )
743 743 coreconfigitem(
744 744 b'devel',
745 745 b'strip-obsmarkers',
746 746 default=True,
747 747 )
748 748 coreconfigitem(
749 749 b'devel',
750 750 b'warn-config',
751 751 default=None,
752 752 )
753 753 coreconfigitem(
754 754 b'devel',
755 755 b'warn-config-default',
756 756 default=None,
757 757 )
758 758 coreconfigitem(
759 759 b'devel',
760 760 b'user.obsmarker',
761 761 default=None,
762 762 )
763 763 coreconfigitem(
764 764 b'devel',
765 765 b'warn-config-unknown',
766 766 default=None,
767 767 )
768 768 coreconfigitem(
769 769 b'devel',
770 770 b'debug.copies',
771 771 default=False,
772 772 )
773 773 coreconfigitem(
774 774 b'devel',
775 775 b'copy-tracing.multi-thread',
776 776 default=True,
777 777 )
778 778 coreconfigitem(
779 779 b'devel',
780 780 b'debug.extensions',
781 781 default=False,
782 782 )
783 783 coreconfigitem(
784 784 b'devel',
785 785 b'debug.repo-filters',
786 786 default=False,
787 787 )
788 788 coreconfigitem(
789 789 b'devel',
790 790 b'debug.peer-request',
791 791 default=False,
792 792 )
793 793 # If discovery.exchange-heads is False, the discovery will not start with
794 794 # remote head fetching and local head querying.
795 795 coreconfigitem(
796 796 b'devel',
797 797 b'discovery.exchange-heads',
798 798 default=True,
799 799 )
800 800 # If devel.debug.abort-update is True, then any merge with the working copy,
801 801 # e.g. [hg update], will be aborted after figuring out what needs to be done,
802 802 # but before spawning the parallel worker
803 803 coreconfigitem(
804 804 b'devel',
805 805 b'debug.abort-update',
806 806 default=False,
807 807 )
808 808 # If discovery.grow-sample is False, the sample size used in set discovery will
809 809 # not be increased during the process
810 810 coreconfigitem(
811 811 b'devel',
812 812 b'discovery.grow-sample',
813 813 default=True,
814 814 )
815 815 # When discovery.grow-sample.dynamic is True (the default), the sample size is
816 816 # adapted to the shape of the undecided set (it is set to the max of:
817 817 # <target-size>, len(roots(undecided)), len(heads(undecided))).
818 818 coreconfigitem(
819 819 b'devel',
820 820 b'discovery.grow-sample.dynamic',
821 821 default=True,
822 822 )
823 823 # discovery.grow-sample.rate controls the rate at which the sample grows
824 824 coreconfigitem(
825 825 b'devel',
826 826 b'discovery.grow-sample.rate',
827 827 default=1.05,
828 828 )
829 829 # If discovery.randomize is False, random sampling during discovery is
830 830 # deterministic. It is meant for integration tests.
831 831 coreconfigitem(
832 832 b'devel',
833 833 b'discovery.randomize',
834 834 default=True,
835 835 )
836 836 # Control the initial size of the discovery sample
837 837 coreconfigitem(
838 838 b'devel',
839 839 b'discovery.sample-size',
840 840 default=200,
841 841 )
842 842 # Control the sample size used for the initial round of discovery
843 843 coreconfigitem(
844 844 b'devel',
845 845 b'discovery.sample-size.initial',
846 846 default=100,
847 847 )
848 848 _registerdiffopts(section=b'diff')
849 849 coreconfigitem(
850 850 b'diff',
851 851 b'merge',
852 852 default=False,
853 853 experimental=True,
854 854 )
855 855 coreconfigitem(
856 856 b'email',
857 857 b'bcc',
858 858 default=None,
859 859 )
860 860 coreconfigitem(
861 861 b'email',
862 862 b'cc',
863 863 default=None,
864 864 )
865 865 coreconfigitem(
866 866 b'email',
867 867 b'charsets',
868 868 default=list,
869 869 )
870 870 coreconfigitem(
871 871 b'email',
872 872 b'from',
873 873 default=None,
874 874 )
875 875 coreconfigitem(
876 876 b'email',
877 877 b'method',
878 878 default=b'smtp',
879 879 )
880 880 coreconfigitem(
881 881 b'email',
882 882 b'reply-to',
883 883 default=None,
884 884 )
885 885 coreconfigitem(
886 886 b'email',
887 887 b'to',
888 888 default=None,
889 889 )
890 890 coreconfigitem(
891 891 b'experimental',
892 892 b'archivemetatemplate',
893 893 default=dynamicdefault,
894 894 )
895 895 coreconfigitem(
896 896 b'experimental',
897 897 b'auto-publish',
898 898 default=b'publish',
899 899 )
900 900 coreconfigitem(
901 901 b'experimental',
902 902 b'bundle-phases',
903 903 default=False,
904 904 )
905 905 coreconfigitem(
906 906 b'experimental',
907 907 b'bundle2-advertise',
908 908 default=True,
909 909 )
910 910 coreconfigitem(
911 911 b'experimental',
912 912 b'bundle2-output-capture',
913 913 default=False,
914 914 )
915 915 coreconfigitem(
916 916 b'experimental',
917 917 b'bundle2.pushback',
918 918 default=False,
919 919 )
920 920 coreconfigitem(
921 921 b'experimental',
922 922 b'bundle2lazylocking',
923 923 default=False,
924 924 )
925 925 coreconfigitem(
926 926 b'experimental',
927 927 b'bundlecomplevel',
928 928 default=None,
929 929 )
930 930 coreconfigitem(
931 931 b'experimental',
932 932 b'bundlecomplevel.bzip2',
933 933 default=None,
934 934 )
935 935 coreconfigitem(
936 936 b'experimental',
937 937 b'bundlecomplevel.gzip',
938 938 default=None,
939 939 )
940 940 coreconfigitem(
941 941 b'experimental',
942 942 b'bundlecomplevel.none',
943 943 default=None,
944 944 )
945 945 coreconfigitem(
946 946 b'experimental',
947 947 b'bundlecomplevel.zstd',
948 948 default=None,
949 949 )
950 950 coreconfigitem(
951 951 b'experimental',
952 952 b'bundlecompthreads',
953 953 default=None,
954 954 )
955 955 coreconfigitem(
956 956 b'experimental',
957 957 b'bundlecompthreads.bzip2',
958 958 default=None,
959 959 )
960 960 coreconfigitem(
961 961 b'experimental',
962 962 b'bundlecompthreads.gzip',
963 963 default=None,
964 964 )
965 965 coreconfigitem(
966 966 b'experimental',
967 967 b'bundlecompthreads.none',
968 968 default=None,
969 969 )
970 970 coreconfigitem(
971 971 b'experimental',
972 972 b'bundlecompthreads.zstd',
973 973 default=None,
974 974 )
975 975 coreconfigitem(
976 976 b'experimental',
977 977 b'changegroup3',
978 978 default=True,
979 979 )
980 980 coreconfigitem(
981 981 b'experimental',
982 982 b'changegroup4',
983 983 default=False,
984 984 )
985 985
986 986 # we might remove the rank configuration once the computation has no impact
987 987 coreconfigitem(
988 988 b'experimental',
989 989 b'changelog-v2.compute-rank',
990 990 default=True,
991 991 )
992 992 coreconfigitem(
993 993 b'experimental',
994 994 b'cleanup-as-archived',
995 995 default=False,
996 996 )
997 997 coreconfigitem(
998 998 b'experimental',
999 999 b'clientcompressionengines',
1000 1000 default=list,
1001 1001 )
1002 1002 coreconfigitem(
1003 1003 b'experimental',
1004 1004 b'copytrace',
1005 1005 default=b'on',
1006 1006 )
1007 1007 coreconfigitem(
1008 1008 b'experimental',
1009 1009 b'copytrace.movecandidateslimit',
1010 1010 default=100,
1011 1011 )
1012 1012 coreconfigitem(
1013 1013 b'experimental',
1014 1014 b'copytrace.sourcecommitlimit',
1015 1015 default=100,
1016 1016 )
1017 1017 coreconfigitem(
1018 1018 b'experimental',
1019 1019 b'copies.read-from',
1020 1020 default=b"filelog-only",
1021 1021 )
1022 1022 coreconfigitem(
1023 1023 b'experimental',
1024 1024 b'copies.write-to',
1025 1025 default=b'filelog-only',
1026 1026 )
1027 1027 coreconfigitem(
1028 1028 b'experimental',
1029 1029 b'crecordtest',
1030 1030 default=None,
1031 1031 )
1032 1032 coreconfigitem(
1033 1033 b'experimental',
1034 1034 b'directaccess',
1035 1035 default=False,
1036 1036 )
1037 1037 coreconfigitem(
1038 1038 b'experimental',
1039 1039 b'directaccess.revnums',
1040 1040 default=False,
1041 1041 )
1042 1042 coreconfigitem(
1043 1043 b'experimental',
1044 1044 b'editortmpinhg',
1045 1045 default=False,
1046 1046 )
1047 1047 coreconfigitem(
1048 1048 b'experimental',
1049 1049 b'evolution',
1050 1050 default=list,
1051 1051 )
1052 1052 coreconfigitem(
1053 1053 b'experimental',
1054 1054 b'evolution.allowdivergence',
1055 1055 default=False,
1056 1056 alias=[(b'experimental', b'allowdivergence')],
1057 1057 )
1058 1058 coreconfigitem(
1059 1059 b'experimental',
1060 1060 b'evolution.allowunstable',
1061 1061 default=None,
1062 1062 )
1063 1063 coreconfigitem(
1064 1064 b'experimental',
1065 1065 b'evolution.createmarkers',
1066 1066 default=None,
1067 1067 )
1068 1068 coreconfigitem(
1069 1069 b'experimental',
1070 1070 b'evolution.effect-flags',
1071 1071 default=True,
1072 1072 alias=[(b'experimental', b'effect-flags')],
1073 1073 )
1074 1074 coreconfigitem(
1075 1075 b'experimental',
1076 1076 b'evolution.exchange',
1077 1077 default=None,
1078 1078 )
1079 1079 coreconfigitem(
1080 1080 b'experimental',
1081 1081 b'evolution.bundle-obsmarker',
1082 1082 default=False,
1083 1083 )
1084 1084 coreconfigitem(
1085 1085 b'experimental',
1086 1086 b'evolution.bundle-obsmarker:mandatory',
1087 1087 default=True,
1088 1088 )
1089 1089 coreconfigitem(
1090 1090 b'experimental',
1091 1091 b'log.topo',
1092 1092 default=False,
1093 1093 )
1094 1094 coreconfigitem(
1095 1095 b'experimental',
1096 1096 b'evolution.report-instabilities',
1097 1097 default=True,
1098 1098 )
1099 1099 coreconfigitem(
1100 1100 b'experimental',
1101 1101 b'evolution.track-operation',
1102 1102 default=True,
1103 1103 )
1104 1104 # repo-level config to exclude a revset from visibility
1105 1105 #
1106 1106 # The target use case is to use `share` to expose different subsets of the same
1107 1107 # repository, especially server-side. See also `server.view`.
1108 1108 coreconfigitem(
1109 1109 b'experimental',
1110 1110 b'extra-filter-revs',
1111 1111 default=None,
1112 1112 )
1113 1113 coreconfigitem(
1114 1114 b'experimental',
1115 1115 b'maxdeltachainspan',
1116 1116 default=-1,
1117 1117 )
1118 1118 # tracks files which were undeleted (merge might delete them but we explicitly
1119 1119 # kept/undeleted them) and creates new filenodes for them
1120 1120 coreconfigitem(
1121 1121 b'experimental',
1122 1122 b'merge-track-salvaged',
1123 1123 default=False,
1124 1124 )
1125 1125 coreconfigitem(
1126 1126 b'experimental',
1127 1127 b'mmapindexthreshold',
1128 1128 default=None,
1129 1129 )
1130 1130 coreconfigitem(
1131 1131 b'experimental',
1132 1132 b'narrow',
1133 1133 default=False,
1134 1134 )
1135 1135 coreconfigitem(
1136 1136 b'experimental',
1137 1137 b'nonnormalparanoidcheck',
1138 1138 default=False,
1139 1139 )
1140 1140 coreconfigitem(
1141 1141 b'experimental',
1142 1142 b'exportableenviron',
1143 1143 default=list,
1144 1144 )
1145 1145 coreconfigitem(
1146 1146 b'experimental',
1147 1147 b'extendedheader.index',
1148 1148 default=None,
1149 1149 )
1150 1150 coreconfigitem(
1151 1151 b'experimental',
1152 1152 b'extendedheader.similarity',
1153 1153 default=False,
1154 1154 )
1155 1155 coreconfigitem(
1156 1156 b'experimental',
1157 1157 b'graphshorten',
1158 1158 default=False,
1159 1159 )
1160 1160 coreconfigitem(
1161 1161 b'experimental',
1162 1162 b'graphstyle.parent',
1163 1163 default=dynamicdefault,
1164 1164 )
1165 1165 coreconfigitem(
1166 1166 b'experimental',
1167 1167 b'graphstyle.missing',
1168 1168 default=dynamicdefault,
1169 1169 )
1170 1170 coreconfigitem(
1171 1171 b'experimental',
1172 1172 b'graphstyle.grandparent',
1173 1173 default=dynamicdefault,
1174 1174 )
1175 1175 coreconfigitem(
1176 1176 b'experimental',
1177 1177 b'hook-track-tags',
1178 1178 default=False,
1179 1179 )
1180 1180 coreconfigitem(
1181 1181 b'experimental',
1182 1182 b'httppostargs',
1183 1183 default=False,
1184 1184 )
1185 1185 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1186 1186 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1187 1187
1188 1188 coreconfigitem(
1189 1189 b'experimental',
1190 1190 b'obsmarkers-exchange-debug',
1191 1191 default=False,
1192 1192 )
1193 1193 coreconfigitem(
1194 1194 b'experimental',
1195 1195 b'remotenames',
1196 1196 default=False,
1197 1197 )
1198 1198 coreconfigitem(
1199 1199 b'experimental',
1200 1200 b'removeemptydirs',
1201 1201 default=True,
1202 1202 )
1203 1203 coreconfigitem(
1204 1204 b'experimental',
1205 1205 b'revert.interactive.select-to-keep',
1206 1206 default=False,
1207 1207 )
1208 1208 coreconfigitem(
1209 1209 b'experimental',
1210 1210 b'revisions.prefixhexnode',
1211 1211 default=False,
1212 1212 )
1213 1213 # "out of experimental" todo list.
1214 1214 #
1215 1215 # * include management of a persistent nodemap in the main docket
1216 1216 # * enforce a "no-truncate" policy for mmap safety
1217 1217 # - for censoring operation
1218 1218 # - for stripping operation
1219 1219 # - for rollback operation
1220 1220 # * proper streaming (race free) of the docket file
1221 1221 # * track garbage data to eventually allow rewriting -existing- sidedata.
1222 1222 # * Exchange-wise, we will also need to do something more efficient than
1223 1223 # keeping references to the affected revlogs, especially memory-wise when
1224 1224 # rewriting sidedata.
1225 1225 # * introduce a proper solution to reduce the number of filelog related files.
1226 1226 # * use caching for reading sidedata (similar to what we do for data).
1227 1227 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1228 1228 # * Improvement to consider
1229 1229 # - avoid compression header in chunk using the default compression?
1230 1230 # - forbid "inline" compression mode entirely?
1231 1231 # - split the data offset and flag field (the 2 bytes saved are mostly trouble)
1232 1232 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1233 1233 # - keep track of chain base or size (probably not that useful anymore)
1234 1234 coreconfigitem(
1235 1235 b'experimental',
1236 1236 b'revlogv2',
1237 1237 default=None,
1238 1238 )
1239 1239 coreconfigitem(
1240 1240 b'experimental',
1241 1241 b'revisions.disambiguatewithin',
1242 1242 default=None,
1243 1243 )
1244 1244 coreconfigitem(
1245 1245 b'experimental',
1246 1246 b'rust.index',
1247 1247 default=False,
1248 1248 )
1249 1249 coreconfigitem(
1250 1250 b'experimental',
1251 1251 b'server.allow-hidden-access',
1252 1252 default=list,
1253 1253 )
1254 1254 coreconfigitem(
1255 1255 b'experimental',
1256 1256 b'server.filesdata.recommended-batch-size',
1257 1257 default=50000,
1258 1258 )
1259 1259 coreconfigitem(
1260 1260 b'experimental',
1261 1261 b'server.manifestdata.recommended-batch-size',
1262 1262 default=100000,
1263 1263 )
1264 1264 coreconfigitem(
1265 1265 b'experimental',
1266 1266 b'server.stream-narrow-clones',
1267 1267 default=False,
1268 1268 )
1269 1269 coreconfigitem(
1270 1270 b'experimental',
1271 1271 b'single-head-per-branch',
1272 1272 default=False,
1273 1273 )
1274 1274 coreconfigitem(
1275 1275 b'experimental',
1276 1276 b'single-head-per-branch:account-closed-heads',
1277 1277 default=False,
1278 1278 )
1279 1279 coreconfigitem(
1280 1280 b'experimental',
1281 1281 b'single-head-per-branch:public-changes-only',
1282 1282 default=False,
1283 1283 )
1284 1284 coreconfigitem(
1285 1285 b'experimental',
1286 1286 b'sparse-read',
1287 1287 default=False,
1288 1288 )
1289 1289 coreconfigitem(
1290 1290 b'experimental',
1291 1291 b'sparse-read.density-threshold',
1292 1292 default=0.50,
1293 1293 )
1294 1294 coreconfigitem(
1295 1295 b'experimental',
1296 1296 b'sparse-read.min-gap-size',
1297 1297 default=b'65K',
1298 1298 )
1299 1299 coreconfigitem(
1300 1300 b'experimental',
1301 1301 b'stream-v3',
1302 1302 default=False,
1303 1303 )
1304 1304 coreconfigitem(
1305 1305 b'experimental',
1306 1306 b'treemanifest',
1307 1307 default=False,
1308 1308 )
1309 1309 coreconfigitem(
1310 1310 b'experimental',
1311 1311 b'update.atomic-file',
1312 1312 default=False,
1313 1313 )
1314 1314 coreconfigitem(
1315 1315 b'experimental',
1316 1316 b'web.full-garbage-collection-rate',
1317 1317 default=1, # still forcing a full collection on each request
1318 1318 )
1319 1319 coreconfigitem(
1320 1320 b'experimental',
1321 1321 b'worker.wdir-get-thread-safe',
1322 1322 default=False,
1323 1323 )
1324 1324 coreconfigitem(
1325 1325 b'experimental',
1326 1326 b'worker.repository-upgrade',
1327 1327 default=False,
1328 1328 )
1329 1329 coreconfigitem(
1330 1330 b'experimental',
1331 1331 b'xdiff',
1332 1332 default=False,
1333 1333 )
1334 1334 coreconfigitem(
1335 1335 b'extensions',
1336 1336 b'[^:]*',
1337 1337 default=None,
1338 1338 generic=True,
1339 1339 )
1340 1340 coreconfigitem(
1341 1341 b'extensions',
1342 1342 b'[^:]*:required',
1343 1343 default=False,
1344 1344 generic=True,
1345 1345 )
1346 1346 coreconfigitem(
1347 1347 b'extdata',
1348 1348 b'.*',
1349 1349 default=None,
1350 1350 generic=True,
1351 1351 )
1352 1352 coreconfigitem(
1353 1353 b'format',
1354 1354 b'bookmarks-in-store',
1355 1355 default=False,
1356 1356 )
1357 1357 coreconfigitem(
1358 1358 b'format',
1359 1359 b'chunkcachesize',
1360 1360 default=None,
1361 1361 experimental=True,
1362 1362 )
1363 1363 coreconfigitem(
1364 1364 # Enable this dirstate format *when creating a new repository*.
1365 1365 # Which format to use for existing repos is controlled by .hg/requires
1366 1366 b'format',
1367 1367 b'use-dirstate-v2',
1368 1368 default=False,
1369 1369 experimental=True,
1370 1370 alias=[(b'format', b'exp-rc-dirstate-v2')],
1371 1371 )
1372 1372 coreconfigitem(
1373 1373 b'format',
1374 1374 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
1375 1375 default=False,
1376 1376 experimental=True,
1377 1377 )
1378 1378 coreconfigitem(
1379 1379 b'format',
1380 1380 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet',
1381 1381 default=False,
1382 1382 experimental=True,
1383 1383 )
1384 1384 coreconfigitem(
1385 1385 b'format',
1386 1386 b'use-dirstate-tracked-hint',
1387 1387 default=False,
1388 1388 experimental=True,
1389 1389 )
1390 1390 coreconfigitem(
1391 1391 b'format',
1392 1392 b'use-dirstate-tracked-hint.version',
1393 1393 default=1,
1394 1394 experimental=True,
1395 1395 )
1396 1396 coreconfigitem(
1397 1397 b'format',
1398 1398 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
1399 1399 default=False,
1400 1400 experimental=True,
1401 1401 )
1402 1402 coreconfigitem(
1403 1403 b'format',
1404 1404 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet',
1405 1405 default=False,
1406 1406 experimental=True,
1407 1407 )
1408 1408 coreconfigitem(
1409 1409 b'format',
1410 1410 b'dotencode',
1411 1411 default=True,
1412 1412 )
1413 1413 coreconfigitem(
1414 1414 b'format',
1415 1415 b'generaldelta',
1416 1416 default=False,
1417 1417 experimental=True,
1418 1418 )
1419 1419 coreconfigitem(
1420 1420 b'format',
1421 1421 b'manifestcachesize',
1422 1422 default=None,
1423 1423 experimental=True,
1424 1424 )
1425 1425 coreconfigitem(
1426 1426 b'format',
1427 1427 b'maxchainlen',
1428 1428 default=dynamicdefault,
1429 1429 experimental=True,
1430 1430 )
1431 1431 coreconfigitem(
1432 1432 b'format',
1433 1433 b'obsstore-version',
1434 1434 default=None,
1435 1435 )
1436 1436 coreconfigitem(
1437 1437 b'format',
1438 1438 b'sparse-revlog',
1439 1439 default=True,
1440 1440 )
1441 1441 coreconfigitem(
1442 1442 b'format',
1443 1443 b'revlog-compression',
1444 1444 default=lambda: [b'zstd', b'zlib'],
1445 1445 alias=[(b'experimental', b'format.compression')],
1446 1446 )
1447 1447 # Experimental TODOs:
1448 1448 #
1449 1449 # * Same as for revlogv2 (but for the reduction of the number of files)
1450 1450 # * Actually computing the rank of changesets
1451 1451 # * Improvements to investigate
1452 1452 # - storing .hgtags fnode
1453 1453 # - storing branch-related identifiers
1454 1454
1455 1455 coreconfigitem(
1456 1456 b'format',
1457 1457 b'exp-use-changelog-v2',
1458 1458 default=None,
1459 1459 experimental=True,
1460 1460 )
1461 1461 coreconfigitem(
1462 1462 b'format',
1463 1463 b'usefncache',
1464 1464 default=True,
1465 1465 )
1466 1466 coreconfigitem(
1467 1467 b'format',
1468 1468 b'usegeneraldelta',
1469 1469 default=True,
1470 1470 )
1471 1471 coreconfigitem(
1472 1472 b'format',
1473 1473 b'usestore',
1474 1474 default=True,
1475 1475 )
1476
1477
1478 def _persistent_nodemap_default():
1479 """compute `use-persistent-nodemap` default value
1480
1481 The feature is disabled unless a fast implementation is available.
1482 """
1483 from . import policy
1484
1485 return policy.importrust('revlog') is not None
1486
1487
1488 1476 coreconfigitem(
1489 1477 b'format',
1490 1478 b'use-persistent-nodemap',
1491 default=_persistent_nodemap_default,
1479 default=dynamicdefault,
1492 1480 )
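
With `dynamicdefault` registered here, the fast-implementation check moves to the consumer, which passes the computed value explicitly at lookup time. A hedged sketch of what such a call site could look like (the helper name is illustrative; the real call sites live elsewhere in the tree):

    from mercurial import policy

    def _use_persistent_nodemap(ui):
        # mirrors the removed _persistent_nodemap_default(): enable the
        # feature only when a fast (Rust) implementation is available
        fast = policy.importrust('revlog') is not None
        return ui.configbool(b'format', b'use-persistent-nodemap', default=fast)
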
1493 1481 coreconfigitem(
1494 1482 b'format',
1495 1483 b'exp-use-copies-side-data-changeset',
1496 1484 default=False,
1497 1485 experimental=True,
1498 1486 )
1499 1487 coreconfigitem(
1500 1488 b'format',
1501 1489 b'use-share-safe',
1502 1490 default=True,
1503 1491 )
1504 1492 coreconfigitem(
1505 1493 b'format',
1506 1494 b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
1507 1495 default=False,
1508 1496 experimental=True,
1509 1497 )
1510 1498 coreconfigitem(
1511 1499 b'format',
1512 1500 b'use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet',
1513 1501 default=False,
1514 1502 experimental=True,
1515 1503 )
1516 1504
1517 1505 # Turning this on by default means we are confident about the scaling of phases.
1518 1506 # This is not guaranteed to be the case at the time this message is written.
1519 1507 coreconfigitem(
1520 1508 b'format',
1521 1509 b'use-internal-phase',
1522 1510 default=False,
1523 1511 experimental=True,
1524 1512 )
1525 1513 # The interaction between the archived phase and obsolescence markers needs to
1526 1514 # be sorted out before wider usage of this is to be considered.
1527 1515 #
1528 1516 # At the time this message is written, behavior when archiving obsolete
1529 1517 # changesets differs significantly from stripping. As part of stripping, we
1530 1518 # also remove the obsolescence markers associated with the stripped changesets,
1531 1519 # revealing the predecessor changesets when applicable. When archiving, we
1532 1520 # don't touch the obsolescence markers, keeping everything hidden. This can
1533 1521 # result in a quite confusing situation for people combining exchange of drafts
1534 1522 # with the archived phase, as some markers needed by others may be skipped
1535 1523 # during exchange.
1536 1524 coreconfigitem(
1537 1525 b'format',
1538 1526 b'exp-archived-phase',
1539 1527 default=False,
1540 1528 experimental=True,
1541 1529 )
1542 1530 coreconfigitem(
1543 1531 b'shelve',
1544 1532 b'store',
1545 1533 default=b'internal',
1546 1534 experimental=True,
1547 1535 )
1548 1536 coreconfigitem(
1549 1537 b'fsmonitor',
1550 1538 b'warn_when_unused',
1551 1539 default=True,
1552 1540 )
1553 1541 coreconfigitem(
1554 1542 b'fsmonitor',
1555 1543 b'warn_update_file_count',
1556 1544 default=50000,
1557 1545 )
1558 1546 coreconfigitem(
1559 1547 b'fsmonitor',
1560 1548 b'warn_update_file_count_rust',
1561 1549 default=400000,
1562 1550 )
1563 1551 coreconfigitem(
1564 1552 b'help',
1565 1553 br'hidden-command\..*',
1566 1554 default=False,
1567 1555 generic=True,
1568 1556 )
1569 1557 coreconfigitem(
1570 1558 b'help',
1571 1559 br'hidden-topic\..*',
1572 1560 default=False,
1573 1561 generic=True,
1574 1562 )
1575 1563 coreconfigitem(
1576 1564 b'hooks',
1577 1565 b'[^:]*',
1578 1566 default=dynamicdefault,
1579 1567 generic=True,
1580 1568 )
1581 1569 coreconfigitem(
1582 1570 b'hooks',
1583 1571 b'.*:run-with-plain',
1584 1572 default=True,
1585 1573 generic=True,
1586 1574 )
1587 1575 coreconfigitem(
1588 1576 b'hgweb-paths',
1589 1577 b'.*',
1590 1578 default=list,
1591 1579 generic=True,
1592 1580 )
1593 1581 coreconfigitem(
1594 1582 b'hostfingerprints',
1595 1583 b'.*',
1596 1584 default=list,
1597 1585 generic=True,
1598 1586 )
1599 1587 coreconfigitem(
1600 1588 b'hostsecurity',
1601 1589 b'ciphers',
1602 1590 default=None,
1603 1591 )
1604 1592 coreconfigitem(
1605 1593 b'hostsecurity',
1606 1594 b'minimumprotocol',
1607 1595 default=dynamicdefault,
1608 1596 )
1609 1597 coreconfigitem(
1610 1598 b'hostsecurity',
1611 1599 b'.*:minimumprotocol$',
1612 1600 default=dynamicdefault,
1613 1601 generic=True,
1614 1602 )
1615 1603 coreconfigitem(
1616 1604 b'hostsecurity',
1617 1605 b'.*:ciphers$',
1618 1606 default=dynamicdefault,
1619 1607 generic=True,
1620 1608 )
1621 1609 coreconfigitem(
1622 1610 b'hostsecurity',
1623 1611 b'.*:fingerprints$',
1624 1612 default=list,
1625 1613 generic=True,
1626 1614 )
1627 1615 coreconfigitem(
1628 1616 b'hostsecurity',
1629 1617 b'.*:verifycertsfile$',
1630 1618 default=None,
1631 1619 generic=True,
1632 1620 )
1633 1621
1634 1622 coreconfigitem(
1635 1623 b'http_proxy',
1636 1624 b'always',
1637 1625 default=False,
1638 1626 )
1639 1627 coreconfigitem(
1640 1628 b'http_proxy',
1641 1629 b'host',
1642 1630 default=None,
1643 1631 )
1644 1632 coreconfigitem(
1645 1633 b'http_proxy',
1646 1634 b'no',
1647 1635 default=list,
1648 1636 )
1649 1637 coreconfigitem(
1650 1638 b'http_proxy',
1651 1639 b'passwd',
1652 1640 default=None,
1653 1641 )
1654 1642 coreconfigitem(
1655 1643 b'http_proxy',
1656 1644 b'user',
1657 1645 default=None,
1658 1646 )
1659 1647
1660 1648 coreconfigitem(
1661 1649 b'http',
1662 1650 b'timeout',
1663 1651 default=None,
1664 1652 )
1665 1653
1666 1654 coreconfigitem(
1667 1655 b'logtoprocess',
1668 1656 b'commandexception',
1669 1657 default=None,
1670 1658 )
1671 1659 coreconfigitem(
1672 1660 b'logtoprocess',
1673 1661 b'commandfinish',
1674 1662 default=None,
1675 1663 )
1676 1664 coreconfigitem(
1677 1665 b'logtoprocess',
1678 1666 b'command',
1679 1667 default=None,
1680 1668 )
1681 1669 coreconfigitem(
1682 1670 b'logtoprocess',
1683 1671 b'develwarn',
1684 1672 default=None,
1685 1673 )
1686 1674 coreconfigitem(
1687 1675 b'logtoprocess',
1688 1676 b'uiblocked',
1689 1677 default=None,
1690 1678 )
1691 1679 coreconfigitem(
1692 1680 b'merge',
1693 1681 b'checkunknown',
1694 1682 default=b'abort',
1695 1683 )
1696 1684 coreconfigitem(
1697 1685 b'merge',
1698 1686 b'checkignored',
1699 1687 default=b'abort',
1700 1688 )
1701 1689 coreconfigitem(
1702 1690 b'experimental',
1703 1691 b'merge.checkpathconflicts',
1704 1692 default=False,
1705 1693 )
1706 1694 coreconfigitem(
1707 1695 b'merge',
1708 1696 b'followcopies',
1709 1697 default=True,
1710 1698 )
1711 1699 coreconfigitem(
1712 1700 b'merge',
1713 1701 b'on-failure',
1714 1702 default=b'continue',
1715 1703 )
1716 1704 coreconfigitem(
1717 1705 b'merge',
1718 1706 b'preferancestor',
1719 1707 default=lambda: [b'*'],
1720 1708 experimental=True,
1721 1709 )
1722 1710 coreconfigitem(
1723 1711 b'merge',
1724 1712 b'strict-capability-check',
1725 1713 default=False,
1726 1714 )
1727 1715 coreconfigitem(
1728 1716 b'merge',
1729 1717 b'disable-partial-tools',
1730 1718 default=False,
1731 1719 experimental=True,
1732 1720 )
1733 1721 coreconfigitem(
1734 1722 b'partial-merge-tools',
1735 1723 b'.*',
1736 1724 default=None,
1737 1725 generic=True,
1738 1726 experimental=True,
1739 1727 )
1740 1728 coreconfigitem(
1741 1729 b'partial-merge-tools',
1742 1730 br'.*\.patterns',
1743 1731 default=dynamicdefault,
1744 1732 generic=True,
1745 1733 priority=-1,
1746 1734 experimental=True,
1747 1735 )
1748 1736 coreconfigitem(
1749 1737 b'partial-merge-tools',
1750 1738 br'.*\.executable$',
1751 1739 default=dynamicdefault,
1752 1740 generic=True,
1753 1741 priority=-1,
1754 1742 experimental=True,
1755 1743 )
1756 1744 coreconfigitem(
1757 1745 b'partial-merge-tools',
1758 1746 br'.*\.order',
1759 1747 default=0,
1760 1748 generic=True,
1761 1749 priority=-1,
1762 1750 experimental=True,
1763 1751 )
1764 1752 coreconfigitem(
1765 1753 b'partial-merge-tools',
1766 1754 br'.*\.args',
1767 1755 default=b"$local $base $other",
1768 1756 generic=True,
1769 1757 priority=-1,
1770 1758 experimental=True,
1771 1759 )
1772 1760 coreconfigitem(
1773 1761 b'partial-merge-tools',
1774 1762 br'.*\.disable',
1775 1763 default=False,
1776 1764 generic=True,
1777 1765 priority=-1,
1778 1766 experimental=True,
1779 1767 )
1780 1768 coreconfigitem(
1781 1769 b'merge-tools',
1782 1770 b'.*',
1783 1771 default=None,
1784 1772 generic=True,
1785 1773 )
1786 1774 coreconfigitem(
1787 1775 b'merge-tools',
1788 1776 br'.*\.args$',
1789 1777 default=b"$local $base $other",
1790 1778 generic=True,
1791 1779 priority=-1,
1792 1780 )
1793 1781 coreconfigitem(
1794 1782 b'merge-tools',
1795 1783 br'.*\.binary$',
1796 1784 default=False,
1797 1785 generic=True,
1798 1786 priority=-1,
1799 1787 )
1800 1788 coreconfigitem(
1801 1789 b'merge-tools',
1802 1790 br'.*\.check$',
1803 1791 default=list,
1804 1792 generic=True,
1805 1793 priority=-1,
1806 1794 )
1807 1795 coreconfigitem(
1808 1796 b'merge-tools',
1809 1797 br'.*\.checkchanged$',
1810 1798 default=False,
1811 1799 generic=True,
1812 1800 priority=-1,
1813 1801 )
1814 1802 coreconfigitem(
1815 1803 b'merge-tools',
1816 1804 br'.*\.executable$',
1817 1805 default=dynamicdefault,
1818 1806 generic=True,
1819 1807 priority=-1,
1820 1808 )
1821 1809 coreconfigitem(
1822 1810 b'merge-tools',
1823 1811 br'.*\.fixeol$',
1824 1812 default=False,
1825 1813 generic=True,
1826 1814 priority=-1,
1827 1815 )
1828 1816 coreconfigitem(
1829 1817 b'merge-tools',
1830 1818 br'.*\.gui$',
1831 1819 default=False,
1832 1820 generic=True,
1833 1821 priority=-1,
1834 1822 )
1835 1823 coreconfigitem(
1836 1824 b'merge-tools',
1837 1825 br'.*\.mergemarkers$',
1838 1826 default=b'basic',
1839 1827 generic=True,
1840 1828 priority=-1,
1841 1829 )
1842 1830 coreconfigitem(
1843 1831 b'merge-tools',
1844 1832 br'.*\.mergemarkertemplate$',
1845 1833 default=dynamicdefault, # take from command-templates.mergemarker
1846 1834 generic=True,
1847 1835 priority=-1,
1848 1836 )
1849 1837 coreconfigitem(
1850 1838 b'merge-tools',
1851 1839 br'.*\.priority$',
1852 1840 default=0,
1853 1841 generic=True,
1854 1842 priority=-1,
1855 1843 )
1856 1844 coreconfigitem(
1857 1845 b'merge-tools',
1858 1846 br'.*\.premerge$',
1859 1847 default=dynamicdefault,
1860 1848 generic=True,
1861 1849 priority=-1,
1862 1850 )
1863 1851 coreconfigitem(
1864 1852 b'merge-tools',
1865 1853 br'.*\.regappend$',
1866 1854 default=b"",
1867 1855 generic=True,
1868 1856 priority=-1,
1869 1857 )
1870 1858 coreconfigitem(
1871 1859 b'merge-tools',
1872 1860 br'.*\.symlink$',
1873 1861 default=False,
1874 1862 generic=True,
1875 1863 priority=-1,
1876 1864 )
1877 1865 coreconfigitem(
1878 1866 b'pager',
1879 1867 b'attend-.*',
1880 1868 default=dynamicdefault,
1881 1869 generic=True,
1882 1870 )
1883 1871 coreconfigitem(
1884 1872 b'pager',
1885 1873 b'ignore',
1886 1874 default=list,
1887 1875 )
1888 1876 coreconfigitem(
1889 1877 b'pager',
1890 1878 b'pager',
1891 1879 default=dynamicdefault,
1892 1880 )
1893 1881 coreconfigitem(
1894 1882 b'patch',
1895 1883 b'eol',
1896 1884 default=b'strict',
1897 1885 )
1898 1886 coreconfigitem(
1899 1887 b'patch',
1900 1888 b'fuzz',
1901 1889 default=2,
1902 1890 )
1903 1891 coreconfigitem(
1904 1892 b'paths',
1905 1893 b'default',
1906 1894 default=None,
1907 1895 )
1908 1896 coreconfigitem(
1909 1897 b'paths',
1910 1898 b'default-push',
1911 1899 default=None,
1912 1900 )
1913 1901 coreconfigitem(
1914 1902 b'paths',
1915 1903 b'[^:]*',
1916 1904 default=None,
1917 1905 generic=True,
1918 1906 )
1919 1907 coreconfigitem(
1920 1908 b'paths',
1921 1909 b'.*:bookmarks.mode',
1922 1910 default='default',
1923 1911 generic=True,
1924 1912 )
1925 1913 coreconfigitem(
1926 1914 b'paths',
1927 1915 b'.*:multi-urls',
1928 1916 default=False,
1929 1917 generic=True,
1930 1918 )
1931 1919 coreconfigitem(
1932 1920 b'paths',
1933 1921 b'.*:pushrev',
1934 1922 default=None,
1935 1923 generic=True,
1936 1924 )
1937 1925 coreconfigitem(
1938 1926 b'paths',
1939 1927 b'.*:pushurl',
1940 1928 default=None,
1941 1929 generic=True,
1942 1930 )
1943 1931 coreconfigitem(
1944 1932 b'paths',
1945 1933 b'.*:pulled-delta-reuse-policy',
1946 1934 default=None,
1947 1935 generic=True,
1948 1936 )
1949 1937 coreconfigitem(
1950 1938 b'phases',
1951 1939 b'checksubrepos',
1952 1940 default=b'follow',
1953 1941 )
1954 1942 coreconfigitem(
1955 1943 b'phases',
1956 1944 b'new-commit',
1957 1945 default=b'draft',
1958 1946 )
1959 1947 coreconfigitem(
1960 1948 b'phases',
1961 1949 b'publish',
1962 1950 default=True,
1963 1951 )
1964 1952 coreconfigitem(
1965 1953 b'profiling',
1966 1954 b'enabled',
1967 1955 default=False,
1968 1956 )
1969 1957 coreconfigitem(
1970 1958 b'profiling',
1971 1959 b'format',
1972 1960 default=b'text',
1973 1961 )
1974 1962 coreconfigitem(
1975 1963 b'profiling',
1976 1964 b'freq',
1977 1965 default=1000,
1978 1966 )
1979 1967 coreconfigitem(
1980 1968 b'profiling',
1981 1969 b'limit',
1982 1970 default=30,
1983 1971 )
1984 1972 coreconfigitem(
1985 1973 b'profiling',
1986 1974 b'nested',
1987 1975 default=0,
1988 1976 )
1989 1977 coreconfigitem(
1990 1978 b'profiling',
1991 1979 b'output',
1992 1980 default=None,
1993 1981 )
1994 1982 coreconfigitem(
1995 1983 b'profiling',
1996 1984 b'showmax',
1997 1985 default=0.999,
1998 1986 )
1999 1987 coreconfigitem(
2000 1988 b'profiling',
2001 1989 b'showmin',
2002 1990 default=dynamicdefault,
2003 1991 )
2004 1992 coreconfigitem(
2005 1993 b'profiling',
2006 1994 b'showtime',
2007 1995 default=True,
2008 1996 )
2009 1997 coreconfigitem(
2010 1998 b'profiling',
2011 1999 b'sort',
2012 2000 default=b'inlinetime',
2013 2001 )
2014 2002 coreconfigitem(
2015 2003 b'profiling',
2016 2004 b'statformat',
2017 2005 default=b'hotpath',
2018 2006 )
2019 2007 coreconfigitem(
2020 2008 b'profiling',
2021 2009 b'time-track',
2022 2010 default=dynamicdefault,
2023 2011 )
2024 2012 coreconfigitem(
2025 2013 b'profiling',
2026 2014 b'type',
2027 2015 default=b'stat',
2028 2016 )
2029 2017 coreconfigitem(
2030 2018 b'progress',
2031 2019 b'assume-tty',
2032 2020 default=False,
2033 2021 )
2034 2022 coreconfigitem(
2035 2023 b'progress',
2036 2024 b'changedelay',
2037 2025 default=1,
2038 2026 )
2039 2027 coreconfigitem(
2040 2028 b'progress',
2041 2029 b'clear-complete',
2042 2030 default=True,
2043 2031 )
2044 2032 coreconfigitem(
2045 2033 b'progress',
2046 2034 b'debug',
2047 2035 default=False,
2048 2036 )
2049 2037 coreconfigitem(
2050 2038 b'progress',
2051 2039 b'delay',
2052 2040 default=3,
2053 2041 )
2054 2042 coreconfigitem(
2055 2043 b'progress',
2056 2044 b'disable',
2057 2045 default=False,
2058 2046 )
2059 2047 coreconfigitem(
2060 2048 b'progress',
2061 2049 b'estimateinterval',
2062 2050 default=60.0,
2063 2051 )
2064 2052 coreconfigitem(
2065 2053 b'progress',
2066 2054 b'format',
2067 2055 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
2068 2056 )
2069 2057 coreconfigitem(
2070 2058 b'progress',
2071 2059 b'refresh',
2072 2060 default=0.1,
2073 2061 )
2074 2062 coreconfigitem(
2075 2063 b'progress',
2076 2064 b'width',
2077 2065 default=dynamicdefault,
2078 2066 )
2079 2067 coreconfigitem(
2080 2068 b'pull',
2081 2069 b'confirm',
2082 2070 default=False,
2083 2071 )
2084 2072 coreconfigitem(
2085 2073 b'push',
2086 2074 b'pushvars.server',
2087 2075 default=False,
2088 2076 )
2089 2077 coreconfigitem(
2090 2078 b'rewrite',
2091 2079 b'backup-bundle',
2092 2080 default=True,
2093 2081 alias=[(b'ui', b'history-editing-backup')],
2094 2082 )
2095 2083 coreconfigitem(
2096 2084 b'rewrite',
2097 2085 b'update-timestamp',
2098 2086 default=False,
2099 2087 )
2100 2088 coreconfigitem(
2101 2089 b'rewrite',
2102 2090 b'empty-successor',
2103 2091 default=b'skip',
2104 2092 experimental=True,
2105 2093 )
2106 2094 # experimental as long as format.use-dirstate-v2 is.
2107 2095 coreconfigitem(
2108 2096 b'storage',
2109 2097 b'dirstate-v2.slow-path',
2110 2098 default=b"abort",
2111 2099 experimental=True,
2112 2100 )
2113 2101 coreconfigitem(
2114 2102 b'storage',
2115 2103 b'new-repo-backend',
2116 2104 default=b'revlogv1',
2117 2105 experimental=True,
2118 2106 )
2119 2107 coreconfigitem(
2120 2108 b'storage',
2121 2109 b'revlog.optimize-delta-parent-choice',
2122 2110 default=True,
2123 2111 alias=[(b'format', b'aggressivemergedeltas')],
2124 2112 )
2125 2113 coreconfigitem(
2126 2114 b'storage',
2127 2115 b'revlog.delta-parent-search.candidate-group-chunk-size',
2128 2116 default=20,
2129 2117 )
2130 2118 coreconfigitem(
2131 2119 b'storage',
2132 2120 b'revlog.issue6528.fix-incoming',
2133 2121 default=True,
2134 2122 )
2135 2123 # experimental as long as rust is experimental (or a C version is implemented)
2136 2124 coreconfigitem(
2137 2125 b'storage',
2138 2126 b'revlog.persistent-nodemap.mmap',
2139 2127 default=True,
2140 2128 )
2141 2129 # experimental as long as format.use-persistent-nodemap is.
2142 2130 coreconfigitem(
2143 2131 b'storage',
2144 2132 b'revlog.persistent-nodemap.slow-path',
2145 2133 default=b"abort",
2146 2134 )
2147 2135
2148 2136 coreconfigitem(
2149 2137 b'storage',
2150 2138 b'revlog.reuse-external-delta',
2151 2139 default=True,
2152 2140 )
2153 2141 # This option is True unless `format.generaldelta` is set.
2154 2142 coreconfigitem(
2155 2143 b'storage',
2156 2144 b'revlog.reuse-external-delta-parent',
2157 2145 default=None,
2158 2146 )
2159 2147 coreconfigitem(
2160 2148 b'storage',
2161 2149 b'revlog.zlib.level',
2162 2150 default=None,
2163 2151 )
2164 2152 coreconfigitem(
2165 2153 b'storage',
2166 2154 b'revlog.zstd.level',
2167 2155 default=None,
2168 2156 )
2169 2157 coreconfigitem(
2170 2158 b'server',
2171 2159 b'bookmarks-pushkey-compat',
2172 2160 default=True,
2173 2161 )
2174 2162 coreconfigitem(
2175 2163 b'server',
2176 2164 b'bundle1',
2177 2165 default=True,
2178 2166 )
2179 2167 coreconfigitem(
2180 2168 b'server',
2181 2169 b'bundle1gd',
2182 2170 default=None,
2183 2171 )
2184 2172 coreconfigitem(
2185 2173 b'server',
2186 2174 b'bundle1.pull',
2187 2175 default=None,
2188 2176 )
2189 2177 coreconfigitem(
2190 2178 b'server',
2191 2179 b'bundle1gd.pull',
2192 2180 default=None,
2193 2181 )
2194 2182 coreconfigitem(
2195 2183 b'server',
2196 2184 b'bundle1.push',
2197 2185 default=None,
2198 2186 )
2199 2187 coreconfigitem(
2200 2188 b'server',
2201 2189 b'bundle1gd.push',
2202 2190 default=None,
2203 2191 )
2204 2192 coreconfigitem(
2205 2193 b'server',
2206 2194 b'bundle2.stream',
2207 2195 default=True,
2208 2196 alias=[(b'experimental', b'bundle2.stream')],
2209 2197 )
2210 2198 coreconfigitem(
2211 2199 b'server',
2212 2200 b'compressionengines',
2213 2201 default=list,
2214 2202 )
2215 2203 coreconfigitem(
2216 2204 b'server',
2217 2205 b'concurrent-push-mode',
2218 2206 default=b'check-related',
2219 2207 )
2220 2208 coreconfigitem(
2221 2209 b'server',
2222 2210 b'disablefullbundle',
2223 2211 default=False,
2224 2212 )
2225 2213 coreconfigitem(
2226 2214 b'server',
2227 2215 b'maxhttpheaderlen',
2228 2216 default=1024,
2229 2217 )
2230 2218 coreconfigitem(
2231 2219 b'server',
2232 2220 b'pullbundle',
2233 2221 default=True,
2234 2222 )
2235 2223 coreconfigitem(
2236 2224 b'server',
2237 2225 b'preferuncompressed',
2238 2226 default=False,
2239 2227 )
2240 2228 coreconfigitem(
2241 2229 b'server',
2242 2230 b'streamunbundle',
2243 2231 default=False,
2244 2232 )
2245 2233 coreconfigitem(
2246 2234 b'server',
2247 2235 b'uncompressed',
2248 2236 default=True,
2249 2237 )
2250 2238 coreconfigitem(
2251 2239 b'server',
2252 2240 b'uncompressedallowsecret',
2253 2241 default=False,
2254 2242 )
2255 2243 coreconfigitem(
2256 2244 b'server',
2257 2245 b'view',
2258 2246 default=b'served',
2259 2247 )
2260 2248 coreconfigitem(
2261 2249 b'server',
2262 2250 b'validate',
2263 2251 default=False,
2264 2252 )
2265 2253 coreconfigitem(
2266 2254 b'server',
2267 2255 b'zliblevel',
2268 2256 default=-1,
2269 2257 )
2270 2258 coreconfigitem(
2271 2259 b'server',
2272 2260 b'zstdlevel',
2273 2261 default=3,
2274 2262 )
2275 2263 coreconfigitem(
2276 2264 b'share',
2277 2265 b'pool',
2278 2266 default=None,
2279 2267 )
2280 2268 coreconfigitem(
2281 2269 b'share',
2282 2270 b'poolnaming',
2283 2271 default=b'identity',
2284 2272 )
2285 2273 coreconfigitem(
2286 2274 b'share',
2287 2275 b'safe-mismatch.source-not-safe',
2288 2276 default=b'abort',
2289 2277 )
2290 2278 coreconfigitem(
2291 2279 b'share',
2292 2280 b'safe-mismatch.source-safe',
2293 2281 default=b'abort',
2294 2282 )
2295 2283 coreconfigitem(
2296 2284 b'share',
2297 2285 b'safe-mismatch.source-not-safe.warn',
2298 2286 default=True,
2299 2287 )
2300 2288 coreconfigitem(
2301 2289 b'share',
2302 2290 b'safe-mismatch.source-safe.warn',
2303 2291 default=True,
2304 2292 )
2305 2293 coreconfigitem(
2306 2294 b'share',
2307 2295 b'safe-mismatch.source-not-safe:verbose-upgrade',
2308 2296 default=True,
2309 2297 )
2310 2298 coreconfigitem(
2311 2299 b'share',
2312 2300 b'safe-mismatch.source-safe:verbose-upgrade',
2313 2301 default=True,
2314 2302 )
2315 2303 coreconfigitem(
2316 2304 b'shelve',
2317 2305 b'maxbackups',
2318 2306 default=10,
2319 2307 )
2320 2308 coreconfigitem(
2321 2309 b'smtp',
2322 2310 b'host',
2323 2311 default=None,
2324 2312 )
2325 2313 coreconfigitem(
2326 2314 b'smtp',
2327 2315 b'local_hostname',
2328 2316 default=None,
2329 2317 )
2330 2318 coreconfigitem(
2331 2319 b'smtp',
2332 2320 b'password',
2333 2321 default=None,
2334 2322 )
2335 2323 coreconfigitem(
2336 2324 b'smtp',
2337 2325 b'port',
2338 2326 default=dynamicdefault,
2339 2327 )
2340 2328 coreconfigitem(
2341 2329 b'smtp',
2342 2330 b'tls',
2343 2331 default=b'none',
2344 2332 )
2345 2333 coreconfigitem(
2346 2334 b'smtp',
2347 2335 b'username',
2348 2336 default=None,
2349 2337 )
2350 2338 coreconfigitem(
2351 2339 b'sparse',
2352 2340 b'missingwarning',
2353 2341 default=True,
2354 2342 experimental=True,
2355 2343 )
2356 2344 coreconfigitem(
2357 2345 b'subrepos',
2358 2346 b'allowed',
2359 2347 default=dynamicdefault, # to make backporting simpler
2360 2348 )
2361 2349 coreconfigitem(
2362 2350 b'subrepos',
2363 2351 b'hg:allowed',
2364 2352 default=dynamicdefault,
2365 2353 )
2366 2354 coreconfigitem(
2367 2355 b'subrepos',
2368 2356 b'git:allowed',
2369 2357 default=dynamicdefault,
2370 2358 )
2371 2359 coreconfigitem(
2372 2360 b'subrepos',
2373 2361 b'svn:allowed',
2374 2362 default=dynamicdefault,
2375 2363 )
2376 2364 coreconfigitem(
2377 2365 b'templates',
2378 2366 b'.*',
2379 2367 default=None,
2380 2368 generic=True,
2381 2369 )
2382 2370 coreconfigitem(
2383 2371 b'templateconfig',
2384 2372 b'.*',
2385 2373 default=dynamicdefault,
2386 2374 generic=True,
2387 2375 )
2388 2376 coreconfigitem(
2389 2377 b'trusted',
2390 2378 b'groups',
2391 2379 default=list,
2392 2380 )
2393 2381 coreconfigitem(
2394 2382 b'trusted',
2395 2383 b'users',
2396 2384 default=list,
2397 2385 )
2398 2386 coreconfigitem(
2399 2387 b'ui',
2400 2388 b'_usedassubrepo',
2401 2389 default=False,
2402 2390 )
2403 2391 coreconfigitem(
2404 2392 b'ui',
2405 2393 b'allowemptycommit',
2406 2394 default=False,
2407 2395 )
2408 2396 coreconfigitem(
2409 2397 b'ui',
2410 2398 b'archivemeta',
2411 2399 default=True,
2412 2400 )
2413 2401 coreconfigitem(
2414 2402 b'ui',
2415 2403 b'askusername',
2416 2404 default=False,
2417 2405 )
2418 2406 coreconfigitem(
2419 2407 b'ui',
2420 2408 b'available-memory',
2421 2409 default=None,
2422 2410 )
2423 2411
2424 2412 coreconfigitem(
2425 2413 b'ui',
2426 2414 b'clonebundlefallback',
2427 2415 default=False,
2428 2416 )
2429 2417 coreconfigitem(
2430 2418 b'ui',
2431 2419 b'clonebundleprefers',
2432 2420 default=list,
2433 2421 )
2434 2422 coreconfigitem(
2435 2423 b'ui',
2436 2424 b'clonebundles',
2437 2425 default=True,
2438 2426 )
2439 2427 coreconfigitem(
2440 2428 b'ui',
2441 2429 b'color',
2442 2430 default=b'auto',
2443 2431 )
2444 2432 coreconfigitem(
2445 2433 b'ui',
2446 2434 b'commitsubrepos',
2447 2435 default=False,
2448 2436 )
2449 2437 coreconfigitem(
2450 2438 b'ui',
2451 2439 b'debug',
2452 2440 default=False,
2453 2441 )
2454 2442 coreconfigitem(
2455 2443 b'ui',
2456 2444 b'debugger',
2457 2445 default=None,
2458 2446 )
2459 2447 coreconfigitem(
2460 2448 b'ui',
2461 2449 b'editor',
2462 2450 default=dynamicdefault,
2463 2451 )
2464 2452 coreconfigitem(
2465 2453 b'ui',
2466 2454 b'detailed-exit-code',
2467 2455 default=False,
2468 2456 experimental=True,
2469 2457 )
2470 2458 coreconfigitem(
2471 2459 b'ui',
2472 2460 b'fallbackencoding',
2473 2461 default=None,
2474 2462 )
2475 2463 coreconfigitem(
2476 2464 b'ui',
2477 2465 b'forcecwd',
2478 2466 default=None,
2479 2467 )
2480 2468 coreconfigitem(
2481 2469 b'ui',
2482 2470 b'forcemerge',
2483 2471 default=None,
2484 2472 )
2485 2473 coreconfigitem(
2486 2474 b'ui',
2487 2475 b'formatdebug',
2488 2476 default=False,
2489 2477 )
2490 2478 coreconfigitem(
2491 2479 b'ui',
2492 2480 b'formatjson',
2493 2481 default=False,
2494 2482 )
2495 2483 coreconfigitem(
2496 2484 b'ui',
2497 2485 b'formatted',
2498 2486 default=None,
2499 2487 )
2500 2488 coreconfigitem(
2501 2489 b'ui',
2502 2490 b'interactive',
2503 2491 default=None,
2504 2492 )
2505 2493 coreconfigitem(
2506 2494 b'ui',
2507 2495 b'interface',
2508 2496 default=None,
2509 2497 )
2510 2498 coreconfigitem(
2511 2499 b'ui',
2512 2500 b'interface.chunkselector',
2513 2501 default=None,
2514 2502 )
2515 2503 coreconfigitem(
2516 2504 b'ui',
2517 2505 b'large-file-limit',
2518 2506 default=10 * (2 ** 20),
2519 2507 )
2520 2508 coreconfigitem(
2521 2509 b'ui',
2522 2510 b'logblockedtimes',
2523 2511 default=False,
2524 2512 )
2525 2513 coreconfigitem(
2526 2514 b'ui',
2527 2515 b'merge',
2528 2516 default=None,
2529 2517 )
2530 2518 coreconfigitem(
2531 2519 b'ui',
2532 2520 b'mergemarkers',
2533 2521 default=b'basic',
2534 2522 )
2535 2523 coreconfigitem(
2536 2524 b'ui',
2537 2525 b'message-output',
2538 2526 default=b'stdio',
2539 2527 )
2540 2528 coreconfigitem(
2541 2529 b'ui',
2542 2530 b'nontty',
2543 2531 default=False,
2544 2532 )
2545 2533 coreconfigitem(
2546 2534 b'ui',
2547 2535 b'origbackuppath',
2548 2536 default=None,
2549 2537 )
2550 2538 coreconfigitem(
2551 2539 b'ui',
2552 2540 b'paginate',
2553 2541 default=True,
2554 2542 )
2555 2543 coreconfigitem(
2556 2544 b'ui',
2557 2545 b'patch',
2558 2546 default=None,
2559 2547 )
2560 2548 coreconfigitem(
2561 2549 b'ui',
2562 2550 b'portablefilenames',
2563 2551 default=b'warn',
2564 2552 )
2565 2553 coreconfigitem(
2566 2554 b'ui',
2567 2555 b'promptecho',
2568 2556 default=False,
2569 2557 )
2570 2558 coreconfigitem(
2571 2559 b'ui',
2572 2560 b'quiet',
2573 2561 default=False,
2574 2562 )
2575 2563 coreconfigitem(
2576 2564 b'ui',
2577 2565 b'quietbookmarkmove',
2578 2566 default=False,
2579 2567 )
2580 2568 coreconfigitem(
2581 2569 b'ui',
2582 2570 b'relative-paths',
2583 2571 default=b'legacy',
2584 2572 )
2585 2573 coreconfigitem(
2586 2574 b'ui',
2587 2575 b'remotecmd',
2588 2576 default=b'hg',
2589 2577 )
2590 2578 coreconfigitem(
2591 2579 b'ui',
2592 2580 b'report_untrusted',
2593 2581 default=True,
2594 2582 )
2595 2583 coreconfigitem(
2596 2584 b'ui',
2597 2585 b'rollback',
2598 2586 default=True,
2599 2587 )
2600 2588 coreconfigitem(
2601 2589 b'ui',
2602 2590 b'signal-safe-lock',
2603 2591 default=True,
2604 2592 )
2605 2593 coreconfigitem(
2606 2594 b'ui',
2607 2595 b'slash',
2608 2596 default=False,
2609 2597 )
2610 2598 coreconfigitem(
2611 2599 b'ui',
2612 2600 b'ssh',
2613 2601 default=b'ssh',
2614 2602 )
2615 2603 coreconfigitem(
2616 2604 b'ui',
2617 2605 b'ssherrorhint',
2618 2606 default=None,
2619 2607 )
2620 2608 coreconfigitem(
2621 2609 b'ui',
2622 2610 b'statuscopies',
2623 2611 default=False,
2624 2612 )
2625 2613 coreconfigitem(
2626 2614 b'ui',
2627 2615 b'strict',
2628 2616 default=False,
2629 2617 )
2630 2618 coreconfigitem(
2631 2619 b'ui',
2632 2620 b'style',
2633 2621 default=b'',
2634 2622 )
2635 2623 coreconfigitem(
2636 2624 b'ui',
2637 2625 b'supportcontact',
2638 2626 default=None,
2639 2627 )
2640 2628 coreconfigitem(
2641 2629 b'ui',
2642 2630 b'textwidth',
2643 2631 default=78,
2644 2632 )
2645 2633 coreconfigitem(
2646 2634 b'ui',
2647 2635 b'timeout',
2648 2636 default=b'600',
2649 2637 )
2650 2638 coreconfigitem(
2651 2639 b'ui',
2652 2640 b'timeout.warn',
2653 2641 default=0,
2654 2642 )
2655 2643 coreconfigitem(
2656 2644 b'ui',
2657 2645 b'timestamp-output',
2658 2646 default=False,
2659 2647 )
2660 2648 coreconfigitem(
2661 2649 b'ui',
2662 2650 b'traceback',
2663 2651 default=False,
2664 2652 )
2665 2653 coreconfigitem(
2666 2654 b'ui',
2667 2655 b'tweakdefaults',
2668 2656 default=False,
2669 2657 )
2670 2658 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2671 2659 coreconfigitem(
2672 2660 b'ui',
2673 2661 b'verbose',
2674 2662 default=False,
2675 2663 )
2676 2664 coreconfigitem(
2677 2665 b'verify',
2678 2666 b'skipflags',
2679 2667 default=0,
2680 2668 )
2681 2669 coreconfigitem(
2682 2670 b'web',
2683 2671 b'allowbz2',
2684 2672 default=False,
2685 2673 )
2686 2674 coreconfigitem(
2687 2675 b'web',
2688 2676 b'allowgz',
2689 2677 default=False,
2690 2678 )
2691 2679 coreconfigitem(
2692 2680 b'web',
2693 2681 b'allow-pull',
2694 2682 alias=[(b'web', b'allowpull')],
2695 2683 default=True,
2696 2684 )
2697 2685 coreconfigitem(
2698 2686 b'web',
2699 2687 b'allow-push',
2700 2688 alias=[(b'web', b'allow_push')],
2701 2689 default=list,
2702 2690 )
2703 2691 coreconfigitem(
2704 2692 b'web',
2705 2693 b'allowzip',
2706 2694 default=False,
2707 2695 )
2708 2696 coreconfigitem(
2709 2697 b'web',
2710 2698 b'archivesubrepos',
2711 2699 default=False,
2712 2700 )
2713 2701 coreconfigitem(
2714 2702 b'web',
2715 2703 b'cache',
2716 2704 default=True,
2717 2705 )
2718 2706 coreconfigitem(
2719 2707 b'web',
2720 2708 b'comparisoncontext',
2721 2709 default=5,
2722 2710 )
2723 2711 coreconfigitem(
2724 2712 b'web',
2725 2713 b'contact',
2726 2714 default=None,
2727 2715 )
2728 2716 coreconfigitem(
2729 2717 b'web',
2730 2718 b'deny_push',
2731 2719 default=list,
2732 2720 )
2733 2721 coreconfigitem(
2734 2722 b'web',
2735 2723 b'guessmime',
2736 2724 default=False,
2737 2725 )
2738 2726 coreconfigitem(
2739 2727 b'web',
2740 2728 b'hidden',
2741 2729 default=False,
2742 2730 )
2743 2731 coreconfigitem(
2744 2732 b'web',
2745 2733 b'labels',
2746 2734 default=list,
2747 2735 )
2748 2736 coreconfigitem(
2749 2737 b'web',
2750 2738 b'logoimg',
2751 2739 default=b'hglogo.png',
2752 2740 )
2753 2741 coreconfigitem(
2754 2742 b'web',
2755 2743 b'logourl',
2756 2744 default=b'https://mercurial-scm.org/',
2757 2745 )
2758 2746 coreconfigitem(
2759 2747 b'web',
2760 2748 b'accesslog',
2761 2749 default=b'-',
2762 2750 )
2763 2751 coreconfigitem(
2764 2752 b'web',
2765 2753 b'address',
2766 2754 default=b'',
2767 2755 )
2768 2756 coreconfigitem(
2769 2757 b'web',
2770 2758 b'allow-archive',
2771 2759 alias=[(b'web', b'allow_archive')],
2772 2760 default=list,
2773 2761 )
2774 2762 coreconfigitem(
2775 2763 b'web',
2776 2764 b'allow_read',
2777 2765 default=list,
2778 2766 )
2779 2767 coreconfigitem(
2780 2768 b'web',
2781 2769 b'baseurl',
2782 2770 default=None,
2783 2771 )
2784 2772 coreconfigitem(
2785 2773 b'web',
2786 2774 b'cacerts',
2787 2775 default=None,
2788 2776 )
2789 2777 coreconfigitem(
2790 2778 b'web',
2791 2779 b'certificate',
2792 2780 default=None,
2793 2781 )
2794 2782 coreconfigitem(
2795 2783 b'web',
2796 2784 b'collapse',
2797 2785 default=False,
2798 2786 )
2799 2787 coreconfigitem(
2800 2788 b'web',
2801 2789 b'csp',
2802 2790 default=None,
2803 2791 )
2804 2792 coreconfigitem(
2805 2793 b'web',
2806 2794 b'deny_read',
2807 2795 default=list,
2808 2796 )
2809 2797 coreconfigitem(
2810 2798 b'web',
2811 2799 b'descend',
2812 2800 default=True,
2813 2801 )
2814 2802 coreconfigitem(
2815 2803 b'web',
2816 2804 b'description',
2817 2805 default=b"",
2818 2806 )
2819 2807 coreconfigitem(
2820 2808 b'web',
2821 2809 b'encoding',
2822 2810 default=lambda: encoding.encoding,
2823 2811 )
2824 2812 coreconfigitem(
2825 2813 b'web',
2826 2814 b'errorlog',
2827 2815 default=b'-',
2828 2816 )
2829 2817 coreconfigitem(
2830 2818 b'web',
2831 2819 b'ipv6',
2832 2820 default=False,
2833 2821 )
2834 2822 coreconfigitem(
2835 2823 b'web',
2836 2824 b'maxchanges',
2837 2825 default=10,
2838 2826 )
2839 2827 coreconfigitem(
2840 2828 b'web',
2841 2829 b'maxfiles',
2842 2830 default=10,
2843 2831 )
2844 2832 coreconfigitem(
2845 2833 b'web',
2846 2834 b'maxshortchanges',
2847 2835 default=60,
2848 2836 )
2849 2837 coreconfigitem(
2850 2838 b'web',
2851 2839 b'motd',
2852 2840 default=b'',
2853 2841 )
2854 2842 coreconfigitem(
2855 2843 b'web',
2856 2844 b'name',
2857 2845 default=dynamicdefault,
2858 2846 )
2859 2847 coreconfigitem(
2860 2848 b'web',
2861 2849 b'port',
2862 2850 default=8000,
2863 2851 )
2864 2852 coreconfigitem(
2865 2853 b'web',
2866 2854 b'prefix',
2867 2855 default=b'',
2868 2856 )
2869 2857 coreconfigitem(
2870 2858 b'web',
2871 2859 b'push_ssl',
2872 2860 default=True,
2873 2861 )
2874 2862 coreconfigitem(
2875 2863 b'web',
2876 2864 b'refreshinterval',
2877 2865 default=20,
2878 2866 )
2879 2867 coreconfigitem(
2880 2868 b'web',
2881 2869 b'server-header',
2882 2870 default=None,
2883 2871 )
2884 2872 coreconfigitem(
2885 2873 b'web',
2886 2874 b'static',
2887 2875 default=None,
2888 2876 )
2889 2877 coreconfigitem(
2890 2878 b'web',
2891 2879 b'staticurl',
2892 2880 default=None,
2893 2881 )
2894 2882 coreconfigitem(
2895 2883 b'web',
2896 2884 b'stripes',
2897 2885 default=1,
2898 2886 )
2899 2887 coreconfigitem(
2900 2888 b'web',
2901 2889 b'style',
2902 2890 default=b'paper',
2903 2891 )
2904 2892 coreconfigitem(
2905 2893 b'web',
2906 2894 b'templates',
2907 2895 default=None,
2908 2896 )
2909 2897 coreconfigitem(
2910 2898 b'web',
2911 2899 b'view',
2912 2900 default=b'served',
2913 2901 experimental=True,
2914 2902 )
2915 2903 coreconfigitem(
2916 2904 b'worker',
2917 2905 b'backgroundclose',
2918 2906 default=dynamicdefault,
2919 2907 )
2920 2908 # Windows defaults to a limit of 512 open files. A buffer of 128
2921 2909 # should give us enough headway, hence the default of 512 - 128 = 384.
2922 2910 coreconfigitem(
2923 2911 b'worker',
2924 2912 b'backgroundclosemaxqueue',
2925 2913 default=384,
2926 2914 )
2927 2915 coreconfigitem(
2928 2916 b'worker',
2929 2917 b'backgroundcloseminfilecount',
2930 2918 default=2048,
2931 2919 )
2932 2920 coreconfigitem(
2933 2921 b'worker',
2934 2922 b'backgroundclosethreadcount',
2935 2923 default=4,
2936 2924 )
2937 2925 coreconfigitem(
2938 2926 b'worker',
2939 2927 b'enabled',
2940 2928 default=True,
2941 2929 )
2942 2930 coreconfigitem(
2943 2931 b'worker',
2944 2932 b'numcpus',
2945 2933 default=None,
2946 2934 )
2947 2935
2948 2936 # Rebase-related configuration moved to core because other extensions are doing
2949 2937 # strange things. For example, shelve imports the extension to reuse some bits
2950 2938 # without formally loading it.
2951 2939 coreconfigitem(
2952 2940 b'commands',
2953 2941 b'rebase.requiredest',
2954 2942 default=False,
2955 2943 )
2956 2944 coreconfigitem(
2957 2945 b'experimental',
2958 2946 b'rebaseskipobsolete',
2959 2947 default=True,
2960 2948 )
2961 2949 coreconfigitem(
2962 2950 b'rebase',
2963 2951 b'singletransaction',
2964 2952 default=False,
2965 2953 )
2966 2954 coreconfigitem(
2967 2955 b'rebase',
2968 2956 b'experimental.inmemory',
2969 2957 default=False,
2970 2958 )
2971 2959
2972 2960 # This setting controls creation of a rebase_source extra field
2973 2961 # during rebase. When False, no such field is created. This is
2974 2962 # useful e.g. for incrementally converting changesets and then
2975 2963 # rebasing them onto an existing repo.
2976 2964 # WARNING: this is an advanced setting reserved for people who know
2977 2965 # exactly what they are doing. Misuse of this setting can easily
2978 2966 # result in obsmarker cycles and a vivid headache.
2979 2967 coreconfigitem(
2980 2968 b'rebase',
2981 2969 b'store-source',
2982 2970 default=True,
2983 2971 experimental=True,
2984 2972 )
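# A minimal hgrc sketch for the incremental-conversion workflow described
# above, disabling the rebase_source extra (section and name match the
# declaration above; the workflow itself is illustrative):
#
#   [rebase]
#   store-source = false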
@@ -1,4038 +1,4043 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import functools
11 11 import os
12 12 import random
13 13 import re
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from concurrent import futures
19 19 from typing import (
20 20 Optional,
21 21 )
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 bin,
26 26 hex,
27 27 nullrev,
28 28 sha1nodeconstants,
29 29 short,
30 30 )
31 31 from .pycompat import (
32 32 delattr,
33 33 getattr,
34 34 )
35 35 from . import (
36 36 bookmarks,
37 37 branchmap,
38 38 bundle2,
39 39 bundlecaches,
40 40 changegroup,
41 41 color,
42 42 commit,
43 43 context,
44 44 dirstate,
45 45 discovery,
46 46 encoding,
47 47 error,
48 48 exchange,
49 49 extensions,
50 50 filelog,
51 51 hook,
52 52 lock as lockmod,
53 53 match as matchmod,
54 54 mergestate as mergestatemod,
55 55 mergeutil,
56 56 namespaces,
57 57 narrowspec,
58 58 obsolete,
59 59 pathutil,
60 60 phases,
61 policy,
61 62 pushkey,
62 63 pycompat,
63 64 rcutil,
64 65 repoview,
65 66 requirements as requirementsmod,
66 67 revlog,
67 68 revset,
68 69 revsetlang,
69 70 scmutil,
70 71 sparse,
71 72 store as storemod,
72 73 subrepoutil,
73 74 tags as tagsmod,
74 75 transaction,
75 76 txnutil,
76 77 util,
77 78 vfs as vfsmod,
78 79 wireprototypes,
79 80 )
80 81
81 82 from .interfaces import (
82 83 repository,
83 84 util as interfaceutil,
84 85 )
85 86
86 87 from .utils import (
87 88 hashutil,
88 89 procutil,
89 90 stringutil,
90 91 urlutil,
91 92 )
92 93
93 94 from .revlogutils import (
94 95 concurrency_checker as revlogchecker,
95 96 constants as revlogconst,
96 97 sidedata as sidedatamod,
97 98 )
98 99
99 100 release = lockmod.release
100 101 urlerr = util.urlerr
101 102 urlreq = util.urlreq
102 103
103 104 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
104 105 b"^((dirstate|narrowspec.dirstate).*|branch$)"
105 106 )
106 107
107 108 # set of (path, vfs-location) tuples. vfs-location is:
108 109 # - 'plain for vfs relative paths
109 110 # - '' for svfs relative paths
110 111 _cachedfiles = set()
111 112
112 113
113 114 class _basefilecache(scmutil.filecache):
114 115 """All filecache usage on repo are done for logic that should be unfiltered"""
115 116
116 117 def __get__(self, repo, type=None):
117 118 if repo is None:
118 119 return self
119 120 # proxy to unfiltered __dict__ since filtered repo has no entry
120 121 unfi = repo.unfiltered()
121 122 try:
122 123 return unfi.__dict__[self.sname]
123 124 except KeyError:
124 125 pass
125 126 return super(_basefilecache, self).__get__(unfi, type)
126 127
127 128 def set(self, repo, value):
128 129 return super(_basefilecache, self).set(repo.unfiltered(), value)
129 130
130 131
131 132 class repofilecache(_basefilecache):
132 133 """filecache for files in .hg but outside of .hg/store"""
133 134
134 135 def __init__(self, *paths):
135 136 super(repofilecache, self).__init__(*paths)
136 137 for path in paths:
137 138 _cachedfiles.add((path, b'plain'))
138 139
139 140 def join(self, obj, fname):
140 141 return obj.vfs.join(fname)
141 142
142 143
143 144 class storecache(_basefilecache):
144 145 """filecache for files in the store"""
145 146
146 147 def __init__(self, *paths):
147 148 super(storecache, self).__init__(*paths)
148 149 for path in paths:
149 150 _cachedfiles.add((path, b''))
150 151
151 152 def join(self, obj, fname):
152 153 return obj.sjoin(fname)
153 154
154 155
155 156 class changelogcache(storecache):
156 157 """filecache for the changelog"""
157 158
158 159 def __init__(self):
159 160 super(changelogcache, self).__init__()
160 161 _cachedfiles.add((b'00changelog.i', b''))
161 162 _cachedfiles.add((b'00changelog.n', b''))
162 163
163 164 def tracked_paths(self, obj):
164 165 paths = [self.join(obj, b'00changelog.i')]
165 166 if obj.store.opener.options.get(b'persistent-nodemap', False):
166 167 paths.append(self.join(obj, b'00changelog.n'))
167 168 return paths
168 169
169 170
170 171 class manifestlogcache(storecache):
171 172 """filecache for the manifestlog"""
172 173
173 174 def __init__(self):
174 175 super(manifestlogcache, self).__init__()
175 176 _cachedfiles.add((b'00manifest.i', b''))
176 177 _cachedfiles.add((b'00manifest.n', b''))
177 178
178 179 def tracked_paths(self, obj):
179 180 paths = [self.join(obj, b'00manifest.i')]
180 181 if obj.store.opener.options.get(b'persistent-nodemap', False):
181 182 paths.append(self.join(obj, b'00manifest.n'))
182 183 return paths
183 184
184 185
185 186 class mixedrepostorecache(_basefilecache):
186 187 """filecache for a mix files in .hg/store and outside"""
187 188
188 189 def __init__(self, *pathsandlocations):
189 190 # scmutil.filecache only uses the path for passing back into our
190 191 # join(), so we can safely pass a list of paths and locations
191 192 super(mixedrepostorecache, self).__init__(*pathsandlocations)
192 193 _cachedfiles.update(pathsandlocations)
193 194
194 195 def join(self, obj, fnameandlocation):
195 196 fname, location = fnameandlocation
196 197 if location == b'plain':
197 198 return obj.vfs.join(fname)
198 199 else:
199 200 if location != b'':
200 201 raise error.ProgrammingError(
201 202 b'unexpected location: %s' % location
202 203 )
203 204 return obj.sjoin(fname)
204 205
205 206
206 207 def isfilecached(repo, name):
207 208 """check if a repo has already cached "name" filecache-ed property
208 209
209 210 This returns (cachedobj-or-None, iscached) tuple.
210 211 """
211 212 cacheentry = repo.unfiltered()._filecache.get(name, None)
212 213 if not cacheentry:
213 214 return None, False
214 215 return cacheentry.obj, True
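# Illustrative (hypothetical) call site: probe a filecache-ed property
# without forcing it to be computed; the property name is an assumption.
#
#   cl, cached = isfilecached(repo, 'changelog')
#   if cached:
#       pass  # safe to inspect ``cl`` without triggering a fresh read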
215 216
216 217
217 218 class unfilteredpropertycache(util.propertycache):
218 219 """propertycache that apply to unfiltered repo only"""
219 220
220 221 def __get__(self, repo, type=None):
221 222 unfi = repo.unfiltered()
222 223 if unfi is repo:
223 224 return super(unfilteredpropertycache, self).__get__(unfi)
224 225 return getattr(unfi, self.name)
225 226
226 227
227 228 class filteredpropertycache(util.propertycache):
228 229 """propertycache that must take filtering in account"""
229 230
230 231 def cachevalue(self, obj, value):
231 232 object.__setattr__(obj, self.name, value)
232 233
233 234
234 235 def hasunfilteredcache(repo, name):
235 236 """check if a repo has an unfilteredpropertycache value for <name>"""
236 237 return name in vars(repo.unfiltered())
237 238
238 239
239 240 def unfilteredmethod(orig):
240 241 """decorate method that always need to be run on unfiltered version"""
241 242
242 243 @functools.wraps(orig)
243 244 def wrapper(repo, *args, **kwargs):
244 245 return orig(repo.unfiltered(), *args, **kwargs)
245 246
246 247 return wrapper
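# Typical (illustrative) use on a repository method that must bypass
# repoview filtering; the method name here is an assumption:
#
#   @unfilteredmethod
#   def destroying(self):
#       '''operate on the unfiltered repository'''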
247 248
248 249
249 250 moderncaps = {
250 251 b'lookup',
251 252 b'branchmap',
252 253 b'pushkey',
253 254 b'known',
254 255 b'getbundle',
255 256 b'unbundle',
256 257 }
257 258 legacycaps = moderncaps.union({b'changegroupsubset'})
258 259
259 260
260 261 @interfaceutil.implementer(repository.ipeercommandexecutor)
261 262 class localcommandexecutor:
262 263 def __init__(self, peer):
263 264 self._peer = peer
264 265 self._sent = False
265 266 self._closed = False
266 267
267 268 def __enter__(self):
268 269 return self
269 270
270 271 def __exit__(self, exctype, excvalue, exctb):
271 272 self.close()
272 273
273 274 def callcommand(self, command, args):
274 275 if self._sent:
275 276 raise error.ProgrammingError(
276 277 b'callcommand() cannot be used after sendcommands()'
277 278 )
278 279
279 280 if self._closed:
280 281 raise error.ProgrammingError(
281 282 b'callcommand() cannot be used after close()'
282 283 )
283 284
284 285 # We don't need to support anything fancy. Just call the named
285 286 # method on the peer and return a resolved future.
286 287 fn = getattr(self._peer, pycompat.sysstr(command))
287 288
288 289 f = futures.Future()
289 290
290 291 try:
291 292 result = fn(**pycompat.strkwargs(args))
292 293 except Exception:
293 294 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
294 295 else:
295 296 f.set_result(result)
296 297
297 298 return f
298 299
299 300 def sendcommands(self):
300 301 self._sent = True
301 302
302 303 def close(self):
303 304 self._closed = True
304 305
305 306
306 307 @interfaceutil.implementer(repository.ipeercommands)
307 308 class localpeer(repository.peer):
308 309 '''peer for a local repo; reflects only the most recent API'''
309 310
310 311 def __init__(self, repo, caps=None, path=None, remotehidden=False):
311 312 super(localpeer, self).__init__(
312 313 repo.ui, path=path, remotehidden=remotehidden
313 314 )
314 315
315 316 if caps is None:
316 317 caps = moderncaps.copy()
317 318 if remotehidden:
318 319 self._repo = repo.filtered(b'served.hidden')
319 320 else:
320 321 self._repo = repo.filtered(b'served')
321 322 if repo._wanted_sidedata:
322 323 formatted = bundle2.format_remote_wanted_sidedata(repo)
323 324 caps.add(b'exp-wanted-sidedata=' + formatted)
324 325
325 326 self._caps = repo._restrictcapabilities(caps)
326 327
327 328 # Begin of _basepeer interface.
328 329
329 330 def url(self):
330 331 return self._repo.url()
331 332
332 333 def local(self):
333 334 return self._repo
334 335
335 336 def canpush(self):
336 337 return True
337 338
338 339 def close(self):
339 340 self._repo.close()
340 341
341 342 # End of _basepeer interface.
342 343
343 344 # Begin of _basewirecommands interface.
344 345
345 346 def branchmap(self):
346 347 return self._repo.branchmap()
347 348
348 349 def capabilities(self):
349 350 return self._caps
350 351
351 352 def get_cached_bundle_inline(self, path):
352 353 # not needed with local peer
353 354 raise NotImplementedError
354 355
355 356 def clonebundles(self):
356 357 return bundlecaches.get_manifest(self._repo)
357 358
358 359 def debugwireargs(self, one, two, three=None, four=None, five=None):
359 360 """Used to test argument passing over the wire"""
360 361 return b"%s %s %s %s %s" % (
361 362 one,
362 363 two,
363 364 pycompat.bytestr(three),
364 365 pycompat.bytestr(four),
365 366 pycompat.bytestr(five),
366 367 )
367 368
368 369 def getbundle(
369 370 self,
370 371 source,
371 372 heads=None,
372 373 common=None,
373 374 bundlecaps=None,
374 375 remote_sidedata=None,
375 376 **kwargs
376 377 ):
377 378 chunks = exchange.getbundlechunks(
378 379 self._repo,
379 380 source,
380 381 heads=heads,
381 382 common=common,
382 383 bundlecaps=bundlecaps,
383 384 remote_sidedata=remote_sidedata,
384 385 **kwargs
385 386 )[1]
386 387 cb = util.chunkbuffer(chunks)
387 388
388 389 if exchange.bundle2requested(bundlecaps):
389 390 # When requesting a bundle2, getbundle returns a stream to make the
390 391 # wire level function happier. We need to build a proper object
391 392 # from it in local peer.
392 393 return bundle2.getunbundler(self.ui, cb)
393 394 else:
394 395 return changegroup.getunbundler(b'01', cb, None)
395 396
396 397 def heads(self):
397 398 return self._repo.heads()
398 399
399 400 def known(self, nodes):
400 401 return self._repo.known(nodes)
401 402
402 403 def listkeys(self, namespace):
403 404 return self._repo.listkeys(namespace)
404 405
405 406 def lookup(self, key):
406 407 return self._repo.lookup(key)
407 408
408 409 def pushkey(self, namespace, key, old, new):
409 410 return self._repo.pushkey(namespace, key, old, new)
410 411
411 412 def stream_out(self):
412 413 raise error.Abort(_(b'cannot perform stream clone against local peer'))
413 414
414 415 def unbundle(self, bundle, heads, url):
415 416 """apply a bundle on a repo
416 417
417 418 This function handles the repo locking itself."""
418 419 try:
419 420 try:
420 421 bundle = exchange.readbundle(self.ui, bundle, None)
421 422 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
422 423 if util.safehasattr(ret, 'getchunks'):
423 424 # This is a bundle20 object, turn it into an unbundler.
424 425 # This little dance should be dropped eventually when the
425 426 # API is finally improved.
426 427 stream = util.chunkbuffer(ret.getchunks())
427 428 ret = bundle2.getunbundler(self.ui, stream)
428 429 return ret
429 430 except Exception as exc:
430 431 # If the exception contains output salvaged from a bundle2
431 432 # reply, we need to make sure it is printed before continuing
432 433 # to fail. So we build a bundle2 with such output and consume
433 434 # it directly.
434 435 #
435 436 # This is not very elegant but allows a "simple" solution for
436 437 # issue4594
437 438 output = getattr(exc, '_bundle2salvagedoutput', ())
438 439 if output:
439 440 bundler = bundle2.bundle20(self._repo.ui)
440 441 for out in output:
441 442 bundler.addpart(out)
442 443 stream = util.chunkbuffer(bundler.getchunks())
443 444 b = bundle2.getunbundler(self.ui, stream)
444 445 bundle2.processbundle(self._repo, b)
445 446 raise
446 447 except error.PushRaced as exc:
447 448 raise error.ResponseError(
448 449 _(b'push failed:'), stringutil.forcebytestr(exc)
449 450 )
450 451
451 452 # End of _basewirecommands interface.
452 453
453 454 # Begin of peer interface.
454 455
455 456 def commandexecutor(self):
456 457 return localcommandexecutor(self)
457 458
458 459 # End of peer interface.
459 460
460 461
461 462 @interfaceutil.implementer(repository.ipeerlegacycommands)
462 463 class locallegacypeer(localpeer):
463 464 """peer extension which implements legacy methods too; used for tests with
464 465 restricted capabilities"""
465 466
466 467 def __init__(self, repo, path=None, remotehidden=False):
467 468 super(locallegacypeer, self).__init__(
468 469 repo, caps=legacycaps, path=path, remotehidden=remotehidden
469 470 )
470 471
471 472 # Begin of baselegacywirecommands interface.
472 473
473 474 def between(self, pairs):
474 475 return self._repo.between(pairs)
475 476
476 477 def branches(self, nodes):
477 478 return self._repo.branches(nodes)
478 479
479 480 def changegroup(self, nodes, source):
480 481 outgoing = discovery.outgoing(
481 482 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
482 483 )
483 484 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
484 485
485 486 def changegroupsubset(self, bases, heads, source):
486 487 outgoing = discovery.outgoing(
487 488 self._repo, missingroots=bases, ancestorsof=heads
488 489 )
489 490 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
490 491
491 492 # End of baselegacywirecommands interface.
492 493
493 494
494 495 # Functions receiving (ui, features) that extensions can register to impact
495 496 # the ability to load repositories with custom requirements. Only
496 497 # functions defined in loaded extensions are called.
497 498 #
498 499 # The function receives a set of requirement strings that the repository
499 500 # is capable of opening. Functions will typically add elements to the
500 501 # set to reflect that the extension knows how to handle those requirements.
501 502 featuresetupfuncs = set()
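# A hypothetical extension registers a callback roughly like this (the
# function names and requirement string are illustrative):
#
#   def featuresetup(ui, supported):
#       supported.add(b'exp-my-extension-requirement')
#
#   def extsetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)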
502 503
503 504
504 505 def _getsharedvfs(hgvfs, requirements):
505 506 """returns the vfs object pointing to root of shared source
506 507 repo for a shared repository
507 508
508 509 hgvfs is vfs pointing at .hg/ of current repo (shared one)
509 510 requirements is a set of requirements of current repo (shared one)
510 511 """
511 512 # The ``shared`` or ``relshared`` requirements indicate the
512 513 # store lives in the path contained in the ``.hg/sharedpath`` file.
513 514 # This is an absolute path for ``shared`` and relative to
514 515 # ``.hg/`` for ``relshared``.
515 516 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
516 517 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
517 518 sharedpath = util.normpath(hgvfs.join(sharedpath))
518 519
519 520 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
520 521
521 522 if not sharedvfs.exists():
522 523 raise error.RepoError(
523 524 _(b'.hg/sharedpath points to nonexistent directory %s')
524 525 % sharedvfs.base
525 526 )
526 527 return sharedvfs
527 528
528 529
529 530 def _readrequires(vfs, allowmissing):
530 531 """reads the require file present at root of this vfs
531 532 and return a set of requirements
532 533
533 534 If allowmissing is True, we suppress FileNotFoundError if raised"""
534 535 # requires file contains a newline-delimited list of
535 536 # features/capabilities the opener (us) must have in order to use
536 537 # the repository. This file was introduced in Mercurial 0.9.2,
537 538 # which means very old repositories may not have one. We assume
538 539 # a missing file translates to no requirements.
539 540 read = vfs.tryread if allowmissing else vfs.read
540 541 return set(read(b'requires').splitlines())
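# For example, the ``requires`` file of a modern repository might contain
# lines such as (the exact set varies with configuration):
#
#   generaldelta
#   revlogv1
#   sparserevlog
#   store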
541 542
542 543
543 544 def makelocalrepository(baseui, path: bytes, intents=None):
544 545 """Create a local repository object.
545 546
546 547 Given arguments needed to construct a local repository, this function
547 548 performs various early repository loading functionality (such as
548 549 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
549 550 the repository can be opened, derives a type suitable for representing
550 551 that repository, and returns an instance of it.
551 552
552 553 The returned object conforms to the ``repository.completelocalrepository``
553 554 interface.
554 555
555 556 The repository type is derived by calling a series of factory functions
556 557 for each aspect/interface of the final repository. These are defined by
557 558 ``REPO_INTERFACES``.
558 559
559 560 Each factory function is called to produce a type implementing a specific
560 561 interface. The cumulative list of returned types will be combined into a
561 562 new type and that type will be instantiated to represent the local
562 563 repository.
563 564
564 565 The factory functions each receive various state that may be consulted
565 566 as part of deriving a type.
566 567
567 568 Extensions should wrap these factory functions to customize repository type
568 569 creation. Note that an extension's wrapped function may be called even if
569 570 that extension is not loaded for the repo being constructed. Extensions
570 571 should check if their ``__name__`` appears in the
571 572 ``extensionmodulenames`` set passed to the factory function and no-op if
572 573 not.
573 574 """
574 575 ui = baseui.copy()
575 576 # Prevent copying repo configuration.
576 577 ui.copy = baseui.copy
577 578
578 579 # Working directory VFS rooted at repository root.
579 580 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
580 581
581 582 # Main VFS for .hg/ directory.
582 583 hgpath = wdirvfs.join(b'.hg')
583 584 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
584 585 # Whether this repository is a shared one or not
585 586 shared = False
586 587 # If this repository is shared, the vfs pointing to the shared repo
587 588 sharedvfs = None
588 589
589 590 # The .hg/ path should exist and should be a directory. All other
590 591 # cases are errors.
591 592 if not hgvfs.isdir():
592 593 try:
593 594 hgvfs.stat()
594 595 except FileNotFoundError:
595 596 pass
596 597 except ValueError as e:
597 598 # Can be raised on Python 3.8 when path is invalid.
598 599 raise error.Abort(
599 600 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
600 601 )
601 602
602 603 raise error.RepoError(_(b'repository %s not found') % path)
603 604
604 605 requirements = _readrequires(hgvfs, True)
605 606 shared = (
606 607 requirementsmod.SHARED_REQUIREMENT in requirements
607 608 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
608 609 )
609 610 storevfs = None
610 611 if shared:
611 612 # This is a shared repo
612 613 sharedvfs = _getsharedvfs(hgvfs, requirements)
613 614 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
614 615 else:
615 616 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
616 617
617 618 # if .hg/requires contains the sharesafe requirement, it means
618 619 # there exists a `.hg/store/requires` too and we should read it
619 620 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
620 621 # is present. We never write SHARESAFE_REQUIREMENT for a repo if the store
621 622 # requirement is not present; refer to checkrequirementscompat() for that
622 623 #
623 624 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
624 625 # repository was shared the old way. We check the share source .hg/requires
625 626 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
626 627 # to be reshared
627 628 hint = _(b"see `hg help config.format.use-share-safe` for more information")
628 629 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
629 630 if (
630 631 shared
631 632 and requirementsmod.SHARESAFE_REQUIREMENT
632 633 not in _readrequires(sharedvfs, True)
633 634 ):
634 635 mismatch_warn = ui.configbool(
635 636 b'share', b'safe-mismatch.source-not-safe.warn'
636 637 )
637 638 mismatch_config = ui.config(
638 639 b'share', b'safe-mismatch.source-not-safe'
639 640 )
640 641 mismatch_verbose_upgrade = ui.configbool(
641 642 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
642 643 )
643 644 if mismatch_config in (
644 645 b'downgrade-allow',
645 646 b'allow',
646 647 b'downgrade-abort',
647 648 ):
648 649 # prevent cyclic import localrepo -> upgrade -> localrepo
649 650 from . import upgrade
650 651
651 652 upgrade.downgrade_share_to_non_safe(
652 653 ui,
653 654 hgvfs,
654 655 sharedvfs,
655 656 requirements,
656 657 mismatch_config,
657 658 mismatch_warn,
658 659 mismatch_verbose_upgrade,
659 660 )
660 661 elif mismatch_config == b'abort':
661 662 raise error.Abort(
662 663 _(b"share source does not support share-safe requirement"),
663 664 hint=hint,
664 665 )
665 666 else:
666 667 raise error.Abort(
667 668 _(
668 669 b"share-safe mismatch with source.\nUnrecognized"
669 670 b" value '%s' of `share.safe-mismatch.source-not-safe`"
670 671 b" set."
671 672 )
672 673 % mismatch_config,
673 674 hint=hint,
674 675 )
675 676 else:
676 677 requirements |= _readrequires(storevfs, False)
677 678 elif shared:
678 679 sourcerequires = _readrequires(sharedvfs, False)
679 680 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
680 681 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
681 682 mismatch_warn = ui.configbool(
682 683 b'share', b'safe-mismatch.source-safe.warn'
683 684 )
684 685 mismatch_verbose_upgrade = ui.configbool(
685 686 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
686 687 )
687 688 if mismatch_config in (
688 689 b'upgrade-allow',
689 690 b'allow',
690 691 b'upgrade-abort',
691 692 ):
692 693 # prevent cyclic import localrepo -> upgrade -> localrepo
693 694 from . import upgrade
694 695
695 696 upgrade.upgrade_share_to_safe(
696 697 ui,
697 698 hgvfs,
698 699 storevfs,
699 700 requirements,
700 701 mismatch_config,
701 702 mismatch_warn,
702 703 mismatch_verbose_upgrade,
703 704 )
704 705 elif mismatch_config == b'abort':
705 706 raise error.Abort(
706 707 _(
707 708 b'version mismatch: source uses share-safe'
708 709 b' functionality while the current share does not'
709 710 ),
710 711 hint=hint,
711 712 )
712 713 else:
713 714 raise error.Abort(
714 715 _(
715 716 b"share-safe mismatch with source.\nUnrecognized"
716 717 b" value '%s' of `share.safe-mismatch.source-safe` set."
717 718 )
718 719 % mismatch_config,
719 720 hint=hint,
720 721 )
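# The accepted values are enumerated in the checks above; an hgrc opting
# into automatic upgrades on both sides of the mismatch might look like
# (illustrative):
#
#   [share]
#   safe-mismatch.source-safe = upgrade-allow
#   safe-mismatch.source-not-safe = downgrade-allow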
721 722
722 723 # The .hg/hgrc file may load extensions or contain config options
723 724 # that influence repository construction. Attempt to load it and
724 725 # process any new extensions that it may have pulled in.
725 726 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
726 727 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
727 728 extensions.loadall(ui)
728 729 extensions.populateui(ui)
729 730
730 731 # Set of module names of extensions loaded for this repository.
731 732 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
732 733
733 734 supportedrequirements = gathersupportedrequirements(ui)
734 735
735 736 # We first validate the requirements are known.
736 737 ensurerequirementsrecognized(requirements, supportedrequirements)
737 738
738 739 # Then we validate that the known set is reasonable to use together.
739 740 ensurerequirementscompatible(ui, requirements)
740 741
741 742 # TODO there are unhandled edge cases related to opening repositories with
742 743 # shared storage. If storage is shared, we should also test for requirements
743 744 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
744 745 # that repo, as that repo may load extensions needed to open it. This is a
745 746 # bit complicated because we don't want the other hgrc to overwrite settings
746 747 # in this hgrc.
747 748 #
748 749 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
749 750 # file when sharing repos. But if a requirement is added after the share is
750 751 # performed, thereby introducing a new requirement for the opener, we may
751 752 # will not see that and could encounter a run-time error interacting with
752 753 # that shared store since it has an unknown-to-us requirement.
753 754
754 755 # At this point, we know we should be capable of opening the repository.
755 756 # Now get on with doing that.
756 757
757 758 features = set()
758 759
759 760 # The "store" part of the repository holds versioned data. How it is
760 761 # accessed is determined by various requirements. If `shared` or
761 762 # `relshared` requirements are present, this indicates the current repository
762 763 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
763 764 if shared:
764 765 storebasepath = sharedvfs.base
765 766 cachepath = sharedvfs.join(b'cache')
766 767 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
767 768 else:
768 769 storebasepath = hgvfs.base
769 770 cachepath = hgvfs.join(b'cache')
770 771 wcachepath = hgvfs.join(b'wcache')
771 772
772 773 # The store has changed over time and the exact layout is dictated by
773 774 # requirements. The store interface abstracts differences across all
774 775 # of them.
775 776 store = makestore(
776 777 requirements,
777 778 storebasepath,
778 779 lambda base: vfsmod.vfs(base, cacheaudited=True),
779 780 )
780 781 hgvfs.createmode = store.createmode
781 782
782 783 storevfs = store.vfs
783 784 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
784 785
785 786 if (
786 787 requirementsmod.REVLOGV2_REQUIREMENT in requirements
787 788 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
788 789 ):
789 790 features.add(repository.REPO_FEATURE_SIDE_DATA)
790 791 # the revlogv2 docket introduced a race condition that we need to fix
791 792 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
792 793
793 794 # The cache vfs is used to manage cache files.
794 795 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
795 796 cachevfs.createmode = store.createmode
796 797 # The cache vfs is used to manage cache files related to the working copy
797 798 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
798 799 wcachevfs.createmode = store.createmode
799 800
800 801 # Now resolve the type for the repository object. We do this by repeatedly
801 802 # calling a factory function to produce types for specific aspects of the
802 803 # repo's operation. The aggregate returned types are used as base classes
803 804 # for a dynamically-derived type, which will represent our new repository.
804 805
805 806 bases = []
806 807 extrastate = {}
807 808
808 809 for iface, fn in REPO_INTERFACES:
809 810 # We pass all potentially useful state to give extensions tons of
810 811 # flexibility.
811 812 typ = fn()(
812 813 ui=ui,
813 814 intents=intents,
814 815 requirements=requirements,
815 816 features=features,
816 817 wdirvfs=wdirvfs,
817 818 hgvfs=hgvfs,
818 819 store=store,
819 820 storevfs=storevfs,
820 821 storeoptions=storevfs.options,
821 822 cachevfs=cachevfs,
822 823 wcachevfs=wcachevfs,
823 824 extensionmodulenames=extensionmodulenames,
824 825 extrastate=extrastate,
825 826 baseclasses=bases,
826 827 )
827 828
828 829 if not isinstance(typ, type):
829 830 raise error.ProgrammingError(
830 831 b'unable to construct type for %s' % iface
831 832 )
832 833
833 834 bases.append(typ)
834 835
835 836 # type() allows you to use characters in type names that wouldn't be
836 837 # recognized as Python symbols in source code. We abuse that to add
837 838 # rich information about our constructed repo.
838 839 name = pycompat.sysstr(
839 840 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
840 841 )
841 842
842 843 cls = type(name, tuple(bases), {})
843 844
844 845 return cls(
845 846 baseui=baseui,
846 847 ui=ui,
847 848 origroot=path,
848 849 wdirvfs=wdirvfs,
849 850 hgvfs=hgvfs,
850 851 requirements=requirements,
851 852 supportedrequirements=supportedrequirements,
852 853 sharedpath=storebasepath,
853 854 store=store,
854 855 cachevfs=cachevfs,
855 856 wcachevfs=wcachevfs,
856 857 features=features,
857 858 intents=intents,
858 859 )
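# Per the docstring above, extensions customize the derived repository
# type by wrapping the factory functions named in ``REPO_INTERFACES``;
# a hedged sketch (the wrapper body is illustrative):
#
#   def wrapfilestorage(orig, requirements, features, **kwargs):
#       cls = orig(requirements, features, **kwargs)
#       return cls  # or a subclass of ``cls`` with extra behaviour
#
#   extensions.wrapfunction(localrepo, 'makefilestorage', wrapfilestorage)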
859 860
860 861
861 862 def loadhgrc(
862 863 ui,
863 864 wdirvfs: vfsmod.vfs,
864 865 hgvfs: vfsmod.vfs,
865 866 requirements,
866 867 sharedvfs: Optional[vfsmod.vfs] = None,
867 868 ):
868 869 """Load hgrc files/content into a ui instance.
869 870
870 871 This is called during repository opening to load any additional
871 872 config files or settings relevant to the current repository.
872 873
873 874 Returns a bool indicating whether any additional configs were loaded.
874 875
875 876 Extensions should monkeypatch this function to modify how per-repo
876 877 configs are loaded. For example, an extension may wish to pull in
877 878 configs from alternate files or sources.
878 879
879 880 sharedvfs is a vfs object pointing to the source repo if the current one is a
880 881 shared one
881 882 """
882 883 if not rcutil.use_repo_hgrc():
883 884 return False
884 885
885 886 ret = False
886 887 # first load config from the shared source if we have to
887 888 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
888 889 try:
889 890 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
890 891 ret = True
891 892 except IOError:
892 893 pass
893 894
894 895 try:
895 896 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
896 897 ret = True
897 898 except IOError:
898 899 pass
899 900
900 901 try:
901 902 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
902 903 ret = True
903 904 except IOError:
904 905 pass
905 906
906 907 return ret
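# Per the docstring above, an extension reading configuration from an
# additional (hypothetical) file could wrap this function:
#
#   def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
#       ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
#       try:
#           ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#           ret = True
#       except IOError:
#           pass
#       return ret
#
#   extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)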
907 908
908 909
909 910 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
910 911 """Perform additional actions after .hg/hgrc is loaded.
911 912
912 913 This function is called during repository loading immediately after
913 914 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
914 915
915 916 The function can be used to validate configs, automatically add
916 917 options (including extensions) based on requirements, etc.
917 918 """
918 919
919 920 # Map of requirements to list of extensions to load automatically when
920 921 # the requirement is present.
921 922 autoextensions = {
922 923 b'git': [b'git'],
923 924 b'largefiles': [b'largefiles'],
924 925 b'lfs': [b'lfs'],
925 926 }
926 927
927 928 for requirement, names in sorted(autoextensions.items()):
928 929 if requirement not in requirements:
929 930 continue
930 931
931 932 for name in names:
932 933 if not ui.hasconfig(b'extensions', name):
933 934 ui.setconfig(b'extensions', name, b'', source=b'autoload')
934 935
935 936
936 937 def gathersupportedrequirements(ui):
937 938 """Determine the complete set of recognized requirements."""
938 939 # Start with all requirements supported by this file.
939 940 supported = set(localrepository._basesupported)
940 941
941 942 # Execute ``featuresetupfuncs`` entries if they belong to an extension
942 943 # relevant to this ui instance.
943 944 modules = {m.__name__ for n, m in extensions.extensions(ui)}
944 945
945 946 for fn in featuresetupfuncs:
946 947 if fn.__module__ in modules:
947 948 fn(ui, supported)
948 949
949 950 # Add derived requirements from registered compression engines.
950 951 for name in util.compengines:
951 952 engine = util.compengines[name]
952 953 if engine.available() and engine.revlogheader():
953 954 supported.add(b'exp-compression-%s' % name)
954 955 if engine.name() == b'zstd':
955 956 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
956 957
957 958 return supported
958 959
959 960
960 961 def ensurerequirementsrecognized(requirements, supported):
961 962 """Validate that a set of local requirements is recognized.
962 963
963 964 Receives a set of requirements. Raises an ``error.RepoError`` if there
964 965 exists any requirement in that set that currently loaded code doesn't
965 966 recognize.
966 967
967 968 Returns nothing if all requirements are recognized.
968 969 """
969 970 missing = set()
970 971
971 972 for requirement in requirements:
972 973 if requirement in supported:
973 974 continue
974 975
975 976 if not requirement or not requirement[0:1].isalnum():
976 977 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
977 978
978 979 missing.add(requirement)
979 980
980 981 if missing:
981 982 raise error.RequirementError(
982 983 _(b'repository requires features unknown to this Mercurial: %s')
983 984 % b' '.join(sorted(missing)),
984 985 hint=_(
985 986 b'see https://mercurial-scm.org/wiki/MissingRequirement '
986 987 b'for more information'
987 988 ),
988 989 )
989 990
990 991
991 992 def ensurerequirementscompatible(ui, requirements):
992 993 """Validates that a set of recognized requirements is mutually compatible.
993 994
994 995 Some requirements may not be compatible with others or require
995 996 config options that aren't enabled. This function is called during
996 997 repository opening to ensure that the set of requirements needed
997 998 to open a repository is sane and compatible with config options.
998 999
999 1000 Extensions can monkeypatch this function to perform additional
1000 1001 checking.
1001 1002
1002 1003 ``error.RepoError`` should be raised on failure.
1003 1004 """
1004 1005 if (
1005 1006 requirementsmod.SPARSE_REQUIREMENT in requirements
1006 1007 and not sparse.enabled
1007 1008 ):
1008 1009 raise error.RepoError(
1009 1010 _(
1010 1011 b'repository is using sparse feature but '
1011 1012 b'sparse is not enabled; enable the '
1012 1013 b'"sparse" extensions to access'
1013 1014 )
1014 1015 )
1015 1016
1016 1017
1017 1018 def makestore(requirements, path, vfstype):
1018 1019 """Construct a storage object for a repository."""
1019 1020 if requirementsmod.STORE_REQUIREMENT in requirements:
1020 1021 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1021 1022 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1022 1023 return storemod.fncachestore(path, vfstype, dotencode)
1023 1024
1024 1025 return storemod.encodedstore(path, vfstype)
1025 1026
1026 1027 return storemod.basicstore(path, vfstype)
1027 1028
1028 1029
1029 1030 def resolvestorevfsoptions(ui, requirements, features):
1030 1031 """Resolve the options to pass to the store vfs opener.
1031 1032
1032 1033 The returned dict is used to influence behavior of the storage layer.
1033 1034 """
1034 1035 options = {}
1035 1036
1036 1037 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1037 1038 options[b'treemanifest'] = True
1038 1039
1039 1040 # experimental config: format.manifestcachesize
1040 1041 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1041 1042 if manifestcachesize is not None:
1042 1043 options[b'manifestcachesize'] = manifestcachesize
1043 1044
1044 1045 # In the absence of another requirement superseding a revlog-related
1045 1046 # requirement, we have to assume the repo is using revlog version 0.
1046 1047 # This revlog format is super old and we don't bother trying to parse
1047 1048 # opener options for it because those options wouldn't do anything
1048 1049 # meaningful on such old repos.
1049 1050 if (
1050 1051 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1051 1052 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1052 1053 ):
1053 1054 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1054 1055 else: # explicitly mark repo as using revlogv0
1055 1056 options[b'revlogv0'] = True
1056 1057
1057 1058 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1058 1059 options[b'copies-storage'] = b'changeset-sidedata'
1059 1060 else:
1060 1061 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1061 1062 copiesextramode = (b'changeset-only', b'compatibility')
1062 1063 if writecopiesto in copiesextramode:
1063 1064 options[b'copies-storage'] = b'extra'
1064 1065
1065 1066 return options
1066 1067

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    dps_cgds = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlogs seem to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]
    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` " b"for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options

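# Illustrative note (not part of the upstream module): the compression
# levels resolved above come from hgrc settings such as::
#
#   [storage]
#   revlog.zlib.level = 6
#   revlog.zstd.level = 3
#
# Values outside 0-9 (zlib) or 0-22 (zstd) abort, as enforced above.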

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )

        return filelog.filelog(self.svfs, path, try_split=try_split)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )
        return filelog.narrowfilelog(
            self.svfs, path, self._storenarrowmatch, try_split=try_split
        )


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, 'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        self._dirstate = None
        # post-dirstate-status hooks
        self._postdsstatus = []

        self._pending_narrow_pats = None
        self._pending_narrow_pats_dirstate = None

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    @property
    def vfs_map(self):
        return {
            b'': self.svfs,
            b'plain': self.vfs,
            b'store': self.svfs,
        }

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self, path=None, remotehidden=False):
        return localpeer(
            self, path=path, remotehidden=remotehidden
        )  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
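
    # Illustrative note (not part of the upstream module): a typical call is
    # ``repo.filtered(b'served')`` or ``repo.filtered(b'visible')``; the
    # result is a repoview set to exactly that view, never a view stacked on
    # top of the current one.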

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light"; the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid a race; see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @unfilteredpropertycache
    def dirstate(self):
        if self._dirstate is None:
            self._dirstate = self._makedirstate()
        else:
            self._dirstate.refresh()
        return self._dirstate

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        okay = True
        try:
            self.changelog.rev(node)
        except error.LookupError:
            # If the parents are unknown, it might just be because the
            # changelog in memory is lagging behind the dirstate in memory.
            # So try to refresh the changelog first.
            #
            # We only do so if we don't hold the lock. If we do hold the
            # lock, the invalidation at that time should have taken care of
            # this and something is very fishy.
            if self.currentlock() is None:
                self.invalidate()
                try:
                    self.changelog.rev(node)
                except error.LookupError:
                    okay = False
            else:
                # XXX we should consider raising an error here.
                okay = False
        if okay:
            return node
        else:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        # the narrow management should probably move into its own object
        val = self._pending_narrow_pats
        if val is None:
            val = narrowspec.load(self)
        return val

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
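
    # Illustrative note (not part of the upstream module): for a narrowspec
    # that only includes ``dir1/``, something like
    # ``repo.narrowmatch(matchmod.match(repo.root, b'', [b'glob:**.py']))``
    # would match only the .py files under dir1/, since the given matcher is
    # intersected with the narrow matcher.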

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognize right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
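
    # Illustrative note (not part of the upstream module): this indexing
    # accepts several kinds of changeids, e.g. ``repo[0]`` (a revision
    # number), ``repo[b'tip']``, ``repo[b'.']``, a 20-byte binary node or its
    # 40-char hex form, and ``repo[None]`` for the working directory context.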

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
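
    # Illustrative note (not part of the upstream module): typical calls are
    # ``repo.revs(b'heads(%ln)', nodes)`` or
    # ``repo.revs(b'branch(%s)', b'default')``, where %ln and %s are
    # revsetlang.formatspec escapes for a list of nodes and a string.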

    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
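
    # Illustrative note (not part of the upstream module): the patterns read
    # here come from the [encode]/[decode] hgrc sections, which map a file
    # pattern to a filter command, e.g.::
    #
    #   [encode]
    #   **.txt = tempfile: unix2dos -n INFILE OUTFILE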

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
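
    # Illustrative note (not part of the upstream module): ``flags`` encodes
    # the file type, e.g. ``repo.wwrite(b'script.sh', data, b'x')`` writes an
    # executable file, while ``b'l'`` writes a symlink whose target is
    # ``data``; an empty ``b''`` yields a regular, non-executable file.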

    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # At that point your dirstate should be clean:
        #
        # - If you don't have the wlock, why would you still have a dirty
        #   dirstate?
        #
        # - If you hold the wlock, you should not be opening a transaction in
        #   the middle of a `dirstate.changing_*` block. The transaction needs
        #   to be open before that and wrap the change-context.
        #
        # - If you are not within a `dirstate.changing_*` context, why is our
        #   dirstate dirty?
        if self.dirstate._dirty:
            m = "cannot open a transaction with a dirty dirstate"
            raise error.ProgrammingError(m)

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = self.vfs_map
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by default.
        # The flag will be removed when we are happy with the performance
        # impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

2530 2531 def validate(tr2):
2531 2532 """will run pre-closing hooks"""
2532 2533 # XXX the transaction API is a bit lacking here so we take a hacky
2533 2534 # path for now
2534 2535 #
2535 2536 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2536 2537 # dict is copied before these run. In addition we needs the data
2537 2538 # available to in memory hooks too.
2538 2539 #
2539 2540 # Moreover, we also need to make sure this runs before txnclose
2540 2541 # hooks and there is no "pending" mechanism that would execute
2541 2542 # logic only if hooks are about to run.
2542 2543 #
2543 2544 # Fixing this limitation of the transaction is also needed to track
2544 2545 # other families of changes (bookmarks, phases, obsolescence).
2545 2546 #
2546 2547 # This will have to be fixed before we remove the experimental
2547 2548 # gating.
2548 2549 tracktags(tr2)
2549 2550 repo = reporef()
2550 2551 assert repo is not None # help pytype
2551 2552
2552 2553 singleheadopt = (b'experimental', b'single-head-per-branch')
2553 2554 singlehead = repo.ui.configbool(*singleheadopt)
2554 2555 if singlehead:
2555 2556 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2556 2557 accountclosed = singleheadsub.get(
2557 2558 b"account-closed-heads", False
2558 2559 )
2559 2560 if singleheadsub.get(b"public-changes-only", False):
2560 2561 filtername = b"immutable"
2561 2562 else:
2562 2563 filtername = b"visible"
2563 2564 scmutil.enforcesinglehead(
2564 2565 repo, tr2, desc, accountclosed, filtername
2565 2566 )
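# The sub-options read above use the standard ``option:sub-option``
# hgrc syntax; an illustrative configuration::
#
#   [experimental]
#   single-head-per-branch = yes
#   single-head-per-branch:account-closed-heads = yes
#   single-head-per-branch:public-changes-only = yes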
2566 2567 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2567 2568 for name, (old, new) in sorted(
2568 2569 tr.changes[b'bookmarks'].items()
2569 2570 ):
2570 2571 args = tr.hookargs.copy()
2571 2572 args.update(bookmarks.preparehookargs(name, old, new))
2572 2573 repo.hook(
2573 2574 b'pretxnclose-bookmark',
2574 2575 throw=True,
2575 2576 **pycompat.strkwargs(args)
2576 2577 )
2577 2578 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2578 2579 cl = repo.unfiltered().changelog
2579 2580 for revs, (old, new) in tr.changes[b'phases']:
2580 2581 for rev in revs:
2581 2582 args = tr.hookargs.copy()
2582 2583 node = hex(cl.node(rev))
2583 2584 args.update(phases.preparehookargs(node, old, new))
2584 2585 repo.hook(
2585 2586 b'pretxnclose-phase',
2586 2587 throw=True,
2587 2588 **pycompat.strkwargs(args)
2588 2589 )
2589 2590
2590 2591 repo.hook(
2591 2592 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2592 2593 )
2593 2594
2594 2595 def releasefn(tr, success):
2595 2596 repo = reporef()
2596 2597 if repo is None:
2597 2598 # If the repo has been GC'd (and this release function is being
2598 2599 # called from transaction.__del__), there's not much we can do,
2599 2600 # so just leave the unfinished transaction there and let the
2600 2601 # user run `hg recover`.
2601 2602 return
2602 2603 if success:
2603 2604 # this should be explicitly invoked here, because
2604 2605 # in-memory changes aren't written out when closing
2605 2606 # the transaction, if tr.addfilegenerator (via
2606 2607 # dirstate.write or so) isn't invoked while the
2607 2608 # transaction is running
2608 2609 repo.dirstate.write(None)
2609 2610 else:
2610 2611 # discard all changes (including ones already written
2611 2612 # out) in this transaction
2612 2613 repo.invalidate(clearfilecache=True)
2613 2614
2614 2615 tr = transaction.transaction(
2615 2616 rp,
2616 2617 self.svfs,
2617 2618 vfsmap,
2618 2619 b"journal",
2619 2620 b"undo",
2620 2621 lambda: None,
2621 2622 self.store.createmode,
2622 2623 validator=validate,
2623 2624 releasefn=releasefn,
2624 2625 checkambigfiles=_cachedfiles,
2625 2626 name=desc,
2626 2627 )
2627 2628 for vfs_id, path in self._journalfiles():
2628 2629 tr.add_journal(vfs_id, path)
2629 2630 tr.changes[b'origrepolen'] = len(self)
2630 2631 tr.changes[b'obsmarkers'] = set()
2631 2632 tr.changes[b'phases'] = []
2632 2633 tr.changes[b'bookmarks'] = {}
2633 2634
2634 2635 tr.hookargs[b'txnid'] = txnid
2635 2636 tr.hookargs[b'txnname'] = desc
2636 2637 tr.hookargs[b'changes'] = tr.changes
2637 2638 # note: writing the fncache only during finalize means that the file is
2638 2639 # outdated when running hooks. As fncache is used for streaming clone,
2639 2640 # this is not expected to break anything that happens during the hooks.
2640 2641 tr.addfinalize(b'flush-fncache', self.store.write)
2641 2642
2642 2643 def txnclosehook(tr2):
2643 2644 """To be run if transaction is successful, will schedule a hook run"""
2644 2645 # Don't reference tr2 in hook() so we don't hold a reference.
2645 2646 # This reduces memory consumption when there are multiple
2646 2647 # transactions per lock. This can likely go away if issue5045
2647 2648 # fixes the function accumulation.
2648 2649 hookargs = tr2.hookargs
2649 2650
2650 2651 def hookfunc(unused_success):
2651 2652 repo = reporef()
2652 2653 assert repo is not None # help pytype
2653 2654
2654 2655 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2655 2656 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2656 2657 for name, (old, new) in bmchanges:
2657 2658 args = tr.hookargs.copy()
2658 2659 args.update(bookmarks.preparehookargs(name, old, new))
2659 2660 repo.hook(
2660 2661 b'txnclose-bookmark',
2661 2662 throw=False,
2662 2663 **pycompat.strkwargs(args)
2663 2664 )
2664 2665
2665 2666 if hook.hashook(repo.ui, b'txnclose-phase'):
2666 2667 cl = repo.unfiltered().changelog
2667 2668 phasemv = sorted(
2668 2669 tr.changes[b'phases'], key=lambda r: r[0][0]
2669 2670 )
2670 2671 for revs, (old, new) in phasemv:
2671 2672 for rev in revs:
2672 2673 args = tr.hookargs.copy()
2673 2674 node = hex(cl.node(rev))
2674 2675 args.update(phases.preparehookargs(node, old, new))
2675 2676 repo.hook(
2676 2677 b'txnclose-phase',
2677 2678 throw=False,
2678 2679 **pycompat.strkwargs(args)
2679 2680 )
2680 2681
2681 2682 repo.hook(
2682 2683 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2683 2684 )
2684 2685
2685 2686 repo = reporef()
2686 2687 assert repo is not None # help pytype
2687 2688 repo._afterlock(hookfunc)
2688 2689
2689 2690 tr.addfinalize(b'txnclose-hook', txnclosehook)
2690 2691 # Include a leading "-" to make it happen before the transaction summary
2691 2692 # reports registered via scmutil.registersummarycallback() whose names
2692 2693 # are 00-txnreport etc. That way, the caches will be warm when the
2693 2694 # callbacks run.
2694 2695 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2695 2696
2696 2697 def txnaborthook(tr2):
2697 2698 """To be run if transaction is aborted"""
2698 2699 repo = reporef()
2699 2700 assert repo is not None # help pytype
2700 2701 repo.hook(
2701 2702 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2702 2703 )
2703 2704
2704 2705 tr.addabort(b'txnabort-hook', txnaborthook)
2705 2706 # avoid eager cache invalidation. in-memory data should be identical
2706 2707 # to stored data if the transaction has no error.
2707 2708 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2708 2709 self._transref = weakref.ref(tr)
2709 2710 scmutil.registersummarycallback(self, tr, desc)
2710 2711 # This only exists to deal with rollback's need to have viable
2711 2712 # parents at the end of the operation. So back up viable parents at the
2712 2713 # time of this operation.
2713 2714 #
2714 2715 # We only do it when the `wlock` is taken, otherwise others might be
2715 2716 # altering the dirstate under us.
2716 2717 #
2717 2718 # This is really not a great way to do this (first, because we cannot
2718 2719 # always do it). More viable alternatives exist:
2719 2720 #
2720 2721 # - backing up only the working copy parents in a dedicated file and doing
2721 2722 # a clean "keep-update" to them on `hg rollback`.
2722 2723 #
2723 2724 # - slightly changing the behavior and applying logic similar to "hg
2724 2725 # strip" to pick a working copy destination on `hg rollback`
2725 2726 if self.currentwlock() is not None:
2726 2727 ds = self.dirstate
2727 2728 if not self.vfs.exists(b'branch'):
2728 2729 # force a file to be written if none exists
2729 2730 ds.setbranch(b'default', None)
2730 2731
2731 2732 def backup_dirstate(tr):
2732 2733 for f in ds.all_file_names():
2733 2734 # hardlink backup is okay because `dirstate` is always
2734 2735 # atomically written and possible data files are append-only
2735 2736 # and resistant to trailing data.
2736 2737 tr.addbackup(f, hardlink=True, location=b'plain')
2737 2738
2738 2739 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2739 2740 return tr
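# Typical usage combines the transaction with the locks documented
# further down; the returned object is a context manager (sketch)::
#
#   with repo.wlock(), repo.lock(), repo.transaction(b'my-change'):
#       ...  # store writes are journaled, and rolled back on error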
2740 2741
2741 2742 def _journalfiles(self):
2742 2743 return (
2743 2744 (self.svfs, b'journal'),
2744 2745 (self.vfs, b'journal.desc'),
2745 2746 )
2746 2747
2747 2748 def undofiles(self):
2748 2749 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2749 2750
2750 2751 @unfilteredmethod
2751 2752 def _writejournal(self, desc):
2752 2753 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2753 2754
2754 2755 def recover(self):
2755 2756 with self.lock():
2756 2757 if self.svfs.exists(b"journal"):
2757 2758 self.ui.status(_(b"rolling back interrupted transaction\n"))
2758 2759 vfsmap = self.vfs_map
2759 2760 transaction.rollback(
2760 2761 self.svfs,
2761 2762 vfsmap,
2762 2763 b"journal",
2763 2764 self.ui.warn,
2764 2765 checkambigfiles=_cachedfiles,
2765 2766 )
2766 2767 self.invalidate()
2767 2768 return True
2768 2769 else:
2769 2770 self.ui.warn(_(b"no interrupted transaction available\n"))
2770 2771 return False
2771 2772
2772 2773 def rollback(self, dryrun=False, force=False):
2773 2774 wlock = lock = None
2774 2775 try:
2775 2776 wlock = self.wlock()
2776 2777 lock = self.lock()
2777 2778 if self.svfs.exists(b"undo"):
2778 2779 return self._rollback(dryrun, force)
2779 2780 else:
2780 2781 self.ui.warn(_(b"no rollback information available\n"))
2781 2782 return 1
2782 2783 finally:
2783 2784 release(lock, wlock)
2784 2785
2785 2786 @unfilteredmethod # Until we get smarter cache management
2786 2787 def _rollback(self, dryrun, force):
2787 2788 ui = self.ui
2788 2789
2789 2790 parents = self.dirstate.parents()
2790 2791 try:
2791 2792 args = self.vfs.read(b'undo.desc').splitlines()
2792 2793 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2793 2794 if len(args) >= 3:
2794 2795 detail = args[2]
2795 2796 oldtip = oldlen - 1
2796 2797
2797 2798 if detail and ui.verbose:
2798 2799 msg = _(
2799 2800 b'repository tip rolled back to revision %d'
2800 2801 b' (undo %s: %s)\n'
2801 2802 ) % (oldtip, desc, detail)
2802 2803 else:
2803 2804 msg = _(
2804 2805 b'repository tip rolled back to revision %d (undo %s)\n'
2805 2806 ) % (oldtip, desc)
2806 2807 parentgone = any(self[p].rev() > oldtip for p in parents)
2807 2808 except IOError:
2808 2809 msg = _(b'rolling back unknown transaction\n')
2809 2810 desc = None
2810 2811 parentgone = True
2811 2812
2812 2813 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2813 2814 raise error.Abort(
2814 2815 _(
2815 2816 b'rollback of last commit while not checked out '
2816 2817 b'may lose data'
2817 2818 ),
2818 2819 hint=_(b'use -f to force'),
2819 2820 )
2820 2821
2821 2822 ui.status(msg)
2822 2823 if dryrun:
2823 2824 return 0
2824 2825
2825 2826 self.destroying()
2826 2827 vfsmap = self.vfs_map
2827 2828 skip_journal_pattern = None
2828 2829 if not parentgone:
2829 2830 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2830 2831 transaction.rollback(
2831 2832 self.svfs,
2832 2833 vfsmap,
2833 2834 b'undo',
2834 2835 ui.warn,
2835 2836 checkambigfiles=_cachedfiles,
2836 2837 skip_journal_pattern=skip_journal_pattern,
2837 2838 )
2838 2839 self.invalidate()
2839 2840 self.dirstate.invalidate()
2840 2841
2841 2842 if parentgone:
2842 2843 # replace this with some explicit parent update in the future.
2843 2844 has_node = self.changelog.index.has_node
2844 2845 if not all(has_node(p) for p in self.dirstate._pl):
2845 2846 # There was no dirstate to back up initially, we need to drop
2846 2847 # the existing one.
2847 2848 with self.dirstate.changing_parents(self):
2848 2849 self.dirstate.setparents(self.nullid)
2849 2850 self.dirstate.clear()
2850 2851
2851 2852 parents = tuple([p.rev() for p in self[None].parents()])
2852 2853 if len(parents) > 1:
2853 2854 ui.status(
2854 2855 _(
2855 2856 b'working directory now based on '
2856 2857 b'revisions %d and %d\n'
2857 2858 )
2858 2859 % parents
2859 2860 )
2860 2861 else:
2861 2862 ui.status(
2862 2863 _(b'working directory now based on revision %d\n') % parents
2863 2864 )
2864 2865 mergestatemod.mergestate.clean(self)
2865 2866
2866 2867 # TODO: if we know which new heads may result from this rollback, pass
2867 2868 # them to destroy(), which will prevent the branchhead cache from being
2868 2869 # invalidated.
2869 2870 self.destroyed()
2870 2871 return 0
2871 2872
2872 2873 def _buildcacheupdater(self, newtransaction):
2873 2874 """called during transaction to build the callback updating cache
2874 2875
2875 2876 Lives on the repository to help extensions that might want to augment
2876 2877 this logic. For this purpose, the created transaction is passed to the
2877 2878 method.
2878 2879 """
2879 2880 # we must avoid cyclic reference between repo and transaction.
2880 2881 reporef = weakref.ref(self)
2881 2882
2882 2883 def updater(tr):
2883 2884 repo = reporef()
2884 2885 assert repo is not None # help pytype
2885 2886 repo.updatecaches(tr)
2886 2887
2887 2888 return updater
2888 2889
2889 2890 @unfilteredmethod
2890 2891 def updatecaches(self, tr=None, full=False, caches=None):
2891 2892 """warm appropriate caches
2892 2893
2893 2894 If this function is called after a transaction closed, the transaction
2894 2895 will be available in the 'tr' argument. This can be used to selectively
2895 2896 update caches relevant to the changes in that transaction.
2896 2897
2897 2898 If 'full' is set, make sure all caches the function knows about have
2898 2899 up-to-date data, even the ones usually loaded more lazily.
2899 2900
2900 2901 The `full` argument can take a special "post-clone" value. In this case
2901 2902 the cache warming is done after a clone, and some of the slower caches
2902 2903 might be skipped, namely the `.fnodetags` one. This argument is 5.8-specific
2903 2904 as we plan for a cleaner way to deal with this for 5.9.
2904 2905 """
2905 2906 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2906 2907 # During strip, many caches are invalid but
2907 2908 # later call to `destroyed` will refresh them.
2908 2909 return
2909 2910
2910 2911 unfi = self.unfiltered()
2911 2912
2912 2913 if full:
2913 2914 msg = (
2914 2915 "`full` argument for `repo.updatecaches` is deprecated\n"
2915 2916 "(use `caches=repository.CACHE_ALL` instead)"
2916 2917 )
2917 2918 self.ui.deprecwarn(msg, b"5.9")
2918 2919 caches = repository.CACHES_ALL
2919 2920 if full == b"post-clone":
2920 2921 caches = repository.CACHES_POST_CLONE
2921 2922 caches = repository.CACHES_ALL
2922 2923 elif caches is None:
2923 2924 caches = repository.CACHES_DEFAULT
2924 2925
2925 2926 if repository.CACHE_BRANCHMAP_SERVED in caches:
2926 2927 if tr is None or tr.changes[b'origrepolen'] < len(self):
2927 2928 # accessing the 'served' branchmap should refresh all the others,
2928 2929 self.ui.debug(b'updating the branch cache\n')
2929 2930 self.filtered(b'served').branchmap()
2930 2931 self.filtered(b'served.hidden').branchmap()
2931 2932 # flush all possibly delayed write.
2932 2933 self._branchcaches.write_delayed(self)
2933 2934
2934 2935 if repository.CACHE_CHANGELOG_CACHE in caches:
2935 2936 self.changelog.update_caches(transaction=tr)
2936 2937
2937 2938 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2938 2939 self.manifestlog.update_caches(transaction=tr)
2939 2940 for entry in self.store.walk():
2940 2941 if not entry.is_revlog:
2941 2942 continue
2942 2943 if not entry.is_manifestlog:
2943 2944 continue
2944 2945 manifestrevlog = entry.get_revlog_instance(self).get_revlog()
2945 2946 if manifestrevlog is not None:
2946 2947 manifestrevlog.update_caches(transaction=tr)
2947 2948
2948 2949 if repository.CACHE_REV_BRANCH in caches:
2949 2950 rbc = unfi.revbranchcache()
2950 2951 for r in unfi.changelog:
2951 2952 rbc.branchinfo(r)
2952 2953 rbc.write()
2953 2954
2954 2955 if repository.CACHE_FULL_MANIFEST in caches:
2955 2956 # ensure the working copy parents are in the manifestfulltextcache
2956 2957 for ctx in self[b'.'].parents():
2957 2958 ctx.manifest() # accessing the manifest is enough
2958 2959
2959 2960 if repository.CACHE_FILE_NODE_TAGS in caches:
2960 2961 # accessing fnode cache warms the cache
2961 2962 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2962 2963
2963 2964 if repository.CACHE_TAGS_DEFAULT in caches:
2964 2965 # accessing tags warms the cache
2965 2966 self.tags()
2966 2967 if repository.CACHE_TAGS_SERVED in caches:
2967 2968 self.filtered(b'served').tags()
2968 2969
2969 2970 if repository.CACHE_BRANCHMAP_ALL in caches:
2970 2971 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2971 2972 # so we're forcing a write to cause these caches to be warmed up
2972 2973 # even if they haven't explicitly been requested yet (if they've
2973 2974 # never been used by hg, they won't ever have been written, even if
2974 2975 # they're a subset of another kind of cache that *has* been used).
2975 2976 for filt in repoview.filtertable.keys():
2976 2977 filtered = self.filtered(filt)
2977 2978 filtered.branchmap().write(filtered)
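# Callers can warm a specific subset of caches by passing one of the
# selections used above (sketch, assuming an open ``repo``)::
#
#   from mercurial.interfaces import repository
#
#   repo.updatecaches(caches=repository.CACHES_DEFAULT)  # usual set
#   repo.updatecaches(caches=repository.CACHES_ALL)      # everything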
2978 2979
2979 2980 def invalidatecaches(self):
2980 2981 if '_tagscache' in vars(self):
2981 2982 # can't use delattr on proxy
2982 2983 del self.__dict__['_tagscache']
2983 2984
2984 2985 self._branchcaches.clear()
2985 2986 self.invalidatevolatilesets()
2986 2987 self._sparsesignaturecache.clear()
2987 2988
2988 2989 def invalidatevolatilesets(self):
2989 2990 self.filteredrevcache.clear()
2990 2991 obsolete.clearobscaches(self)
2991 2992 self._quick_access_changeid_invalidate()
2992 2993
2993 2994 def invalidatedirstate(self):
2994 2995 """Invalidates the dirstate, causing the next call to dirstate
2995 2996 to check if it was modified since the last time it was read,
2996 2997 rereading it if it has.
2997 2998
2998 2999 This is different from dirstate.invalidate() in that it doesn't always
2999 3000 reread the dirstate. Use dirstate.invalidate() if you want to
3000 3001 explicitly read the dirstate again (i.e. restoring it to a previous
3001 3002 known good state)."""
3002 3003 unfi = self.unfiltered()
3003 3004 if 'dirstate' in unfi.__dict__:
3004 3005 assert not self.dirstate.is_changing_any
3005 3006 del unfi.__dict__['dirstate']
3006 3007
3007 3008 def invalidate(self, clearfilecache=False):
3008 3009 """Invalidates both store and non-store parts other than dirstate
3009 3010
3010 3011 If a transaction is running, invalidation of store is omitted,
3011 3012 because discarding in-memory changes might cause inconsistency
3012 3013 (e.g. incomplete fncache causes unintentional failure, but
3013 3014 redundant one doesn't).
3014 3015 """
3015 3016 unfiltered = self.unfiltered() # all file caches are stored unfiltered
3016 3017 for k in list(self._filecache.keys()):
3017 3018 if (
3018 3019 k == b'changelog'
3019 3020 and self.currenttransaction()
3020 3021 and self.changelog._delayed
3021 3022 ):
3022 3023 # The changelog object may store unwritten revisions. We don't
3023 3024 # want to lose them.
3024 3025 # TODO: Solve the problem instead of working around it.
3025 3026 continue
3026 3027
3027 3028 if clearfilecache:
3028 3029 del self._filecache[k]
3029 3030 try:
3030 3031 delattr(unfiltered, k)
3031 3032 except AttributeError:
3032 3033 pass
3033 3034 self.invalidatecaches()
3034 3035 if not self.currenttransaction():
3035 3036 # TODO: Changing contents of store outside transaction
3036 3037 # causes inconsistency. We should make in-memory store
3037 3038 # changes detectable, and abort if changed.
3038 3039 self.store.invalidatecaches()
3039 3040
3040 3041 def invalidateall(self):
3041 3042 """Fully invalidates both store and non-store parts, causing the
3042 3043 subsequent operation to reread any outside changes."""
3043 3044 # extensions should hook this to invalidate their caches
3044 3045 self.invalidate()
3045 3046 self.invalidatedirstate()
3046 3047
3047 3048 @unfilteredmethod
3048 3049 def _refreshfilecachestats(self, tr):
3049 3050 """Reload stats of cached files so that they are flagged as valid"""
3050 3051 for k, ce in self._filecache.items():
3051 3052 k = pycompat.sysstr(k)
3052 3053 if k == 'dirstate' or k not in self.__dict__:
3053 3054 continue
3054 3055 ce.refresh()
3055 3056
3056 3057 def _lock(
3057 3058 self,
3058 3059 vfs,
3059 3060 lockname,
3060 3061 wait,
3061 3062 releasefn,
3062 3063 acquirefn,
3063 3064 desc,
3064 3065 ):
3065 3066 timeout = 0
3066 3067 warntimeout = 0
3067 3068 if wait:
3068 3069 timeout = self.ui.configint(b"ui", b"timeout")
3069 3070 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3070 3071 # internal config: ui.signal-safe-lock
3071 3072 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3072 3073
3073 3074 l = lockmod.trylock(
3074 3075 self.ui,
3075 3076 vfs,
3076 3077 lockname,
3077 3078 timeout,
3078 3079 warntimeout,
3079 3080 releasefn=releasefn,
3080 3081 acquirefn=acquirefn,
3081 3082 desc=desc,
3082 3083 signalsafe=signalsafe,
3083 3084 )
3084 3085 return l
3085 3086
3086 3087 def _afterlock(self, callback):
3087 3088 """add a callback to be run when the repository is fully unlocked
3088 3089
3089 3090 The callback will be executed when the outermost lock is released
3090 3091 (with wlock being higher level than 'lock')."""
3091 3092 for ref in (self._wlockref, self._lockref):
3092 3093 l = ref and ref()
3093 3094 if l and l.held:
3094 3095 l.postrelease.append(callback)
3095 3096 break
3096 3097 else: # no lock has been found.
3097 3098 callback(True)
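# For example, ``commit`` below registers its ``commit`` hook runner
# through this mechanism; a trivial registration looks like::
#
#   repo._afterlock(lambda success: repo.ui.debug(b'fully unlocked\n'))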
3098 3099
3099 3100 def lock(self, wait=True):
3100 3101 """Lock the repository store (.hg/store) and return a weak reference
3101 3102 to the lock. Use this before modifying the store (e.g. committing or
3102 3103 stripping). If you are opening a transaction, get a lock as well.
3103 3104
3104 3105 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3105 3106 'wlock' first to avoid a dead-lock hazard."""
3106 3107 l = self._currentlock(self._lockref)
3107 3108 if l is not None:
3108 3109 l.lock()
3109 3110 return l
3110 3111
3111 3112 l = self._lock(
3112 3113 vfs=self.svfs,
3113 3114 lockname=b"lock",
3114 3115 wait=wait,
3115 3116 releasefn=None,
3116 3117 acquirefn=self.invalidate,
3117 3118 desc=_(b'repository %s') % self.origroot,
3118 3119 )
3119 3120 self._lockref = weakref.ref(l)
3120 3121 return l
3121 3122
3122 3123 def wlock(self, wait=True):
3123 3124 """Lock the non-store parts of the repository (everything under
3124 3125 .hg except .hg/store) and return a weak reference to the lock.
3125 3126
3126 3127 Use this before modifying files in .hg.
3127 3128
3128 3129 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3129 3130 'wlock' first to avoid a dead-lock hazard."""
3130 3131 l = self._wlockref() if self._wlockref else None
3131 3132 if l is not None and l.held:
3132 3133 l.lock()
3133 3134 return l
3134 3135
3135 3136 # We do not need to check for non-waiting lock acquisition. Such
3136 3137 # acquisitions would not cause a dead-lock as they would just fail.
3137 3138 if wait and (
3138 3139 self.ui.configbool(b'devel', b'all-warnings')
3139 3140 or self.ui.configbool(b'devel', b'check-locks')
3140 3141 ):
3141 3142 if self._currentlock(self._lockref) is not None:
3142 3143 self.ui.develwarn(b'"wlock" acquired after "lock"')
3143 3144
3144 3145 def unlock():
3145 3146 if self.dirstate.is_changing_any:
3146 3147 msg = b"wlock release in the middle of a changing parents"
3147 3148 self.ui.develwarn(msg)
3148 3149 self.dirstate.invalidate()
3149 3150 else:
3150 3151 if self.dirstate._dirty:
3151 3152 msg = b"dirty dirstate on wlock release"
3152 3153 self.ui.develwarn(msg)
3153 3154 self.dirstate.write(None)
3154 3155
3155 3156 unfi = self.unfiltered()
3156 3157 if 'dirstate' in unfi.__dict__:
3157 3158 del unfi.__dict__['dirstate']
3158 3159
3159 3160 l = self._lock(
3160 3161 self.vfs,
3161 3162 b"wlock",
3162 3163 wait,
3163 3164 unlock,
3164 3165 self.invalidatedirstate,
3165 3166 _(b'working directory of %s') % self.origroot,
3166 3167 )
3167 3168 self._wlockref = weakref.ref(l)
3168 3169 return l
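# The documented ordering matters: callers that need both locks must
# take ``wlock`` before ``lock``, as ``commit`` below does::
#
#   with repo.wlock(), repo.lock():
#       ...  # safe to touch both the working copy and the store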
3169 3170
3170 3171 def _currentlock(self, lockref):
3171 3172 """Returns the lock if it's held, or None if it's not."""
3172 3173 if lockref is None:
3173 3174 return None
3174 3175 l = lockref()
3175 3176 if l is None or not l.held:
3176 3177 return None
3177 3178 return l
3178 3179
3179 3180 def currentwlock(self):
3180 3181 """Returns the wlock if it's held, or None if it's not."""
3181 3182 return self._currentlock(self._wlockref)
3182 3183
3183 3184 def currentlock(self):
3184 3185 """Returns the lock if it's held, or None if it's not."""
3185 3186 return self._currentlock(self._lockref)
3186 3187
3187 3188 def checkcommitpatterns(self, wctx, match, status, fail):
3188 3189 """check for commit arguments that aren't committable"""
3189 3190 if match.isexact() or match.prefix():
3190 3191 matched = set(status.modified + status.added + status.removed)
3191 3192
3192 3193 for f in match.files():
3193 3194 f = self.dirstate.normalize(f)
3194 3195 if f == b'.' or f in matched or f in wctx.substate:
3195 3196 continue
3196 3197 if f in status.deleted:
3197 3198 fail(f, _(b'file not found!'))
3198 3199 # Is it a directory that exists or used to exist?
3199 3200 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3200 3201 d = f + b'/'
3201 3202 for mf in matched:
3202 3203 if mf.startswith(d):
3203 3204 break
3204 3205 else:
3205 3206 fail(f, _(b"no match under directory!"))
3206 3207 elif f not in self.dirstate:
3207 3208 fail(f, _(b"file not tracked!"))
3208 3209
3209 3210 @unfilteredmethod
3210 3211 def commit(
3211 3212 self,
3212 3213 text=b"",
3213 3214 user=None,
3214 3215 date=None,
3215 3216 match=None,
3216 3217 force=False,
3217 3218 editor=None,
3218 3219 extra=None,
3219 3220 ):
3220 3221 """Add a new revision to current repository.
3221 3222
3222 3223 Revision information is gathered from the working directory,
3223 3224 match can be used to filter the committed files. If editor is
3224 3225 supplied, it is called to get a commit message.
3225 3226 """
3226 3227 if extra is None:
3227 3228 extra = {}
3228 3229
3229 3230 def fail(f, msg):
3230 3231 raise error.InputError(b'%s: %s' % (f, msg))
3231 3232
3232 3233 if not match:
3233 3234 match = matchmod.always()
3234 3235
3235 3236 if not force:
3236 3237 match.bad = fail
3237 3238
3238 3239 # lock() for recent changelog (see issue4368)
3239 3240 with self.wlock(), self.lock():
3240 3241 wctx = self[None]
3241 3242 merge = len(wctx.parents()) > 1
3242 3243
3243 3244 if not force and merge and not match.always():
3244 3245 raise error.Abort(
3245 3246 _(
3246 3247 b'cannot partially commit a merge '
3247 3248 b'(do not specify files or patterns)'
3248 3249 )
3249 3250 )
3250 3251
3251 3252 status = self.status(match=match, clean=force)
3252 3253 if force:
3253 3254 status.modified.extend(
3254 3255 status.clean
3255 3256 ) # mq may commit clean files
3256 3257
3257 3258 # check subrepos
3258 3259 subs, commitsubs, newstate = subrepoutil.precommit(
3259 3260 self.ui, wctx, status, match, force=force
3260 3261 )
3261 3262
3262 3263 # make sure all explicit patterns are matched
3263 3264 if not force:
3264 3265 self.checkcommitpatterns(wctx, match, status, fail)
3265 3266
3266 3267 cctx = context.workingcommitctx(
3267 3268 self, status, text, user, date, extra
3268 3269 )
3269 3270
3270 3271 ms = mergestatemod.mergestate.read(self)
3271 3272 mergeutil.checkunresolved(ms)
3272 3273
3273 3274 # internal config: ui.allowemptycommit
3274 3275 if cctx.isempty() and not self.ui.configbool(
3275 3276 b'ui', b'allowemptycommit'
3276 3277 ):
3277 3278 self.ui.debug(b'nothing to commit, clearing merge state\n')
3278 3279 ms.reset()
3279 3280 return None
3280 3281
3281 3282 if merge and cctx.deleted():
3282 3283 raise error.Abort(_(b"cannot commit merge with missing files"))
3283 3284
3284 3285 if editor:
3285 3286 cctx._text = editor(self, cctx, subs)
3286 3287 edited = text != cctx._text
3287 3288
3288 3289 # Save commit message in case this transaction gets rolled back
3289 3290 # (e.g. by a pretxncommit hook). Leave the content alone on
3290 3291 # the assumption that the user will use the same editor again.
3291 3292 msg_path = self.savecommitmessage(cctx._text)
3292 3293
3293 3294 # commit subs and write new state
3294 3295 if subs:
3295 3296 uipathfn = scmutil.getuipathfn(self)
3296 3297 for s in sorted(commitsubs):
3297 3298 sub = wctx.sub(s)
3298 3299 self.ui.status(
3299 3300 _(b'committing subrepository %s\n')
3300 3301 % uipathfn(subrepoutil.subrelpath(sub))
3301 3302 )
3302 3303 sr = sub.commit(cctx._text, user, date)
3303 3304 newstate[s] = (newstate[s][0], sr)
3304 3305 subrepoutil.writestate(self, newstate)
3305 3306
3306 3307 p1, p2 = self.dirstate.parents()
3307 3308 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3308 3309 try:
3309 3310 self.hook(
3310 3311 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3311 3312 )
3312 3313 with self.transaction(b'commit'):
3313 3314 ret = self.commitctx(cctx, True)
3314 3315 # update bookmarks, dirstate and mergestate
3315 3316 bookmarks.update(self, [p1, p2], ret)
3316 3317 cctx.markcommitted(ret)
3317 3318 ms.reset()
3318 3319 except: # re-raises
3319 3320 if edited:
3320 3321 self.ui.write(
3321 3322 _(b'note: commit message saved in %s\n') % msg_path
3322 3323 )
3323 3324 self.ui.write(
3324 3325 _(
3325 3326 b"note: use 'hg commit --logfile "
3326 3327 b"%s --edit' to reuse it\n"
3327 3328 )
3328 3329 % msg_path
3329 3330 )
3330 3331 raise
3331 3332
3332 3333 def commithook(unused_success):
3333 3334 # hack for commands that use a temporary commit (e.g. histedit):
3334 3335 # the temporary commit may have been stripped before the hook runs
3335 3336 if self.changelog.hasnode(ret):
3336 3337 self.hook(
3337 3338 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3338 3339 )
3339 3340
3340 3341 self._afterlock(commithook)
3341 3342 return ret
3342 3343
3343 3344 @unfilteredmethod
3344 3345 def commitctx(self, ctx, error=False, origctx=None):
3345 3346 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3346 3347
3347 3348 @unfilteredmethod
3348 3349 def destroying(self):
3349 3350 """Inform the repository that nodes are about to be destroyed.
3350 3351 Intended for use by strip and rollback, so there's a common
3351 3352 place for anything that has to be done before destroying history.
3352 3353
3353 3354 This is mostly useful for saving state that is in memory and waiting
3354 3355 to be flushed when the current lock is released. Because a call to
3355 3356 destroyed is imminent, the repo will be invalidated causing those
3356 3357 changes to stay in memory (waiting for the next unlock), or vanish
3357 3358 completely.
3358 3359 """
3359 3360 # When using the same lock to commit and strip, the phasecache is left
3360 3361 # dirty after committing. Then when we strip, the repo is invalidated,
3361 3362 # causing those changes to disappear.
3362 3363 if '_phasecache' in vars(self):
3363 3364 self._phasecache.write()
3364 3365
3365 3366 @unfilteredmethod
3366 3367 def destroyed(self):
3367 3368 """Inform the repository that nodes have been destroyed.
3368 3369 Intended for use by strip and rollback, so there's a common
3369 3370 place for anything that has to be done after destroying history.
3370 3371 """
3371 3372 # When one tries to:
3372 3373 # 1) destroy nodes thus calling this method (e.g. strip)
3373 3374 # 2) use phasecache somewhere (e.g. commit)
3374 3375 #
3375 3376 # then 2) will fail because the phasecache contains nodes that were
3376 3377 # removed. We can either remove phasecache from the filecache,
3377 3378 # causing it to reload next time it is accessed, or simply filter
3378 3379 # the removed nodes now and write the updated cache.
3379 3380 self._phasecache.filterunknown(self)
3380 3381 self._phasecache.write()
3381 3382
3382 3383 # refresh all repository caches
3383 3384 self.updatecaches()
3384 3385
3385 3386 # Ensure the persistent tag cache is updated. Doing it now
3386 3387 # means that the tag cache only has to worry about destroyed
3387 3388 # heads immediately after a strip/rollback. That in turn
3388 3389 # guarantees that "cachetip == currenttip" (comparing both rev
3389 3390 # and node) always means no nodes have been added or destroyed.
3390 3391
3391 3392 # XXX this is suboptimal when qrefresh'ing: we strip the current
3392 3393 # head, refresh the tag cache, then immediately add a new head.
3393 3394 # But I think doing it this way is necessary for the "instant
3394 3395 # tag cache retrieval" case to work.
3395 3396 self.invalidate()
3396 3397
3397 3398 def status(
3398 3399 self,
3399 3400 node1=b'.',
3400 3401 node2=None,
3401 3402 match=None,
3402 3403 ignored=False,
3403 3404 clean=False,
3404 3405 unknown=False,
3405 3406 listsubrepos=False,
3406 3407 ):
3407 3408 '''a convenience method that calls node1.status(node2)'''
3408 3409 return self[node1].status(
3409 3410 node2, match, ignored, clean, unknown, listsubrepos
3410 3411 )
3411 3412
3412 3413 def addpostdsstatus(self, ps):
3413 3414 """Add a callback to run within the wlock, at the point at which status
3414 3415 fixups happen.
3415 3416
3416 3417 On status completion, callback(wctx, status) will be called with the
3417 3418 wlock held, unless the dirstate has changed from underneath or the wlock
3418 3419 couldn't be grabbed.
3419 3420
3420 3421 Callbacks should not capture and use a cached copy of the dirstate --
3421 3422 it might change in the meanwhile. Instead, they should access the
3422 3423 dirstate via wctx.repo().dirstate.
3423 3424
3424 3425 This list is emptied out after each status run -- extensions should
3425 3426 make sure they add to this list each time dirstate.status is called.
3426 3427 Extensions should also make sure they don't call this for statuses
3427 3428 that don't involve the dirstate.
3428 3429 """
3429 3430
3430 3431 # The list is located here for uniqueness reasons -- it is actually
3431 3432 # managed by the workingctx, but that isn't unique per-repo.
3432 3433 self._postdsstatus.append(ps)
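# A minimal illustrative callback (the name is hypothetical)::
#
#   def fixups_done(wctx, status):
#       # runs with wlock held; reach the dirstate via
#       # wctx.repo().dirstate, never via a cached reference
#       wctx.repo().ui.debug(b'status fixups applied\n')
#
#   repo.addpostdsstatus(fixups_done)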
3433 3434
3434 3435 def postdsstatus(self):
3435 3436 """Used by workingctx to get the list of post-dirstate-status hooks."""
3436 3437 return self._postdsstatus
3437 3438
3438 3439 def clearpostdsstatus(self):
3439 3440 """Used by workingctx to clear post-dirstate-status hooks."""
3440 3441 del self._postdsstatus[:]
3441 3442
3442 3443 def heads(self, start=None):
3443 3444 if start is None:
3444 3445 cl = self.changelog
3445 3446 headrevs = reversed(cl.headrevs())
3446 3447 return [cl.node(rev) for rev in headrevs]
3447 3448
3448 3449 heads = self.changelog.heads(start)
3449 3450 # sort the output in rev descending order
3450 3451 return sorted(heads, key=self.changelog.rev, reverse=True)
3451 3452
3452 3453 def branchheads(self, branch=None, start=None, closed=False):
3453 3454 """return a (possibly filtered) list of heads for the given branch
3454 3455
3455 3456 Heads are returned in topological order, from newest to oldest.
3456 3457 If branch is None, use the dirstate branch.
3457 3458 If start is not None, return only heads reachable from start.
3458 3459 If closed is True, return heads that are marked as closed as well.
3459 3460 """
3460 3461 if branch is None:
3461 3462 branch = self[None].branch()
3462 3463 branches = self.branchmap()
3463 3464 if not branches.hasbranch(branch):
3464 3465 return []
3465 3466 # the cache returns heads ordered lowest to highest
3466 3467 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3467 3468 if start is not None:
3468 3469 # filter out the heads that cannot be reached from startrev
3469 3470 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3470 3471 bheads = [h for h in bheads if h in fbheads]
3471 3472 return bheads
3472 3473
3473 3474 def branches(self, nodes):
3474 3475 if not nodes:
3475 3476 nodes = [self.changelog.tip()]
3476 3477 b = []
3477 3478 for n in nodes:
3478 3479 t = n
3479 3480 while True:
3480 3481 p = self.changelog.parents(n)
3481 3482 if p[1] != self.nullid or p[0] == self.nullid:
3482 3483 b.append((t, n, p[0], p[1]))
3483 3484 break
3484 3485 n = p[0]
3485 3486 return b
3486 3487
3487 3488 def between(self, pairs):
3488 3489 r = []
3489 3490
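# For each (top, bottom) pair, walk first parents from ``top``
# towards ``bottom`` and record the nodes sitting at exponentially
# growing distances (1, 2, 4, 8, ...); historically this let the
# wire-protocol discovery narrow the gap between known and unknown
# nodes in a logarithmic number of round-trips.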
3490 3491 for top, bottom in pairs:
3491 3492 n, l, i = top, [], 0
3492 3493 f = 1
3493 3494
3494 3495 while n != bottom and n != self.nullid:
3495 3496 p = self.changelog.parents(n)[0]
3496 3497 if i == f:
3497 3498 l.append(n)
3498 3499 f = f * 2
3499 3500 n = p
3500 3501 i += 1
3501 3502
3502 3503 r.append(l)
3503 3504
3504 3505 return r
3505 3506
3506 3507 def checkpush(self, pushop):
3507 3508 """Extensions can override this function if additional checks have
3508 3509 to be performed before pushing, or call it if they override push
3509 3510 command.
3510 3511 """
3511 3512
3512 3513 @unfilteredpropertycache
3513 3514 def prepushoutgoinghooks(self):
3514 3515 """Return util.hooks consists of a pushop with repo, remote, outgoing
3515 3516 methods, which are called before pushing changesets.
3516 3517 """
3517 3518 return util.hooks()
3518 3519
3519 3520 def pushkey(self, namespace, key, old, new):
3520 3521 try:
3521 3522 tr = self.currenttransaction()
3522 3523 hookargs = {}
3523 3524 if tr is not None:
3524 3525 hookargs.update(tr.hookargs)
3525 3526 hookargs = pycompat.strkwargs(hookargs)
3526 3527 hookargs['namespace'] = namespace
3527 3528 hookargs['key'] = key
3528 3529 hookargs['old'] = old
3529 3530 hookargs['new'] = new
3530 3531 self.hook(b'prepushkey', throw=True, **hookargs)
3531 3532 except error.HookAbort as exc:
3532 3533 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3533 3534 if exc.hint:
3534 3535 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3535 3536 return False
3536 3537 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3537 3538 ret = pushkey.push(self, namespace, key, old, new)
3538 3539
3539 3540 def runhook(unused_success):
3540 3541 self.hook(
3541 3542 b'pushkey',
3542 3543 namespace=namespace,
3543 3544 key=key,
3544 3545 old=old,
3545 3546 new=new,
3546 3547 ret=ret,
3547 3548 )
3548 3549
3549 3550 self._afterlock(runhook)
3550 3551 return ret
3551 3552
3552 3553 def listkeys(self, namespace):
3553 3554 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3554 3555 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3555 3556 values = pushkey.list(self, namespace)
3556 3557 self.hook(b'listkeys', namespace=namespace, values=values)
3557 3558 return values
3558 3559
3559 3560 def debugwireargs(self, one, two, three=None, four=None, five=None):
3560 3561 '''used to test argument passing over the wire'''
3561 3562 return b"%s %s %s %s %s" % (
3562 3563 one,
3563 3564 two,
3564 3565 pycompat.bytestr(three),
3565 3566 pycompat.bytestr(four),
3566 3567 pycompat.bytestr(five),
3567 3568 )
3568 3569
3569 3570 def savecommitmessage(self, text):
3570 3571 fp = self.vfs(b'last-message.txt', b'wb')
3571 3572 try:
3572 3573 fp.write(text)
3573 3574 finally:
3574 3575 fp.close()
3575 3576 return self.pathto(fp.name[len(self.root) + 1 :])
3576 3577
3577 3578 def register_wanted_sidedata(self, category):
3578 3579 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3579 3580 # Only revlogv2 repos can want sidedata.
3580 3581 return
3581 3582 self._wanted_sidedata.add(pycompat.bytestr(category))
3582 3583
3583 3584 def register_sidedata_computer(
3584 3585 self, kind, category, keys, computer, flags, replace=False
3585 3586 ):
3586 3587 if kind not in revlogconst.ALL_KINDS:
3587 3588 msg = _(b"unexpected revlog kind '%s'.")
3588 3589 raise error.ProgrammingError(msg % kind)
3589 3590 category = pycompat.bytestr(category)
3590 3591 already_registered = category in self._sidedata_computers.get(kind, [])
3591 3592 if already_registered and not replace:
3592 3593 msg = _(
3593 3594 b"cannot register a sidedata computer twice for category '%s'."
3594 3595 )
3595 3596 raise error.ProgrammingError(msg % category)
3596 3597 if replace and not already_registered:
3597 3598 msg = _(
3598 3599 b"cannot replace a sidedata computer that isn't registered "
3599 3600 b"for category '%s'."
3600 3601 )
3601 3602 raise error.ProgrammingError(msg % category)
3602 3603 self._sidedata_computers.setdefault(kind, {})
3603 3604 self._sidedata_computers[kind][category] = (keys, computer, flags)
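# Sketch of registering a computer for a hypothetical category. The
# computer signature and its ``(sidedata, (add_flags, del_flags))``
# return shape are assumptions modeled on in-tree computers::
#
#   from mercurial.revlogutils import constants as revlogconst
#
#   def compute_example(repo, store, rev, prev_sidedata):
#       return {}, (0, 0)  # no sidedata produced, no flags changed
#
#   repo.register_sidedata_computer(
#       revlogconst.KIND_CHANGELOG,
#       b'exp-example',
#       (b'exp-example',),
#       compute_example,
#       0,
#   )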
3604 3605
3605 3606
3606 3607 def undoname(fn: bytes) -> bytes:
3607 3608 base, name = os.path.split(fn)
3608 3609 assert name.startswith(b'journal')
3609 3610 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3610 3611
3611 3612
3612 3613 def instance(ui, path: bytes, create, intents=None, createopts=None):
3613 3614 # prevent cyclic import localrepo -> upgrade -> localrepo
3614 3615 from . import upgrade
3615 3616
3616 3617 localpath = urlutil.urllocalpath(path)
3617 3618 if create:
3618 3619 createrepository(ui, localpath, createopts=createopts)
3619 3620
3620 3621 def repo_maker():
3621 3622 return makelocalrepository(ui, localpath, intents=intents)
3622 3623
3623 3624 repo = repo_maker()
3624 3625 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3625 3626 return repo
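# Typical use from higher layers (sketch)::
#
#   from mercurial import ui as uimod
#
#   u = uimod.ui.load()
#   repo = instance(u, b'/path/to/repo', create=True)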
3626 3627
3627 3628
3628 3629 def islocal(path: bytes) -> bool:
3629 3630 return True
3630 3631
3631 3632
3632 3633 def defaultcreateopts(ui, createopts=None):
3633 3634 """Populate the default creation options for a repository.
3634 3635
3635 3636 A dictionary of explicitly requested creation options can be passed
3636 3637 in. Missing keys will be populated.
3637 3638 """
3638 3639 createopts = dict(createopts or {})
3639 3640
3640 3641 if b'backend' not in createopts:
3641 3642 # experimental config: storage.new-repo-backend
3642 3643 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3643 3644
3644 3645 return createopts
3645 3646
3646 3647
3647 3648 def clone_requirements(ui, createopts, srcrepo):
3648 3649 """clone the requirements of a local repo for a local clone
3649 3650
3650 3651 The store requirements are unchanged while the working copy requirements
3651 3652 depend on the configuration.
3652 3653 """
3653 3654 target_requirements = set()
3654 3655 if not srcrepo.requirements:
3655 3656 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3656 3657 # with it.
3657 3658 return target_requirements
3658 3659 createopts = defaultcreateopts(ui, createopts=createopts)
3659 3660 for r in newreporequirements(ui, createopts):
3660 3661 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3661 3662 target_requirements.add(r)
3662 3663
3663 3664 for r in srcrepo.requirements:
3664 3665 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3665 3666 target_requirements.add(r)
3666 3667 return target_requirements
3667 3668
3668 3669
3669 3670 def newreporequirements(ui, createopts):
3670 3671 """Determine the set of requirements for a new local repository.
3671 3672
3672 3673 Extensions can wrap this function to specify custom requirements for
3673 3674 new repositories.
3674 3675 """
3675 3676
3676 3677 if b'backend' not in createopts:
3677 3678 raise error.ProgrammingError(
3678 3679 b'backend key not present in createopts; '
3679 3680 b'was defaultcreateopts() called?'
3680 3681 )
3681 3682
3682 3683 if createopts[b'backend'] != b'revlogv1':
3683 3684 raise error.Abort(
3684 3685 _(
3685 3686 b'unable to determine repository requirements for '
3686 3687 b'storage backend: %s'
3687 3688 )
3688 3689 % createopts[b'backend']
3689 3690 )
3690 3691
3691 3692 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3692 3693 if ui.configbool(b'format', b'usestore'):
3693 3694 requirements.add(requirementsmod.STORE_REQUIREMENT)
3694 3695 if ui.configbool(b'format', b'usefncache'):
3695 3696 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3696 3697 if ui.configbool(b'format', b'dotencode'):
3697 3698 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3698 3699
3699 3700 compengines = ui.configlist(b'format', b'revlog-compression')
3700 3701 for compengine in compengines:
3701 3702 if compengine in util.compengines:
3702 3703 engine = util.compengines[compengine]
3703 3704 if engine.available() and engine.revlogheader():
3704 3705 break
3705 3706 else:
3706 3707 raise error.Abort(
3707 3708 _(
3708 3709 b'compression engines %s defined by '
3709 3710 b'format.revlog-compression not available'
3710 3711 )
3711 3712 % b', '.join(b'"%s"' % e for e in compengines),
3712 3713 hint=_(
3713 3714 b'run "hg debuginstall" to list available '
3714 3715 b'compression engines'
3715 3716 ),
3716 3717 )
3717 3718
3718 3719 # zlib is the historical default and doesn't need an explicit requirement.
3719 3720 if compengine == b'zstd':
3720 3721 requirements.add(b'revlog-compression-zstd')
3721 3722 elif compengine != b'zlib':
3722 3723 requirements.add(b'exp-compression-%s' % compengine)
3723 3724
3724 3725 if scmutil.gdinitconfig(ui):
3725 3726 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3726 3727 if ui.configbool(b'format', b'sparse-revlog'):
3727 3728 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3728 3729
3729 3730 # experimental config: format.use-dirstate-v2
3730 3731 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3731 3732 if ui.configbool(b'format', b'use-dirstate-v2'):
3732 3733 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3733 3734
3734 3735 # experimental config: format.exp-use-copies-side-data-changeset
3735 3736 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3736 3737 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3737 3738 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3738 3739 if ui.configbool(b'experimental', b'treemanifest'):
3739 3740 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3740 3741
3741 3742 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3742 3743 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3743 3744 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3744 3745
3745 3746 revlogv2 = ui.config(b'experimental', b'revlogv2')
3746 3747 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3747 3748 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3748 3749 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3749 3750 # experimental config: format.internal-phase
3750 3751 if ui.configbool(b'format', b'use-internal-phase'):
3751 3752 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3752 3753
3753 3754 # experimental config: format.exp-archived-phase
3754 3755 if ui.configbool(b'format', b'exp-archived-phase'):
3755 3756 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3756 3757
3757 3758 if createopts.get(b'narrowfiles'):
3758 3759 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3759 3760
3760 3761 if createopts.get(b'lfs'):
3761 3762 requirements.add(b'lfs')
3762 3763
3763 3764 if ui.configbool(b'format', b'bookmarks-in-store'):
3764 3765 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3765 3766
3766 if ui.configbool(b'format', b'use-persistent-nodemap'):
3767 # The feature is disabled unless a fast implementation is available.
3768 persistent_nodemap_default = policy.importrust('revlog') is not None
3769 if ui.configbool(
3770 b'format', b'use-persistent-nodemap', persistent_nodemap_default
3771 ):
3767 3772 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
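# The hunk above is the standard ``dynamicdefault`` pattern: the config
# item is registered with a dynamic default, so the effective default is
# computed at the call site and passed explicitly to ``configbool``. A
# minimal sketch of the same pattern for a hypothetical item::
#
#   fast_impl = policy.importrust('revlog') is not None
#   if ui.configbool(b'format', b'use-some-feature', fast_impl):
#       requirements.add(SOME_REQUIREMENT)  # hypothetical requirement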
3768 3773
3769 3774 # if share-safe is enabled, let's create the new repository with the new
3770 3775 # requirement
3771 3776 if ui.configbool(b'format', b'use-share-safe'):
3772 3777 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3773 3778
3774 3779 # if we are creating a share-repo¹ we have to handle requirements
3775 3780 # differently.
3776 3781 #
3777 3782 # [1] (i.e. reusing the store from another repository, just having a
3778 3783 # working copy)
3779 3784 if b'sharedrepo' in createopts:
3780 3785 source_requirements = set(createopts[b'sharedrepo'].requirements)
3781 3786
3782 3787 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3783 3788 # share to an old school repository, we have to copy the
3784 3789 # requirements and hope for the best.
3785 3790 requirements = source_requirements
3786 3791 else:
3787 3792 # We have control over the working copy only, so "copy" the non-
3788 3793 # working copy part over, ignoring previous logic.
3789 3794 to_drop = set()
3790 3795 for req in requirements:
3791 3796 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3792 3797 continue
3793 3798 if req in source_requirements:
3794 3799 continue
3795 3800 to_drop.add(req)
3796 3801 requirements -= to_drop
3797 3802 requirements |= source_requirements
3798 3803
3799 3804 if createopts.get(b'sharedrelative'):
3800 3805 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3801 3806 else:
3802 3807 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3803 3808
3804 3809 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3805 3810 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3806 3811 msg = _(b"ignoring unknown tracked key version: %d\n")
3807 3812 hint = _(
3808 3813 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3809 3814 )
3810 3815 if version != 1:
3811 3816 ui.warn(msg % version, hint=hint)
3812 3817 else:
3813 3818 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3814 3819
3815 3820 return requirements
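# For example, enabling ``format.use-share-safe`` before calling this
# function adds the matching requirement (sketch)::
#
#   from mercurial import ui as uimod
#
#   u = uimod.ui.load()
#   u.setconfig(b'format', b'use-share-safe', b'yes')
#   reqs = newreporequirements(u, defaultcreateopts(u))
#   assert requirementsmod.SHARESAFE_REQUIREMENT in reqs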
3816 3821
3817 3822
3818 3823 def checkrequirementscompat(ui, requirements):
3819 3824 """Checks compatibility of repository requirements enabled and disabled.
3820 3825
3821 3826 Returns a set of requirements which need to be dropped because dependent
3822 3827 requirements are not enabled. Also warns users about it.
3823 3828
3824 3829 dropped = set()
3825 3830
3826 3831 if requirementsmod.STORE_REQUIREMENT not in requirements:
3827 3832 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3828 3833 ui.warn(
3829 3834 _(
3830 3835 b'ignoring enabled \'format.bookmarks-in-store\' config '
3831 3836 b'because it is incompatible with disabled '
3832 3837 b'\'format.usestore\' config\n'
3833 3838 )
3834 3839 )
3835 3840 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3836 3841
3837 3842 if (
3838 3843 requirementsmod.SHARED_REQUIREMENT in requirements
3839 3844 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3840 3845 ):
3841 3846 raise error.Abort(
3842 3847 _(
3843 3848 b"cannot create shared repository as source was created"
3844 3849 b" with 'format.usestore' config disabled"
3845 3850 )
3846 3851 )
3847 3852
3848 3853 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3849 3854 if ui.hasconfig(b'format', b'use-share-safe'):
3850 3855 msg = _(
3851 3856 b"ignoring enabled 'format.use-share-safe' config because "
3852 3857 b"it is incompatible with disabled 'format.usestore'"
3853 3858 b" config\n"
3854 3859 )
3855 3860 ui.warn(msg)
3856 3861 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3857 3862
3858 3863 return dropped
3859 3864
3860 3865
3861 3866 def filterknowncreateopts(ui, createopts):
3862 3867 """Filters a dict of repo creation options against options that are known.
3863 3868
3864 3869 Receives a dict of repo creation options and returns a dict of those
3865 3870 options that we don't know how to handle.
3866 3871
3867 3872 This function is called as part of repository creation. If the
3868 3873 returned dict contains any items, repository creation will not
3869 3874 be allowed, as it means there was a request to create a repository
3870 3875 with options not recognized by loaded code.
3871 3876
3872 3877 Extensions can wrap this function to filter out creation options
3873 3878 they know how to handle.
3874 3879 """
3875 3880 known = {
3876 3881 b'backend',
3877 3882 b'lfs',
3878 3883 b'narrowfiles',
3879 3884 b'sharedrepo',
3880 3885 b'sharedrelative',
3881 3886 b'shareditems',
3882 3887 b'shallowfilestore',
3883 3888 }
3884 3889
3885 3890 return {k: v for k, v in createopts.items() if k not in known}
3886 3891
3887 3892
3888 3893 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3889 3894 """Create a new repository in a vfs.
3890 3895
3891 3896 ``path`` path to the new repo's working directory.
3892 3897 ``createopts`` options for the new repository.
3893 3898 ``requirements`` predefined set of requirements.
3894 3899 (incompatible with ``createopts``)
3895 3900
3896 3901 The following keys for ``createopts`` are recognized:
3897 3902
3898 3903 backend
3899 3904 The storage backend to use.
3900 3905 lfs
3901 3906 Repository will be created with ``lfs`` requirement. The lfs extension
3902 3907 will automatically be loaded when the repository is accessed.
3903 3908 narrowfiles
3904 3909 Set up repository to support narrow file storage.
3905 3910 sharedrepo
3906 3911 Repository object from which storage should be shared.
3907 3912 sharedrelative
3908 3913 Boolean indicating if the path to the shared repo should be
3909 3914 stored as relative. By default, the pointer to the "parent" repo
3910 3915 is stored as an absolute path.
3911 3916 shareditems
3912 3917 Set of items to share to the new repository (in addition to storage).
3913 3918 shallowfilestore
3914 3919 Indicates that storage for files should be shallow (not all ancestor
3915 3920 revisions are known).
3916 3921 """
3917 3922
3918 3923 if requirements is not None:
3919 3924 if createopts is not None:
3920 3925 msg = b'cannot specify both createopts and requirements'
3921 3926 raise error.ProgrammingError(msg)
3922 3927 createopts = {}
3923 3928 else:
3924 3929 createopts = defaultcreateopts(ui, createopts=createopts)
3925 3930
3926 3931 unknownopts = filterknowncreateopts(ui, createopts)
3927 3932
3928 3933 if not isinstance(unknownopts, dict):
3929 3934 raise error.ProgrammingError(
3930 3935 b'filterknowncreateopts() did not return a dict'
3931 3936 )
3932 3937
3933 3938 if unknownopts:
3934 3939 raise error.Abort(
3935 3940 _(
3936 3941 b'unable to create repository because of unknown '
3937 3942 b'creation option: %s'
3938 3943 )
3939 3944 % b', '.join(sorted(unknownopts)),
3940 3945 hint=_(b'is a required extension not loaded?'),
3941 3946 )
3942 3947
3943 3948 requirements = newreporequirements(ui, createopts=createopts)
3944 3949 requirements -= checkrequirementscompat(ui, requirements)
3945 3950
3946 3951 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3947 3952
3948 3953 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3949 3954 if hgvfs.exists():
3950 3955 raise error.RepoError(_(b'repository %s already exists') % path)
3951 3956
3952 3957 if b'sharedrepo' in createopts:
3953 3958 sharedpath = createopts[b'sharedrepo'].sharedpath
3954 3959
3955 3960 if createopts.get(b'sharedrelative'):
3956 3961 try:
3957 3962 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3958 3963 sharedpath = util.pconvert(sharedpath)
3959 3964 except (IOError, ValueError) as e:
3960 3965 # ValueError is raised on Windows if the drive letters differ
3961 3966 # on each path.
3962 3967 raise error.Abort(
3963 3968 _(b'cannot calculate relative path'),
3964 3969 hint=stringutil.forcebytestr(e),
3965 3970 )
3966 3971
3967 3972 if not wdirvfs.exists():
3968 3973 wdirvfs.makedirs()
3969 3974
3970 3975 hgvfs.makedir(notindexed=True)
3971 3976 if b'sharedrepo' not in createopts:
3972 3977 hgvfs.mkdir(b'cache')
3973 3978 hgvfs.mkdir(b'wcache')
3974 3979
3975 3980 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3976 3981 if has_store and b'sharedrepo' not in createopts:
3977 3982 hgvfs.mkdir(b'store')
3978 3983
3979 3984 # We create an invalid changelog outside the store so very old
3980 3985 # Mercurial versions (which didn't know about the requirements
3981 3986 # file) encounter an error on reading the changelog. This
3982 3987 # effectively locks out old clients and prevents them from
3983 3988 # mucking with a repo in an unknown format.
3984 3989 #
3985 3990 # The revlog header has version 65535, which won't be recognized by
3986 3991 # such old clients.
3987 3992 hgvfs.append(
3988 3993 b'00changelog.i',
3989 3994 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3990 3995 b'layout',
3991 3996 )
3992 3997
3993 3998 # Filter the requirements into working copy and store ones
3994 3999 wcreq, storereq = scmutil.filterrequirements(requirements)
3995 4000 # write working copy ones
3996 4001 scmutil.writerequires(hgvfs, wcreq)
3997 4002 # If there are store requirements and the current repository
3998 4003 # is not a shared one, write the store requirements.
3999 4004 # For a new shared repository, we don't need to write the store
4000 4005 # requirements as they are already present in the store's requires
4001 4006 if storereq and b'sharedrepo' not in createopts:
4002 4007 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
4003 4008 scmutil.writerequires(storevfs, storereq)
4004 4009
4005 4010 # Write out file telling readers where to find the shared store.
4006 4011 if b'sharedrepo' in createopts:
4007 4012 hgvfs.write(b'sharedpath', sharedpath)
4008 4013
4009 4014 if createopts.get(b'shareditems'):
4010 4015 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
4011 4016 hgvfs.write(b'shared', shared)
4012 4017
4013 4018
4014 4019 def poisonrepository(repo):
4015 4020 """Poison a repository instance so it can no longer be used."""
4016 4021 # Perform any cleanup on the instance.
4017 4022 repo.close()
4018 4023
4019 4024 # Our strategy is to replace the type of the object with one that
4020 4025 # has all attribute lookups result in error.
4021 4026 #
4022 4027 # But we have to allow the close() method because some constructors
4023 4028 # of repos call close() on repo references.
4024 4029 class poisonedrepository:
4025 4030 def __getattribute__(self, item):
4026 4031 if item == 'close':
4027 4032 return object.__getattribute__(self, item)
4028 4033
4029 4034 raise error.ProgrammingError(
4030 4035 b'repo instances should not be used after unshare'
4031 4036 )
4032 4037
4033 4038 def close(self):
4034 4039 pass
4035 4040
4036 4041 # We may have a repoview, which intercepts __setattr__. So be sure
4037 4042 # we operate at the lowest level possible.
4038 4043 object.__setattr__(repo, '__class__', poisonedrepository)
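# After poisoning, only ``close()`` remains callable (sketch)::
#
#   poisonrepository(repo)
#   repo.close()      # still allowed
#   repo.changelog    # raises error.ProgrammingError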