sharesafe: add functionality to automatically downgrade shares...
Pulkit Goyal
r46853:eec47efe default
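This changeset declares a new experimental knob, experimental.sharesafe-auto-downgrade-shares, alongside the existing sharesafe-auto-upgrade-shares item in mercurial/configitems.py, and wires it up in mercurial/localrepo.py. As a rough, illustrative sketch (not part of the diff itself), the option can be toggled like any other config key; this assumes only the standard ui API:

from mercurial import ui as uimod

# Equivalent to setting the key in an hgrc file; the declaration this
# changeset adds below is what makes the key a known config item.
u = uimod.ui.load()
u.setconfig(b'experimental', b'sharesafe-auto-downgrade-shares', b'yes')
assert u.configbool(b'experimental', b'sharesafe-auto-downgrade-shares')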
mercurial/configitems.py
@@ -1,2540 +1,2545 @@
1 1 # configitems.py - centralized declaration of configuration options
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
19 19 def loadconfigtable(ui, extname, configtable):
20 20 """update config item known to the ui with the extension ones"""
21 21 for section, items in sorted(configtable.items()):
22 22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 23 knownkeys = set(knownitems)
24 24 newkeys = set(items)
25 25 for key in sorted(knownkeys & newkeys):
26 26 msg = b"extension '%s' overwrites config item '%s.%s'"
27 27 msg %= (extname, section, key)
28 28 ui.develwarn(msg, config=b'warn-config')
29 29
30 30 knownitems.update(items)
31 31
32 32
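For orientation, loadconfigtable is the hook through which extensions merge their own declarations into ui._knownconfig. A minimal sketch of the configtable shape it expects, built with the getitemregister() helper defined later in this file (the extension name and key are hypothetical):

# Hypothetical extension-side table.
myconfigtable = {}
myregister = getitemregister(myconfigtable)
myregister(b'myext', b'enabled', default=False)
# loadconfigtable(ui, b'myext', myconfigtable) then merges the table,
# emitting a develwarn for any key that collides with a known item.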
33 33 class configitem(object):
34 34 """represent a known config item
35 35
36 36 :section: the official config section where to find this item,
37 37 :name: the official name within the section,
38 38 :default: default value for this item,
39 39 :alias: optional list of tuples as alternatives,
40 40 :generic: this is a generic definition; match the name using a regular expression.
41 41 """
42 42
43 43 def __init__(
44 44 self,
45 45 section,
46 46 name,
47 47 default=None,
48 48 alias=(),
49 49 generic=False,
50 50 priority=0,
51 51 experimental=False,
52 52 ):
53 53 self.section = section
54 54 self.name = name
55 55 self.default = default
56 56 self.alias = list(alias)
57 57 self.generic = generic
58 58 self.priority = priority
59 59 self.experimental = experimental
60 60 self._re = None
61 61 if generic:
62 62 self._re = re.compile(self.name)
63 63
64 64
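A quick illustration of the two flavors the constructor above supports; the generic form compiles its name as a regular expression. This sketch uses hypothetical section and names:

# Plain item: describes exactly one key.
plain = configitem(b'demo', b'verbose', default=False)
# Generic item: `name` is a pattern, compiled in __init__ into self._re.
wildcard = configitem(b'demo', b'.*', default=None, generic=True)
assert wildcard._re.match(b'anything')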
65 65 class itemregister(dict):
66 66 """A specialized dictionary that can handle wild-card selection"""
67 67
68 68 def __init__(self):
69 69 super(itemregister, self).__init__()
70 70 self._generics = set()
71 71
72 72 def update(self, other):
73 73 super(itemregister, self).update(other)
74 74 self._generics.update(other._generics)
75 75
76 76 def __setitem__(self, key, item):
77 77 super(itemregister, self).__setitem__(key, item)
78 78 if item.generic:
79 79 self._generics.add(item)
80 80
81 81 def get(self, key):
82 82 baseitem = super(itemregister, self).get(key)
83 83 if baseitem is not None and not baseitem.generic:
84 84 return baseitem
85 85
86 86 # search for a matching generic item
87 87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 88 for item in generics:
89 89 # we use 'match' instead of 'search' to make the matching simpler
90 90 # for people unfamiliar with regular expressions. Having the match
91 91 # rooted at the start of the string produces less surprising
92 92 # results for users writing simple regexes for sub-attributes.
93 93 #
94 94 # For example, using "color\..*" with match produces an unsurprising
95 95 # result, while using search could suddenly match apparently
96 96 # unrelated configuration that happens to contain "color."
97 97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 98 # some matches to avoid the need to prefix most patterns with "^".
99 99 # The "^" seems more error-prone.
100 100 if item._re.match(key):
101 101 return item
102 102
103 103 return None
104 104
105 105
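The match-versus-search tradeoff described in the comment above is easy to demonstrate standalone; this sketch is illustrative and not Mercurial code:

import re

pattern = re.compile(br'color\..*')
# match() anchors at the start of the key, as itemregister.get does...
assert pattern.match(b'color.diff.inserted')
# ...so a key that merely *contains* "color." is not picked up, although
# search() would have matched it.
assert pattern.match(b'ui.custom.color.mode') is None
assert pattern.search(b'ui.custom.color.mode') is not None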
106 106 coreitems = {}
107 107
108 108
109 109 def _register(configtable, *args, **kwargs):
110 110 item = configitem(*args, **kwargs)
111 111 section = configtable.setdefault(item.section, itemregister())
112 112 if item.name in section:
113 113 msg = b"duplicated config item registration for '%s.%s'"
114 114 raise error.ProgrammingError(msg % (item.section, item.name))
115 115 section[item.name] = item
116 116
117 117
118 118 # special value for the case where the default is derived from other values
119 119 dynamicdefault = object()
120 120
121 121 # Registering actual config items
122 122
123 123
124 124 def getitemregister(configtable):
125 125 f = functools.partial(_register, configtable)
126 126 # export pseudo enum as configitem.*
127 127 f.dynamicdefault = dynamicdefault
128 128 return f
129 129
130 130
131 131 coreconfigitem = getitemregister(coreitems)
132 132
133 133
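Everything below this point goes through the coreconfigitem partial; dynamicdefault is re-exported on it so call sites can flag defaults that are computed at read time. A hedged sketch with hypothetical names:

# Fixed default, stored in coreitems['demo']['knob'].
coreconfigitem(b'demo', b'knob', default=False)
# Default derived elsewhere at lookup time; registering the same key twice
# would raise error.ProgrammingError via _register().
coreconfigitem(b'demo', b'derived', default=coreconfigitem.dynamicdefault)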
134 134 def _registerdiffopts(section, configprefix=b''):
135 135 coreconfigitem(
136 136 section,
137 137 configprefix + b'nodates',
138 138 default=False,
139 139 )
140 140 coreconfigitem(
141 141 section,
142 142 configprefix + b'showfunc',
143 143 default=False,
144 144 )
145 145 coreconfigitem(
146 146 section,
147 147 configprefix + b'unified',
148 148 default=None,
149 149 )
150 150 coreconfigitem(
151 151 section,
152 152 configprefix + b'git',
153 153 default=False,
154 154 )
155 155 coreconfigitem(
156 156 section,
157 157 configprefix + b'ignorews',
158 158 default=False,
159 159 )
160 160 coreconfigitem(
161 161 section,
162 162 configprefix + b'ignorewsamount',
163 163 default=False,
164 164 )
165 165 coreconfigitem(
166 166 section,
167 167 configprefix + b'ignoreblanklines',
168 168 default=False,
169 169 )
170 170 coreconfigitem(
171 171 section,
172 172 configprefix + b'ignorewseol',
173 173 default=False,
174 174 )
175 175 coreconfigitem(
176 176 section,
177 177 configprefix + b'nobinary',
178 178 default=False,
179 179 )
180 180 coreconfigitem(
181 181 section,
182 182 configprefix + b'noprefix',
183 183 default=False,
184 184 )
185 185 coreconfigitem(
186 186 section,
187 187 configprefix + b'word-diff',
188 188 default=False,
189 189 )
190 190
191 191
192 192 coreconfigitem(
193 193 b'alias',
194 194 b'.*',
195 195 default=dynamicdefault,
196 196 generic=True,
197 197 )
198 198 coreconfigitem(
199 199 b'auth',
200 200 b'cookiefile',
201 201 default=None,
202 202 )
203 203 _registerdiffopts(section=b'annotate')
204 204 # bookmarks.pushing: internal hack for discovery
205 205 coreconfigitem(
206 206 b'bookmarks',
207 207 b'pushing',
208 208 default=list,
209 209 )
210 210 # bundle.mainreporoot: internal hack for bundlerepo
211 211 coreconfigitem(
212 212 b'bundle',
213 213 b'mainreporoot',
214 214 default=b'',
215 215 )
216 216 coreconfigitem(
217 217 b'censor',
218 218 b'policy',
219 219 default=b'abort',
220 220 experimental=True,
221 221 )
222 222 coreconfigitem(
223 223 b'chgserver',
224 224 b'idletimeout',
225 225 default=3600,
226 226 )
227 227 coreconfigitem(
228 228 b'chgserver',
229 229 b'skiphash',
230 230 default=False,
231 231 )
232 232 coreconfigitem(
233 233 b'cmdserver',
234 234 b'log',
235 235 default=None,
236 236 )
237 237 coreconfigitem(
238 238 b'cmdserver',
239 239 b'max-log-files',
240 240 default=7,
241 241 )
242 242 coreconfigitem(
243 243 b'cmdserver',
244 244 b'max-log-size',
245 245 default=b'1 MB',
246 246 )
247 247 coreconfigitem(
248 248 b'cmdserver',
249 249 b'max-repo-cache',
250 250 default=0,
251 251 experimental=True,
252 252 )
253 253 coreconfigitem(
254 254 b'cmdserver',
255 255 b'message-encodings',
256 256 default=list,
257 257 )
258 258 coreconfigitem(
259 259 b'cmdserver',
260 260 b'track-log',
261 261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
262 262 )
263 263 coreconfigitem(
264 264 b'cmdserver',
265 265 b'shutdown-on-interrupt',
266 266 default=True,
267 267 )
268 268 coreconfigitem(
269 269 b'color',
270 270 b'.*',
271 271 default=None,
272 272 generic=True,
273 273 )
274 274 coreconfigitem(
275 275 b'color',
276 276 b'mode',
277 277 default=b'auto',
278 278 )
279 279 coreconfigitem(
280 280 b'color',
281 281 b'pagermode',
282 282 default=dynamicdefault,
283 283 )
284 284 coreconfigitem(
285 285 b'command-templates',
286 286 b'graphnode',
287 287 default=None,
288 288 alias=[(b'ui', b'graphnodetemplate')],
289 289 )
290 290 coreconfigitem(
291 291 b'command-templates',
292 292 b'log',
293 293 default=None,
294 294 alias=[(b'ui', b'logtemplate')],
295 295 )
296 296 coreconfigitem(
297 297 b'command-templates',
298 298 b'mergemarker',
299 299 default=(
300 300 b'{node|short} '
301 301 b'{ifeq(tags, "tip", "", '
302 302 b'ifeq(tags, "", "", "{tags} "))}'
303 303 b'{if(bookmarks, "{bookmarks} ")}'
304 304 b'{ifeq(branch, "default", "", "{branch} ")}'
305 305 b'- {author|user}: {desc|firstline}'
306 306 ),
307 307 alias=[(b'ui', b'mergemarkertemplate')],
308 308 )
309 309 coreconfigitem(
310 310 b'command-templates',
311 311 b'pre-merge-tool-output',
312 312 default=None,
313 313 alias=[(b'ui', b'pre-merge-tool-output-template')],
314 314 )
315 315 coreconfigitem(
316 316 b'command-templates',
317 317 b'oneline-summary',
318 318 default=None,
319 319 )
320 320 coreconfigitem(
321 321 b'command-templates',
322 322 b'oneline-summary.*',
323 323 default=dynamicdefault,
324 324 generic=True,
325 325 )
326 326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
327 327 coreconfigitem(
328 328 b'commands',
329 329 b'commit.post-status',
330 330 default=False,
331 331 )
332 332 coreconfigitem(
333 333 b'commands',
334 334 b'grep.all-files',
335 335 default=False,
336 336 experimental=True,
337 337 )
338 338 coreconfigitem(
339 339 b'commands',
340 340 b'merge.require-rev',
341 341 default=False,
342 342 )
343 343 coreconfigitem(
344 344 b'commands',
345 345 b'push.require-revs',
346 346 default=False,
347 347 )
348 348 coreconfigitem(
349 349 b'commands',
350 350 b'resolve.confirm',
351 351 default=False,
352 352 )
353 353 coreconfigitem(
354 354 b'commands',
355 355 b'resolve.explicit-re-merge',
356 356 default=False,
357 357 )
358 358 coreconfigitem(
359 359 b'commands',
360 360 b'resolve.mark-check',
361 361 default=b'none',
362 362 )
363 363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
364 364 coreconfigitem(
365 365 b'commands',
366 366 b'show.aliasprefix',
367 367 default=list,
368 368 )
369 369 coreconfigitem(
370 370 b'commands',
371 371 b'status.relative',
372 372 default=False,
373 373 )
374 374 coreconfigitem(
375 375 b'commands',
376 376 b'status.skipstates',
377 377 default=[],
378 378 experimental=True,
379 379 )
380 380 coreconfigitem(
381 381 b'commands',
382 382 b'status.terse',
383 383 default=b'',
384 384 )
385 385 coreconfigitem(
386 386 b'commands',
387 387 b'status.verbose',
388 388 default=False,
389 389 )
390 390 coreconfigitem(
391 391 b'commands',
392 392 b'update.check',
393 393 default=None,
394 394 )
395 395 coreconfigitem(
396 396 b'commands',
397 397 b'update.requiredest',
398 398 default=False,
399 399 )
400 400 coreconfigitem(
401 401 b'committemplate',
402 402 b'.*',
403 403 default=None,
404 404 generic=True,
405 405 )
406 406 coreconfigitem(
407 407 b'convert',
408 408 b'bzr.saverev',
409 409 default=True,
410 410 )
411 411 coreconfigitem(
412 412 b'convert',
413 413 b'cvsps.cache',
414 414 default=True,
415 415 )
416 416 coreconfigitem(
417 417 b'convert',
418 418 b'cvsps.fuzz',
419 419 default=60,
420 420 )
421 421 coreconfigitem(
422 422 b'convert',
423 423 b'cvsps.logencoding',
424 424 default=None,
425 425 )
426 426 coreconfigitem(
427 427 b'convert',
428 428 b'cvsps.mergefrom',
429 429 default=None,
430 430 )
431 431 coreconfigitem(
432 432 b'convert',
433 433 b'cvsps.mergeto',
434 434 default=None,
435 435 )
436 436 coreconfigitem(
437 437 b'convert',
438 438 b'git.committeractions',
439 439 default=lambda: [b'messagedifferent'],
440 440 )
441 441 coreconfigitem(
442 442 b'convert',
443 443 b'git.extrakeys',
444 444 default=list,
445 445 )
446 446 coreconfigitem(
447 447 b'convert',
448 448 b'git.findcopiesharder',
449 449 default=False,
450 450 )
451 451 coreconfigitem(
452 452 b'convert',
453 453 b'git.remoteprefix',
454 454 default=b'remote',
455 455 )
456 456 coreconfigitem(
457 457 b'convert',
458 458 b'git.renamelimit',
459 459 default=400,
460 460 )
461 461 coreconfigitem(
462 462 b'convert',
463 463 b'git.saverev',
464 464 default=True,
465 465 )
466 466 coreconfigitem(
467 467 b'convert',
468 468 b'git.similarity',
469 469 default=50,
470 470 )
471 471 coreconfigitem(
472 472 b'convert',
473 473 b'git.skipsubmodules',
474 474 default=False,
475 475 )
476 476 coreconfigitem(
477 477 b'convert',
478 478 b'hg.clonebranches',
479 479 default=False,
480 480 )
481 481 coreconfigitem(
482 482 b'convert',
483 483 b'hg.ignoreerrors',
484 484 default=False,
485 485 )
486 486 coreconfigitem(
487 487 b'convert',
488 488 b'hg.preserve-hash',
489 489 default=False,
490 490 )
491 491 coreconfigitem(
492 492 b'convert',
493 493 b'hg.revs',
494 494 default=None,
495 495 )
496 496 coreconfigitem(
497 497 b'convert',
498 498 b'hg.saverev',
499 499 default=False,
500 500 )
501 501 coreconfigitem(
502 502 b'convert',
503 503 b'hg.sourcename',
504 504 default=None,
505 505 )
506 506 coreconfigitem(
507 507 b'convert',
508 508 b'hg.startrev',
509 509 default=None,
510 510 )
511 511 coreconfigitem(
512 512 b'convert',
513 513 b'hg.tagsbranch',
514 514 default=b'default',
515 515 )
516 516 coreconfigitem(
517 517 b'convert',
518 518 b'hg.usebranchnames',
519 519 default=True,
520 520 )
521 521 coreconfigitem(
522 522 b'convert',
523 523 b'ignoreancestorcheck',
524 524 default=False,
525 525 experimental=True,
526 526 )
527 527 coreconfigitem(
528 528 b'convert',
529 529 b'localtimezone',
530 530 default=False,
531 531 )
532 532 coreconfigitem(
533 533 b'convert',
534 534 b'p4.encoding',
535 535 default=dynamicdefault,
536 536 )
537 537 coreconfigitem(
538 538 b'convert',
539 539 b'p4.startrev',
540 540 default=0,
541 541 )
542 542 coreconfigitem(
543 543 b'convert',
544 544 b'skiptags',
545 545 default=False,
546 546 )
547 547 coreconfigitem(
548 548 b'convert',
549 549 b'svn.debugsvnlog',
550 550 default=True,
551 551 )
552 552 coreconfigitem(
553 553 b'convert',
554 554 b'svn.trunk',
555 555 default=None,
556 556 )
557 557 coreconfigitem(
558 558 b'convert',
559 559 b'svn.tags',
560 560 default=None,
561 561 )
562 562 coreconfigitem(
563 563 b'convert',
564 564 b'svn.branches',
565 565 default=None,
566 566 )
567 567 coreconfigitem(
568 568 b'convert',
569 569 b'svn.startrev',
570 570 default=0,
571 571 )
572 572 coreconfigitem(
573 573 b'debug',
574 574 b'dirstate.delaywrite',
575 575 default=0,
576 576 )
577 577 coreconfigitem(
578 578 b'defaults',
579 579 b'.*',
580 580 default=None,
581 581 generic=True,
582 582 )
583 583 coreconfigitem(
584 584 b'devel',
585 585 b'all-warnings',
586 586 default=False,
587 587 )
588 588 coreconfigitem(
589 589 b'devel',
590 590 b'bundle2.debug',
591 591 default=False,
592 592 )
593 593 coreconfigitem(
594 594 b'devel',
595 595 b'bundle.delta',
596 596 default=b'',
597 597 )
598 598 coreconfigitem(
599 599 b'devel',
600 600 b'cache-vfs',
601 601 default=None,
602 602 )
603 603 coreconfigitem(
604 604 b'devel',
605 605 b'check-locks',
606 606 default=False,
607 607 )
608 608 coreconfigitem(
609 609 b'devel',
610 610 b'check-relroot',
611 611 default=False,
612 612 )
613 613 coreconfigitem(
614 614 b'devel',
615 615 b'default-date',
616 616 default=None,
617 617 )
618 618 coreconfigitem(
619 619 b'devel',
620 620 b'deprec-warn',
621 621 default=False,
622 622 )
623 623 coreconfigitem(
624 624 b'devel',
625 625 b'disableloaddefaultcerts',
626 626 default=False,
627 627 )
628 628 coreconfigitem(
629 629 b'devel',
630 630 b'warn-empty-changegroup',
631 631 default=False,
632 632 )
633 633 coreconfigitem(
634 634 b'devel',
635 635 b'legacy.exchange',
636 636 default=list,
637 637 )
638 638 coreconfigitem(
639 639 b'devel',
640 640 b'persistent-nodemap',
641 641 default=False,
642 642 )
643 643 coreconfigitem(
644 644 b'devel',
645 645 b'servercafile',
646 646 default=b'',
647 647 )
648 648 coreconfigitem(
649 649 b'devel',
650 650 b'serverexactprotocol',
651 651 default=b'',
652 652 )
653 653 coreconfigitem(
654 654 b'devel',
655 655 b'serverrequirecert',
656 656 default=False,
657 657 )
658 658 coreconfigitem(
659 659 b'devel',
660 660 b'strip-obsmarkers',
661 661 default=True,
662 662 )
663 663 coreconfigitem(
664 664 b'devel',
665 665 b'warn-config',
666 666 default=None,
667 667 )
668 668 coreconfigitem(
669 669 b'devel',
670 670 b'warn-config-default',
671 671 default=None,
672 672 )
673 673 coreconfigitem(
674 674 b'devel',
675 675 b'user.obsmarker',
676 676 default=None,
677 677 )
678 678 coreconfigitem(
679 679 b'devel',
680 680 b'warn-config-unknown',
681 681 default=None,
682 682 )
683 683 coreconfigitem(
684 684 b'devel',
685 685 b'debug.copies',
686 686 default=False,
687 687 )
688 688 coreconfigitem(
689 689 b'devel',
690 690 b'debug.extensions',
691 691 default=False,
692 692 )
693 693 coreconfigitem(
694 694 b'devel',
695 695 b'debug.repo-filters',
696 696 default=False,
697 697 )
698 698 coreconfigitem(
699 699 b'devel',
700 700 b'debug.peer-request',
701 701 default=False,
702 702 )
703 703 coreconfigitem(
704 704 b'devel',
705 705 b'discovery.randomize',
706 706 default=True,
707 707 )
708 708 _registerdiffopts(section=b'diff')
709 709 coreconfigitem(
710 710 b'email',
711 711 b'bcc',
712 712 default=None,
713 713 )
714 714 coreconfigitem(
715 715 b'email',
716 716 b'cc',
717 717 default=None,
718 718 )
719 719 coreconfigitem(
720 720 b'email',
721 721 b'charsets',
722 722 default=list,
723 723 )
724 724 coreconfigitem(
725 725 b'email',
726 726 b'from',
727 727 default=None,
728 728 )
729 729 coreconfigitem(
730 730 b'email',
731 731 b'method',
732 732 default=b'smtp',
733 733 )
734 734 coreconfigitem(
735 735 b'email',
736 736 b'reply-to',
737 737 default=None,
738 738 )
739 739 coreconfigitem(
740 740 b'email',
741 741 b'to',
742 742 default=None,
743 743 )
744 744 coreconfigitem(
745 745 b'experimental',
746 746 b'archivemetatemplate',
747 747 default=dynamicdefault,
748 748 )
749 749 coreconfigitem(
750 750 b'experimental',
751 751 b'auto-publish',
752 752 default=b'publish',
753 753 )
754 754 coreconfigitem(
755 755 b'experimental',
756 756 b'bundle-phases',
757 757 default=False,
758 758 )
759 759 coreconfigitem(
760 760 b'experimental',
761 761 b'bundle2-advertise',
762 762 default=True,
763 763 )
764 764 coreconfigitem(
765 765 b'experimental',
766 766 b'bundle2-output-capture',
767 767 default=False,
768 768 )
769 769 coreconfigitem(
770 770 b'experimental',
771 771 b'bundle2.pushback',
772 772 default=False,
773 773 )
774 774 coreconfigitem(
775 775 b'experimental',
776 776 b'bundle2lazylocking',
777 777 default=False,
778 778 )
779 779 coreconfigitem(
780 780 b'experimental',
781 781 b'bundlecomplevel',
782 782 default=None,
783 783 )
784 784 coreconfigitem(
785 785 b'experimental',
786 786 b'bundlecomplevel.bzip2',
787 787 default=None,
788 788 )
789 789 coreconfigitem(
790 790 b'experimental',
791 791 b'bundlecomplevel.gzip',
792 792 default=None,
793 793 )
794 794 coreconfigitem(
795 795 b'experimental',
796 796 b'bundlecomplevel.none',
797 797 default=None,
798 798 )
799 799 coreconfigitem(
800 800 b'experimental',
801 801 b'bundlecomplevel.zstd',
802 802 default=None,
803 803 )
804 804 coreconfigitem(
805 805 b'experimental',
806 806 b'changegroup3',
807 807 default=False,
808 808 )
809 809 coreconfigitem(
810 810 b'experimental',
811 811 b'cleanup-as-archived',
812 812 default=False,
813 813 )
814 814 coreconfigitem(
815 815 b'experimental',
816 816 b'clientcompressionengines',
817 817 default=list,
818 818 )
819 819 coreconfigitem(
820 820 b'experimental',
821 821 b'copytrace',
822 822 default=b'on',
823 823 )
824 824 coreconfigitem(
825 825 b'experimental',
826 826 b'copytrace.movecandidateslimit',
827 827 default=100,
828 828 )
829 829 coreconfigitem(
830 830 b'experimental',
831 831 b'copytrace.sourcecommitlimit',
832 832 default=100,
833 833 )
834 834 coreconfigitem(
835 835 b'experimental',
836 836 b'copies.read-from',
837 837 default=b"filelog-only",
838 838 )
839 839 coreconfigitem(
840 840 b'experimental',
841 841 b'copies.write-to',
842 842 default=b'filelog-only',
843 843 )
844 844 coreconfigitem(
845 845 b'experimental',
846 846 b'crecordtest',
847 847 default=None,
848 848 )
849 849 coreconfigitem(
850 850 b'experimental',
851 851 b'directaccess',
852 852 default=False,
853 853 )
854 854 coreconfigitem(
855 855 b'experimental',
856 856 b'directaccess.revnums',
857 857 default=False,
858 858 )
859 859 coreconfigitem(
860 860 b'experimental',
861 861 b'editortmpinhg',
862 862 default=False,
863 863 )
864 864 coreconfigitem(
865 865 b'experimental',
866 866 b'evolution',
867 867 default=list,
868 868 )
869 869 coreconfigitem(
870 870 b'experimental',
871 871 b'evolution.allowdivergence',
872 872 default=False,
873 873 alias=[(b'experimental', b'allowdivergence')],
874 874 )
875 875 coreconfigitem(
876 876 b'experimental',
877 877 b'evolution.allowunstable',
878 878 default=None,
879 879 )
880 880 coreconfigitem(
881 881 b'experimental',
882 882 b'evolution.createmarkers',
883 883 default=None,
884 884 )
885 885 coreconfigitem(
886 886 b'experimental',
887 887 b'evolution.effect-flags',
888 888 default=True,
889 889 alias=[(b'experimental', b'effect-flags')],
890 890 )
891 891 coreconfigitem(
892 892 b'experimental',
893 893 b'evolution.exchange',
894 894 default=None,
895 895 )
896 896 coreconfigitem(
897 897 b'experimental',
898 898 b'evolution.bundle-obsmarker',
899 899 default=False,
900 900 )
901 901 coreconfigitem(
902 902 b'experimental',
903 903 b'evolution.bundle-obsmarker:mandatory',
904 904 default=True,
905 905 )
906 906 coreconfigitem(
907 907 b'experimental',
908 908 b'log.topo',
909 909 default=False,
910 910 )
911 911 coreconfigitem(
912 912 b'experimental',
913 913 b'evolution.report-instabilities',
914 914 default=True,
915 915 )
916 916 coreconfigitem(
917 917 b'experimental',
918 918 b'evolution.track-operation',
919 919 default=True,
920 920 )
921 921 # repo-level config to exclude a revset from visibility
922 922 #
923 923 # The target use case is to use `share` to expose different subsets of the
924 924 # same repository, especially server-side. See also `server.view`.
925 925 coreconfigitem(
926 926 b'experimental',
927 927 b'extra-filter-revs',
928 928 default=None,
929 929 )
930 930 coreconfigitem(
931 931 b'experimental',
932 932 b'maxdeltachainspan',
933 933 default=-1,
934 934 )
935 935 # tracks files which were undeleted (merge might delete them but we explicitly
936 936 # kept/undeleted them) and creates new filenodes for them
937 937 coreconfigitem(
938 938 b'experimental',
939 939 b'merge-track-salvaged',
940 940 default=False,
941 941 )
942 942 coreconfigitem(
943 943 b'experimental',
944 944 b'mergetempdirprefix',
945 945 default=None,
946 946 )
947 947 coreconfigitem(
948 948 b'experimental',
949 949 b'mmapindexthreshold',
950 950 default=None,
951 951 )
952 952 coreconfigitem(
953 953 b'experimental',
954 954 b'narrow',
955 955 default=False,
956 956 )
957 957 coreconfigitem(
958 958 b'experimental',
959 959 b'nonnormalparanoidcheck',
960 960 default=False,
961 961 )
962 962 coreconfigitem(
963 963 b'experimental',
964 964 b'exportableenviron',
965 965 default=list,
966 966 )
967 967 coreconfigitem(
968 968 b'experimental',
969 969 b'extendedheader.index',
970 970 default=None,
971 971 )
972 972 coreconfigitem(
973 973 b'experimental',
974 974 b'extendedheader.similarity',
975 975 default=False,
976 976 )
977 977 coreconfigitem(
978 978 b'experimental',
979 979 b'graphshorten',
980 980 default=False,
981 981 )
982 982 coreconfigitem(
983 983 b'experimental',
984 984 b'graphstyle.parent',
985 985 default=dynamicdefault,
986 986 )
987 987 coreconfigitem(
988 988 b'experimental',
989 989 b'graphstyle.missing',
990 990 default=dynamicdefault,
991 991 )
992 992 coreconfigitem(
993 993 b'experimental',
994 994 b'graphstyle.grandparent',
995 995 default=dynamicdefault,
996 996 )
997 997 coreconfigitem(
998 998 b'experimental',
999 999 b'hook-track-tags',
1000 1000 default=False,
1001 1001 )
1002 1002 coreconfigitem(
1003 1003 b'experimental',
1004 1004 b'httppeer.advertise-v2',
1005 1005 default=False,
1006 1006 )
1007 1007 coreconfigitem(
1008 1008 b'experimental',
1009 1009 b'httppeer.v2-encoder-order',
1010 1010 default=None,
1011 1011 )
1012 1012 coreconfigitem(
1013 1013 b'experimental',
1014 1014 b'httppostargs',
1015 1015 default=False,
1016 1016 )
1017 1017 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1018 1018 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1019 1019
1020 1020 coreconfigitem(
1021 1021 b'experimental',
1022 1022 b'obsmarkers-exchange-debug',
1023 1023 default=False,
1024 1024 )
1025 1025 coreconfigitem(
1026 1026 b'experimental',
1027 1027 b'remotenames',
1028 1028 default=False,
1029 1029 )
1030 1030 coreconfigitem(
1031 1031 b'experimental',
1032 1032 b'removeemptydirs',
1033 1033 default=True,
1034 1034 )
1035 1035 coreconfigitem(
1036 1036 b'experimental',
1037 1037 b'revert.interactive.select-to-keep',
1038 1038 default=False,
1039 1039 )
1040 1040 coreconfigitem(
1041 1041 b'experimental',
1042 1042 b'revisions.prefixhexnode',
1043 1043 default=False,
1044 1044 )
1045 1045 coreconfigitem(
1046 1046 b'experimental',
1047 1047 b'revlogv2',
1048 1048 default=None,
1049 1049 )
1050 1050 coreconfigitem(
1051 1051 b'experimental',
1052 1052 b'revisions.disambiguatewithin',
1053 1053 default=None,
1054 1054 )
1055 1055 coreconfigitem(
1056 1056 b'experimental',
1057 1057 b'rust.index',
1058 1058 default=False,
1059 1059 )
1060 1060 coreconfigitem(
1061 1061 b'experimental',
1062 1062 b'server.filesdata.recommended-batch-size',
1063 1063 default=50000,
1064 1064 )
1065 1065 coreconfigitem(
1066 1066 b'experimental',
1067 1067 b'server.manifestdata.recommended-batch-size',
1068 1068 default=100000,
1069 1069 )
1070 1070 coreconfigitem(
1071 1071 b'experimental',
1072 1072 b'server.stream-narrow-clones',
1073 1073 default=False,
1074 1074 )
1075 1075 coreconfigitem(
1076 1076 b'experimental',
1077 b'sharesafe-auto-downgrade-shares',
1078 default=False,
1079 )
1080 coreconfigitem(
1081 b'experimental',
1077 1082 b'sharesafe-auto-upgrade-shares',
1078 1083 default=False,
1079 1084 )
1080 1085 coreconfigitem(
1081 1086 b'experimental',
1082 1087 b'single-head-per-branch',
1083 1088 default=False,
1084 1089 )
1085 1090 coreconfigitem(
1086 1091 b'experimental',
1087 1092 b'single-head-per-branch:account-closed-heads',
1088 1093 default=False,
1089 1094 )
1090 1095 coreconfigitem(
1091 1096 b'experimental',
1092 1097 b'single-head-per-branch:public-changes-only',
1093 1098 default=False,
1094 1099 )
1095 1100 coreconfigitem(
1096 1101 b'experimental',
1097 1102 b'sshserver.support-v2',
1098 1103 default=False,
1099 1104 )
1100 1105 coreconfigitem(
1101 1106 b'experimental',
1102 1107 b'sparse-read',
1103 1108 default=False,
1104 1109 )
1105 1110 coreconfigitem(
1106 1111 b'experimental',
1107 1112 b'sparse-read.density-threshold',
1108 1113 default=0.50,
1109 1114 )
1110 1115 coreconfigitem(
1111 1116 b'experimental',
1112 1117 b'sparse-read.min-gap-size',
1113 1118 default=b'65K',
1114 1119 )
1115 1120 coreconfigitem(
1116 1121 b'experimental',
1117 1122 b'treemanifest',
1118 1123 default=False,
1119 1124 )
1120 1125 coreconfigitem(
1121 1126 b'experimental',
1122 1127 b'update.atomic-file',
1123 1128 default=False,
1124 1129 )
1125 1130 coreconfigitem(
1126 1131 b'experimental',
1127 1132 b'sshpeer.advertise-v2',
1128 1133 default=False,
1129 1134 )
1130 1135 coreconfigitem(
1131 1136 b'experimental',
1132 1137 b'web.apiserver',
1133 1138 default=False,
1134 1139 )
1135 1140 coreconfigitem(
1136 1141 b'experimental',
1137 1142 b'web.api.http-v2',
1138 1143 default=False,
1139 1144 )
1140 1145 coreconfigitem(
1141 1146 b'experimental',
1142 1147 b'web.api.debugreflect',
1143 1148 default=False,
1144 1149 )
1145 1150 coreconfigitem(
1146 1151 b'experimental',
1147 1152 b'worker.wdir-get-thread-safe',
1148 1153 default=False,
1149 1154 )
1150 1155 coreconfigitem(
1151 1156 b'experimental',
1152 1157 b'worker.repository-upgrade',
1153 1158 default=False,
1154 1159 )
1155 1160 coreconfigitem(
1156 1161 b'experimental',
1157 1162 b'xdiff',
1158 1163 default=False,
1159 1164 )
1160 1165 coreconfigitem(
1161 1166 b'extensions',
1162 1167 b'.*',
1163 1168 default=None,
1164 1169 generic=True,
1165 1170 )
1166 1171 coreconfigitem(
1167 1172 b'extdata',
1168 1173 b'.*',
1169 1174 default=None,
1170 1175 generic=True,
1171 1176 )
1172 1177 coreconfigitem(
1173 1178 b'format',
1174 1179 b'bookmarks-in-store',
1175 1180 default=False,
1176 1181 )
1177 1182 coreconfigitem(
1178 1183 b'format',
1179 1184 b'chunkcachesize',
1180 1185 default=None,
1181 1186 experimental=True,
1182 1187 )
1183 1188 coreconfigitem(
1184 1189 b'format',
1185 1190 b'dotencode',
1186 1191 default=True,
1187 1192 )
1188 1193 coreconfigitem(
1189 1194 b'format',
1190 1195 b'generaldelta',
1191 1196 default=False,
1192 1197 experimental=True,
1193 1198 )
1194 1199 coreconfigitem(
1195 1200 b'format',
1196 1201 b'manifestcachesize',
1197 1202 default=None,
1198 1203 experimental=True,
1199 1204 )
1200 1205 coreconfigitem(
1201 1206 b'format',
1202 1207 b'maxchainlen',
1203 1208 default=dynamicdefault,
1204 1209 experimental=True,
1205 1210 )
1206 1211 coreconfigitem(
1207 1212 b'format',
1208 1213 b'obsstore-version',
1209 1214 default=None,
1210 1215 )
1211 1216 coreconfigitem(
1212 1217 b'format',
1213 1218 b'sparse-revlog',
1214 1219 default=True,
1215 1220 )
1216 1221 coreconfigitem(
1217 1222 b'format',
1218 1223 b'revlog-compression',
1219 1224 default=lambda: [b'zlib'],
1220 1225 alias=[(b'experimental', b'format.compression')],
1221 1226 )
1222 1227 coreconfigitem(
1223 1228 b'format',
1224 1229 b'usefncache',
1225 1230 default=True,
1226 1231 )
1227 1232 coreconfigitem(
1228 1233 b'format',
1229 1234 b'usegeneraldelta',
1230 1235 default=True,
1231 1236 )
1232 1237 coreconfigitem(
1233 1238 b'format',
1234 1239 b'usestore',
1235 1240 default=True,
1236 1241 )
1237 1242 # Right now, the only efficient implementation of the nodemap logic is in Rust, so
1238 1243 # the persistent nodemap feature needs to stay experimental as long as the Rust
1239 1244 # extensions are an experimental feature.
1240 1245 coreconfigitem(
1241 1246 b'format', b'use-persistent-nodemap', default=False, experimental=True
1242 1247 )
1243 1248 coreconfigitem(
1244 1249 b'format',
1245 1250 b'exp-use-copies-side-data-changeset',
1246 1251 default=False,
1247 1252 experimental=True,
1248 1253 )
1249 1254 coreconfigitem(
1250 1255 b'format',
1251 1256 b'exp-use-side-data',
1252 1257 default=False,
1253 1258 experimental=True,
1254 1259 )
1255 1260 coreconfigitem(
1256 1261 b'format',
1257 1262 b'exp-share-safe',
1258 1263 default=False,
1259 1264 experimental=True,
1260 1265 )
1261 1266 coreconfigitem(
1262 1267 b'format',
1263 1268 b'internal-phase',
1264 1269 default=False,
1265 1270 experimental=True,
1266 1271 )
1267 1272 coreconfigitem(
1268 1273 b'fsmonitor',
1269 1274 b'warn_when_unused',
1270 1275 default=True,
1271 1276 )
1272 1277 coreconfigitem(
1273 1278 b'fsmonitor',
1274 1279 b'warn_update_file_count',
1275 1280 default=50000,
1276 1281 )
1277 1282 coreconfigitem(
1278 1283 b'fsmonitor',
1279 1284 b'warn_update_file_count_rust',
1280 1285 default=400000,
1281 1286 )
1282 1287 coreconfigitem(
1283 1288 b'help',
1284 1289 br'hidden-command\..*',
1285 1290 default=False,
1286 1291 generic=True,
1287 1292 )
1288 1293 coreconfigitem(
1289 1294 b'help',
1290 1295 br'hidden-topic\..*',
1291 1296 default=False,
1292 1297 generic=True,
1293 1298 )
1294 1299 coreconfigitem(
1295 1300 b'hooks',
1296 1301 b'.*',
1297 1302 default=dynamicdefault,
1298 1303 generic=True,
1299 1304 )
1300 1305 coreconfigitem(
1301 1306 b'hgweb-paths',
1302 1307 b'.*',
1303 1308 default=list,
1304 1309 generic=True,
1305 1310 )
1306 1311 coreconfigitem(
1307 1312 b'hostfingerprints',
1308 1313 b'.*',
1309 1314 default=list,
1310 1315 generic=True,
1311 1316 )
1312 1317 coreconfigitem(
1313 1318 b'hostsecurity',
1314 1319 b'ciphers',
1315 1320 default=None,
1316 1321 )
1317 1322 coreconfigitem(
1318 1323 b'hostsecurity',
1319 1324 b'minimumprotocol',
1320 1325 default=dynamicdefault,
1321 1326 )
1322 1327 coreconfigitem(
1323 1328 b'hostsecurity',
1324 1329 b'.*:minimumprotocol$',
1325 1330 default=dynamicdefault,
1326 1331 generic=True,
1327 1332 )
1328 1333 coreconfigitem(
1329 1334 b'hostsecurity',
1330 1335 b'.*:ciphers$',
1331 1336 default=dynamicdefault,
1332 1337 generic=True,
1333 1338 )
1334 1339 coreconfigitem(
1335 1340 b'hostsecurity',
1336 1341 b'.*:fingerprints$',
1337 1342 default=list,
1338 1343 generic=True,
1339 1344 )
1340 1345 coreconfigitem(
1341 1346 b'hostsecurity',
1342 1347 b'.*:verifycertsfile$',
1343 1348 default=None,
1344 1349 generic=True,
1345 1350 )
1346 1351
1347 1352 coreconfigitem(
1348 1353 b'http_proxy',
1349 1354 b'always',
1350 1355 default=False,
1351 1356 )
1352 1357 coreconfigitem(
1353 1358 b'http_proxy',
1354 1359 b'host',
1355 1360 default=None,
1356 1361 )
1357 1362 coreconfigitem(
1358 1363 b'http_proxy',
1359 1364 b'no',
1360 1365 default=list,
1361 1366 )
1362 1367 coreconfigitem(
1363 1368 b'http_proxy',
1364 1369 b'passwd',
1365 1370 default=None,
1366 1371 )
1367 1372 coreconfigitem(
1368 1373 b'http_proxy',
1369 1374 b'user',
1370 1375 default=None,
1371 1376 )
1372 1377
1373 1378 coreconfigitem(
1374 1379 b'http',
1375 1380 b'timeout',
1376 1381 default=None,
1377 1382 )
1378 1383
1379 1384 coreconfigitem(
1380 1385 b'logtoprocess',
1381 1386 b'commandexception',
1382 1387 default=None,
1383 1388 )
1384 1389 coreconfigitem(
1385 1390 b'logtoprocess',
1386 1391 b'commandfinish',
1387 1392 default=None,
1388 1393 )
1389 1394 coreconfigitem(
1390 1395 b'logtoprocess',
1391 1396 b'command',
1392 1397 default=None,
1393 1398 )
1394 1399 coreconfigitem(
1395 1400 b'logtoprocess',
1396 1401 b'develwarn',
1397 1402 default=None,
1398 1403 )
1399 1404 coreconfigitem(
1400 1405 b'logtoprocess',
1401 1406 b'uiblocked',
1402 1407 default=None,
1403 1408 )
1404 1409 coreconfigitem(
1405 1410 b'merge',
1406 1411 b'checkunknown',
1407 1412 default=b'abort',
1408 1413 )
1409 1414 coreconfigitem(
1410 1415 b'merge',
1411 1416 b'checkignored',
1412 1417 default=b'abort',
1413 1418 )
1414 1419 coreconfigitem(
1415 1420 b'experimental',
1416 1421 b'merge.checkpathconflicts',
1417 1422 default=False,
1418 1423 )
1419 1424 coreconfigitem(
1420 1425 b'merge',
1421 1426 b'followcopies',
1422 1427 default=True,
1423 1428 )
1424 1429 coreconfigitem(
1425 1430 b'merge',
1426 1431 b'on-failure',
1427 1432 default=b'continue',
1428 1433 )
1429 1434 coreconfigitem(
1430 1435 b'merge',
1431 1436 b'preferancestor',
1432 1437 default=lambda: [b'*'],
1433 1438 experimental=True,
1434 1439 )
1435 1440 coreconfigitem(
1436 1441 b'merge',
1437 1442 b'strict-capability-check',
1438 1443 default=False,
1439 1444 )
1440 1445 coreconfigitem(
1441 1446 b'merge-tools',
1442 1447 b'.*',
1443 1448 default=None,
1444 1449 generic=True,
1445 1450 )
1446 1451 coreconfigitem(
1447 1452 b'merge-tools',
1448 1453 br'.*\.args$',
1449 1454 default=b"$local $base $other",
1450 1455 generic=True,
1451 1456 priority=-1,
1452 1457 )
1453 1458 coreconfigitem(
1454 1459 b'merge-tools',
1455 1460 br'.*\.binary$',
1456 1461 default=False,
1457 1462 generic=True,
1458 1463 priority=-1,
1459 1464 )
1460 1465 coreconfigitem(
1461 1466 b'merge-tools',
1462 1467 br'.*\.check$',
1463 1468 default=list,
1464 1469 generic=True,
1465 1470 priority=-1,
1466 1471 )
1467 1472 coreconfigitem(
1468 1473 b'merge-tools',
1469 1474 br'.*\.checkchanged$',
1470 1475 default=False,
1471 1476 generic=True,
1472 1477 priority=-1,
1473 1478 )
1474 1479 coreconfigitem(
1475 1480 b'merge-tools',
1476 1481 br'.*\.executable$',
1477 1482 default=dynamicdefault,
1478 1483 generic=True,
1479 1484 priority=-1,
1480 1485 )
1481 1486 coreconfigitem(
1482 1487 b'merge-tools',
1483 1488 br'.*\.fixeol$',
1484 1489 default=False,
1485 1490 generic=True,
1486 1491 priority=-1,
1487 1492 )
1488 1493 coreconfigitem(
1489 1494 b'merge-tools',
1490 1495 br'.*\.gui$',
1491 1496 default=False,
1492 1497 generic=True,
1493 1498 priority=-1,
1494 1499 )
1495 1500 coreconfigitem(
1496 1501 b'merge-tools',
1497 1502 br'.*\.mergemarkers$',
1498 1503 default=b'basic',
1499 1504 generic=True,
1500 1505 priority=-1,
1501 1506 )
1502 1507 coreconfigitem(
1503 1508 b'merge-tools',
1504 1509 br'.*\.mergemarkertemplate$',
1505 1510 default=dynamicdefault, # take from command-templates.mergemarker
1506 1511 generic=True,
1507 1512 priority=-1,
1508 1513 )
1509 1514 coreconfigitem(
1510 1515 b'merge-tools',
1511 1516 br'.*\.priority$',
1512 1517 default=0,
1513 1518 generic=True,
1514 1519 priority=-1,
1515 1520 )
1516 1521 coreconfigitem(
1517 1522 b'merge-tools',
1518 1523 br'.*\.premerge$',
1519 1524 default=dynamicdefault,
1520 1525 generic=True,
1521 1526 priority=-1,
1522 1527 )
1523 1528 coreconfigitem(
1524 1529 b'merge-tools',
1525 1530 br'.*\.symlink$',
1526 1531 default=False,
1527 1532 generic=True,
1528 1533 priority=-1,
1529 1534 )
1530 1535 coreconfigitem(
1531 1536 b'pager',
1532 1537 b'attend-.*',
1533 1538 default=dynamicdefault,
1534 1539 generic=True,
1535 1540 )
1536 1541 coreconfigitem(
1537 1542 b'pager',
1538 1543 b'ignore',
1539 1544 default=list,
1540 1545 )
1541 1546 coreconfigitem(
1542 1547 b'pager',
1543 1548 b'pager',
1544 1549 default=dynamicdefault,
1545 1550 )
1546 1551 coreconfigitem(
1547 1552 b'patch',
1548 1553 b'eol',
1549 1554 default=b'strict',
1550 1555 )
1551 1556 coreconfigitem(
1552 1557 b'patch',
1553 1558 b'fuzz',
1554 1559 default=2,
1555 1560 )
1556 1561 coreconfigitem(
1557 1562 b'paths',
1558 1563 b'default',
1559 1564 default=None,
1560 1565 )
1561 1566 coreconfigitem(
1562 1567 b'paths',
1563 1568 b'default-push',
1564 1569 default=None,
1565 1570 )
1566 1571 coreconfigitem(
1567 1572 b'paths',
1568 1573 b'.*',
1569 1574 default=None,
1570 1575 generic=True,
1571 1576 )
1572 1577 coreconfigitem(
1573 1578 b'phases',
1574 1579 b'checksubrepos',
1575 1580 default=b'follow',
1576 1581 )
1577 1582 coreconfigitem(
1578 1583 b'phases',
1579 1584 b'new-commit',
1580 1585 default=b'draft',
1581 1586 )
1582 1587 coreconfigitem(
1583 1588 b'phases',
1584 1589 b'publish',
1585 1590 default=True,
1586 1591 )
1587 1592 coreconfigitem(
1588 1593 b'profiling',
1589 1594 b'enabled',
1590 1595 default=False,
1591 1596 )
1592 1597 coreconfigitem(
1593 1598 b'profiling',
1594 1599 b'format',
1595 1600 default=b'text',
1596 1601 )
1597 1602 coreconfigitem(
1598 1603 b'profiling',
1599 1604 b'freq',
1600 1605 default=1000,
1601 1606 )
1602 1607 coreconfigitem(
1603 1608 b'profiling',
1604 1609 b'limit',
1605 1610 default=30,
1606 1611 )
1607 1612 coreconfigitem(
1608 1613 b'profiling',
1609 1614 b'nested',
1610 1615 default=0,
1611 1616 )
1612 1617 coreconfigitem(
1613 1618 b'profiling',
1614 1619 b'output',
1615 1620 default=None,
1616 1621 )
1617 1622 coreconfigitem(
1618 1623 b'profiling',
1619 1624 b'showmax',
1620 1625 default=0.999,
1621 1626 )
1622 1627 coreconfigitem(
1623 1628 b'profiling',
1624 1629 b'showmin',
1625 1630 default=dynamicdefault,
1626 1631 )
1627 1632 coreconfigitem(
1628 1633 b'profiling',
1629 1634 b'showtime',
1630 1635 default=True,
1631 1636 )
1632 1637 coreconfigitem(
1633 1638 b'profiling',
1634 1639 b'sort',
1635 1640 default=b'inlinetime',
1636 1641 )
1637 1642 coreconfigitem(
1638 1643 b'profiling',
1639 1644 b'statformat',
1640 1645 default=b'hotpath',
1641 1646 )
1642 1647 coreconfigitem(
1643 1648 b'profiling',
1644 1649 b'time-track',
1645 1650 default=dynamicdefault,
1646 1651 )
1647 1652 coreconfigitem(
1648 1653 b'profiling',
1649 1654 b'type',
1650 1655 default=b'stat',
1651 1656 )
1652 1657 coreconfigitem(
1653 1658 b'progress',
1654 1659 b'assume-tty',
1655 1660 default=False,
1656 1661 )
1657 1662 coreconfigitem(
1658 1663 b'progress',
1659 1664 b'changedelay',
1660 1665 default=1,
1661 1666 )
1662 1667 coreconfigitem(
1663 1668 b'progress',
1664 1669 b'clear-complete',
1665 1670 default=True,
1666 1671 )
1667 1672 coreconfigitem(
1668 1673 b'progress',
1669 1674 b'debug',
1670 1675 default=False,
1671 1676 )
1672 1677 coreconfigitem(
1673 1678 b'progress',
1674 1679 b'delay',
1675 1680 default=3,
1676 1681 )
1677 1682 coreconfigitem(
1678 1683 b'progress',
1679 1684 b'disable',
1680 1685 default=False,
1681 1686 )
1682 1687 coreconfigitem(
1683 1688 b'progress',
1684 1689 b'estimateinterval',
1685 1690 default=60.0,
1686 1691 )
1687 1692 coreconfigitem(
1688 1693 b'progress',
1689 1694 b'format',
1690 1695 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1691 1696 )
1692 1697 coreconfigitem(
1693 1698 b'progress',
1694 1699 b'refresh',
1695 1700 default=0.1,
1696 1701 )
1697 1702 coreconfigitem(
1698 1703 b'progress',
1699 1704 b'width',
1700 1705 default=dynamicdefault,
1701 1706 )
1702 1707 coreconfigitem(
1703 1708 b'pull',
1704 1709 b'confirm',
1705 1710 default=False,
1706 1711 )
1707 1712 coreconfigitem(
1708 1713 b'push',
1709 1714 b'pushvars.server',
1710 1715 default=False,
1711 1716 )
1712 1717 coreconfigitem(
1713 1718 b'rewrite',
1714 1719 b'backup-bundle',
1715 1720 default=True,
1716 1721 alias=[(b'ui', b'history-editing-backup')],
1717 1722 )
1718 1723 coreconfigitem(
1719 1724 b'rewrite',
1720 1725 b'update-timestamp',
1721 1726 default=False,
1722 1727 )
1723 1728 coreconfigitem(
1724 1729 b'rewrite',
1725 1730 b'empty-successor',
1726 1731 default=b'skip',
1727 1732 experimental=True,
1728 1733 )
1729 1734 coreconfigitem(
1730 1735 b'storage',
1731 1736 b'new-repo-backend',
1732 1737 default=b'revlogv1',
1733 1738 experimental=True,
1734 1739 )
1735 1740 coreconfigitem(
1736 1741 b'storage',
1737 1742 b'revlog.optimize-delta-parent-choice',
1738 1743 default=True,
1739 1744 alias=[(b'format', b'aggressivemergedeltas')],
1740 1745 )
1741 1746 # experimental as long as Rust is experimental (or a C version is implemented)
1742 1747 coreconfigitem(
1743 1748 b'storage', b'revlog.nodemap.mmap', default=True, experimental=True
1744 1749 )
1745 1750 # experimental as long as format.use-persistent-nodemap is.
1746 1751 coreconfigitem(
1747 1752 b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True
1748 1753 )
1749 1754 coreconfigitem(
1750 1755 b'storage',
1751 1756 b'revlog.reuse-external-delta',
1752 1757 default=True,
1753 1758 )
1754 1759 coreconfigitem(
1755 1760 b'storage',
1756 1761 b'revlog.reuse-external-delta-parent',
1757 1762 default=None,
1758 1763 )
1759 1764 coreconfigitem(
1760 1765 b'storage',
1761 1766 b'revlog.zlib.level',
1762 1767 default=None,
1763 1768 )
1764 1769 coreconfigitem(
1765 1770 b'storage',
1766 1771 b'revlog.zstd.level',
1767 1772 default=None,
1768 1773 )
1769 1774 coreconfigitem(
1770 1775 b'server',
1771 1776 b'bookmarks-pushkey-compat',
1772 1777 default=True,
1773 1778 )
1774 1779 coreconfigitem(
1775 1780 b'server',
1776 1781 b'bundle1',
1777 1782 default=True,
1778 1783 )
1779 1784 coreconfigitem(
1780 1785 b'server',
1781 1786 b'bundle1gd',
1782 1787 default=None,
1783 1788 )
1784 1789 coreconfigitem(
1785 1790 b'server',
1786 1791 b'bundle1.pull',
1787 1792 default=None,
1788 1793 )
1789 1794 coreconfigitem(
1790 1795 b'server',
1791 1796 b'bundle1gd.pull',
1792 1797 default=None,
1793 1798 )
1794 1799 coreconfigitem(
1795 1800 b'server',
1796 1801 b'bundle1.push',
1797 1802 default=None,
1798 1803 )
1799 1804 coreconfigitem(
1800 1805 b'server',
1801 1806 b'bundle1gd.push',
1802 1807 default=None,
1803 1808 )
1804 1809 coreconfigitem(
1805 1810 b'server',
1806 1811 b'bundle2.stream',
1807 1812 default=True,
1808 1813 alias=[(b'experimental', b'bundle2.stream')],
1809 1814 )
1810 1815 coreconfigitem(
1811 1816 b'server',
1812 1817 b'compressionengines',
1813 1818 default=list,
1814 1819 )
1815 1820 coreconfigitem(
1816 1821 b'server',
1817 1822 b'concurrent-push-mode',
1818 1823 default=b'check-related',
1819 1824 )
1820 1825 coreconfigitem(
1821 1826 b'server',
1822 1827 b'disablefullbundle',
1823 1828 default=False,
1824 1829 )
1825 1830 coreconfigitem(
1826 1831 b'server',
1827 1832 b'maxhttpheaderlen',
1828 1833 default=1024,
1829 1834 )
1830 1835 coreconfigitem(
1831 1836 b'server',
1832 1837 b'pullbundle',
1833 1838 default=False,
1834 1839 )
1835 1840 coreconfigitem(
1836 1841 b'server',
1837 1842 b'preferuncompressed',
1838 1843 default=False,
1839 1844 )
1840 1845 coreconfigitem(
1841 1846 b'server',
1842 1847 b'streamunbundle',
1843 1848 default=False,
1844 1849 )
1845 1850 coreconfigitem(
1846 1851 b'server',
1847 1852 b'uncompressed',
1848 1853 default=True,
1849 1854 )
1850 1855 coreconfigitem(
1851 1856 b'server',
1852 1857 b'uncompressedallowsecret',
1853 1858 default=False,
1854 1859 )
1855 1860 coreconfigitem(
1856 1861 b'server',
1857 1862 b'view',
1858 1863 default=b'served',
1859 1864 )
1860 1865 coreconfigitem(
1861 1866 b'server',
1862 1867 b'validate',
1863 1868 default=False,
1864 1869 )
1865 1870 coreconfigitem(
1866 1871 b'server',
1867 1872 b'zliblevel',
1868 1873 default=-1,
1869 1874 )
1870 1875 coreconfigitem(
1871 1876 b'server',
1872 1877 b'zstdlevel',
1873 1878 default=3,
1874 1879 )
1875 1880 coreconfigitem(
1876 1881 b'share',
1877 1882 b'pool',
1878 1883 default=None,
1879 1884 )
1880 1885 coreconfigitem(
1881 1886 b'share',
1882 1887 b'poolnaming',
1883 1888 default=b'identity',
1884 1889 )
1885 1890 coreconfigitem(
1886 1891 b'shelve',
1887 1892 b'maxbackups',
1888 1893 default=10,
1889 1894 )
1890 1895 coreconfigitem(
1891 1896 b'smtp',
1892 1897 b'host',
1893 1898 default=None,
1894 1899 )
1895 1900 coreconfigitem(
1896 1901 b'smtp',
1897 1902 b'local_hostname',
1898 1903 default=None,
1899 1904 )
1900 1905 coreconfigitem(
1901 1906 b'smtp',
1902 1907 b'password',
1903 1908 default=None,
1904 1909 )
1905 1910 coreconfigitem(
1906 1911 b'smtp',
1907 1912 b'port',
1908 1913 default=dynamicdefault,
1909 1914 )
1910 1915 coreconfigitem(
1911 1916 b'smtp',
1912 1917 b'tls',
1913 1918 default=b'none',
1914 1919 )
1915 1920 coreconfigitem(
1916 1921 b'smtp',
1917 1922 b'username',
1918 1923 default=None,
1919 1924 )
1920 1925 coreconfigitem(
1921 1926 b'sparse',
1922 1927 b'missingwarning',
1923 1928 default=True,
1924 1929 experimental=True,
1925 1930 )
1926 1931 coreconfigitem(
1927 1932 b'subrepos',
1928 1933 b'allowed',
1929 1934 default=dynamicdefault, # to make backporting simpler
1930 1935 )
1931 1936 coreconfigitem(
1932 1937 b'subrepos',
1933 1938 b'hg:allowed',
1934 1939 default=dynamicdefault,
1935 1940 )
1936 1941 coreconfigitem(
1937 1942 b'subrepos',
1938 1943 b'git:allowed',
1939 1944 default=dynamicdefault,
1940 1945 )
1941 1946 coreconfigitem(
1942 1947 b'subrepos',
1943 1948 b'svn:allowed',
1944 1949 default=dynamicdefault,
1945 1950 )
1946 1951 coreconfigitem(
1947 1952 b'templates',
1948 1953 b'.*',
1949 1954 default=None,
1950 1955 generic=True,
1951 1956 )
1952 1957 coreconfigitem(
1953 1958 b'templateconfig',
1954 1959 b'.*',
1955 1960 default=dynamicdefault,
1956 1961 generic=True,
1957 1962 )
1958 1963 coreconfigitem(
1959 1964 b'trusted',
1960 1965 b'groups',
1961 1966 default=list,
1962 1967 )
1963 1968 coreconfigitem(
1964 1969 b'trusted',
1965 1970 b'users',
1966 1971 default=list,
1967 1972 )
1968 1973 coreconfigitem(
1969 1974 b'ui',
1970 1975 b'_usedassubrepo',
1971 1976 default=False,
1972 1977 )
1973 1978 coreconfigitem(
1974 1979 b'ui',
1975 1980 b'allowemptycommit',
1976 1981 default=False,
1977 1982 )
1978 1983 coreconfigitem(
1979 1984 b'ui',
1980 1985 b'archivemeta',
1981 1986 default=True,
1982 1987 )
1983 1988 coreconfigitem(
1984 1989 b'ui',
1985 1990 b'askusername',
1986 1991 default=False,
1987 1992 )
1988 1993 coreconfigitem(
1989 1994 b'ui',
1990 1995 b'available-memory',
1991 1996 default=None,
1992 1997 )
1993 1998
1994 1999 coreconfigitem(
1995 2000 b'ui',
1996 2001 b'clonebundlefallback',
1997 2002 default=False,
1998 2003 )
1999 2004 coreconfigitem(
2000 2005 b'ui',
2001 2006 b'clonebundleprefers',
2002 2007 default=list,
2003 2008 )
2004 2009 coreconfigitem(
2005 2010 b'ui',
2006 2011 b'clonebundles',
2007 2012 default=True,
2008 2013 )
2009 2014 coreconfigitem(
2010 2015 b'ui',
2011 2016 b'color',
2012 2017 default=b'auto',
2013 2018 )
2014 2019 coreconfigitem(
2015 2020 b'ui',
2016 2021 b'commitsubrepos',
2017 2022 default=False,
2018 2023 )
2019 2024 coreconfigitem(
2020 2025 b'ui',
2021 2026 b'debug',
2022 2027 default=False,
2023 2028 )
2024 2029 coreconfigitem(
2025 2030 b'ui',
2026 2031 b'debugger',
2027 2032 default=None,
2028 2033 )
2029 2034 coreconfigitem(
2030 2035 b'ui',
2031 2036 b'editor',
2032 2037 default=dynamicdefault,
2033 2038 )
2034 2039 coreconfigitem(
2035 2040 b'ui',
2036 2041 b'detailed-exit-code',
2037 2042 default=False,
2038 2043 experimental=True,
2039 2044 )
2040 2045 coreconfigitem(
2041 2046 b'ui',
2042 2047 b'fallbackencoding',
2043 2048 default=None,
2044 2049 )
2045 2050 coreconfigitem(
2046 2051 b'ui',
2047 2052 b'forcecwd',
2048 2053 default=None,
2049 2054 )
2050 2055 coreconfigitem(
2051 2056 b'ui',
2052 2057 b'forcemerge',
2053 2058 default=None,
2054 2059 )
2055 2060 coreconfigitem(
2056 2061 b'ui',
2057 2062 b'formatdebug',
2058 2063 default=False,
2059 2064 )
2060 2065 coreconfigitem(
2061 2066 b'ui',
2062 2067 b'formatjson',
2063 2068 default=False,
2064 2069 )
2065 2070 coreconfigitem(
2066 2071 b'ui',
2067 2072 b'formatted',
2068 2073 default=None,
2069 2074 )
2070 2075 coreconfigitem(
2071 2076 b'ui',
2072 2077 b'interactive',
2073 2078 default=None,
2074 2079 )
2075 2080 coreconfigitem(
2076 2081 b'ui',
2077 2082 b'interface',
2078 2083 default=None,
2079 2084 )
2080 2085 coreconfigitem(
2081 2086 b'ui',
2082 2087 b'interface.chunkselector',
2083 2088 default=None,
2084 2089 )
2085 2090 coreconfigitem(
2086 2091 b'ui',
2087 2092 b'large-file-limit',
2088 2093 default=10000000,
2089 2094 )
2090 2095 coreconfigitem(
2091 2096 b'ui',
2092 2097 b'logblockedtimes',
2093 2098 default=False,
2094 2099 )
2095 2100 coreconfigitem(
2096 2101 b'ui',
2097 2102 b'merge',
2098 2103 default=None,
2099 2104 )
2100 2105 coreconfigitem(
2101 2106 b'ui',
2102 2107 b'mergemarkers',
2103 2108 default=b'basic',
2104 2109 )
2105 2110 coreconfigitem(
2106 2111 b'ui',
2107 2112 b'message-output',
2108 2113 default=b'stdio',
2109 2114 )
2110 2115 coreconfigitem(
2111 2116 b'ui',
2112 2117 b'nontty',
2113 2118 default=False,
2114 2119 )
2115 2120 coreconfigitem(
2116 2121 b'ui',
2117 2122 b'origbackuppath',
2118 2123 default=None,
2119 2124 )
2120 2125 coreconfigitem(
2121 2126 b'ui',
2122 2127 b'paginate',
2123 2128 default=True,
2124 2129 )
2125 2130 coreconfigitem(
2126 2131 b'ui',
2127 2132 b'patch',
2128 2133 default=None,
2129 2134 )
2130 2135 coreconfigitem(
2131 2136 b'ui',
2132 2137 b'portablefilenames',
2133 2138 default=b'warn',
2134 2139 )
2135 2140 coreconfigitem(
2136 2141 b'ui',
2137 2142 b'promptecho',
2138 2143 default=False,
2139 2144 )
2140 2145 coreconfigitem(
2141 2146 b'ui',
2142 2147 b'quiet',
2143 2148 default=False,
2144 2149 )
2145 2150 coreconfigitem(
2146 2151 b'ui',
2147 2152 b'quietbookmarkmove',
2148 2153 default=False,
2149 2154 )
2150 2155 coreconfigitem(
2151 2156 b'ui',
2152 2157 b'relative-paths',
2153 2158 default=b'legacy',
2154 2159 )
2155 2160 coreconfigitem(
2156 2161 b'ui',
2157 2162 b'remotecmd',
2158 2163 default=b'hg',
2159 2164 )
2160 2165 coreconfigitem(
2161 2166 b'ui',
2162 2167 b'report_untrusted',
2163 2168 default=True,
2164 2169 )
2165 2170 coreconfigitem(
2166 2171 b'ui',
2167 2172 b'rollback',
2168 2173 default=True,
2169 2174 )
2170 2175 coreconfigitem(
2171 2176 b'ui',
2172 2177 b'signal-safe-lock',
2173 2178 default=True,
2174 2179 )
2175 2180 coreconfigitem(
2176 2181 b'ui',
2177 2182 b'slash',
2178 2183 default=False,
2179 2184 )
2180 2185 coreconfigitem(
2181 2186 b'ui',
2182 2187 b'ssh',
2183 2188 default=b'ssh',
2184 2189 )
2185 2190 coreconfigitem(
2186 2191 b'ui',
2187 2192 b'ssherrorhint',
2188 2193 default=None,
2189 2194 )
2190 2195 coreconfigitem(
2191 2196 b'ui',
2192 2197 b'statuscopies',
2193 2198 default=False,
2194 2199 )
2195 2200 coreconfigitem(
2196 2201 b'ui',
2197 2202 b'strict',
2198 2203 default=False,
2199 2204 )
2200 2205 coreconfigitem(
2201 2206 b'ui',
2202 2207 b'style',
2203 2208 default=b'',
2204 2209 )
2205 2210 coreconfigitem(
2206 2211 b'ui',
2207 2212 b'supportcontact',
2208 2213 default=None,
2209 2214 )
2210 2215 coreconfigitem(
2211 2216 b'ui',
2212 2217 b'textwidth',
2213 2218 default=78,
2214 2219 )
2215 2220 coreconfigitem(
2216 2221 b'ui',
2217 2222 b'timeout',
2218 2223 default=b'600',
2219 2224 )
2220 2225 coreconfigitem(
2221 2226 b'ui',
2222 2227 b'timeout.warn',
2223 2228 default=0,
2224 2229 )
2225 2230 coreconfigitem(
2226 2231 b'ui',
2227 2232 b'timestamp-output',
2228 2233 default=False,
2229 2234 )
2230 2235 coreconfigitem(
2231 2236 b'ui',
2232 2237 b'traceback',
2233 2238 default=False,
2234 2239 )
2235 2240 coreconfigitem(
2236 2241 b'ui',
2237 2242 b'tweakdefaults',
2238 2243 default=False,
2239 2244 )
2240 2245 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2241 2246 coreconfigitem(
2242 2247 b'ui',
2243 2248 b'verbose',
2244 2249 default=False,
2245 2250 )
2246 2251 coreconfigitem(
2247 2252 b'verify',
2248 2253 b'skipflags',
2249 2254 default=None,
2250 2255 )
2251 2256 coreconfigitem(
2252 2257 b'web',
2253 2258 b'allowbz2',
2254 2259 default=False,
2255 2260 )
2256 2261 coreconfigitem(
2257 2262 b'web',
2258 2263 b'allowgz',
2259 2264 default=False,
2260 2265 )
2261 2266 coreconfigitem(
2262 2267 b'web',
2263 2268 b'allow-pull',
2264 2269 alias=[(b'web', b'allowpull')],
2265 2270 default=True,
2266 2271 )
2267 2272 coreconfigitem(
2268 2273 b'web',
2269 2274 b'allow-push',
2270 2275 alias=[(b'web', b'allow_push')],
2271 2276 default=list,
2272 2277 )
2273 2278 coreconfigitem(
2274 2279 b'web',
2275 2280 b'allowzip',
2276 2281 default=False,
2277 2282 )
2278 2283 coreconfigitem(
2279 2284 b'web',
2280 2285 b'archivesubrepos',
2281 2286 default=False,
2282 2287 )
2283 2288 coreconfigitem(
2284 2289 b'web',
2285 2290 b'cache',
2286 2291 default=True,
2287 2292 )
2288 2293 coreconfigitem(
2289 2294 b'web',
2290 2295 b'comparisoncontext',
2291 2296 default=5,
2292 2297 )
2293 2298 coreconfigitem(
2294 2299 b'web',
2295 2300 b'contact',
2296 2301 default=None,
2297 2302 )
2298 2303 coreconfigitem(
2299 2304 b'web',
2300 2305 b'deny_push',
2301 2306 default=list,
2302 2307 )
2303 2308 coreconfigitem(
2304 2309 b'web',
2305 2310 b'guessmime',
2306 2311 default=False,
2307 2312 )
2308 2313 coreconfigitem(
2309 2314 b'web',
2310 2315 b'hidden',
2311 2316 default=False,
2312 2317 )
2313 2318 coreconfigitem(
2314 2319 b'web',
2315 2320 b'labels',
2316 2321 default=list,
2317 2322 )
2318 2323 coreconfigitem(
2319 2324 b'web',
2320 2325 b'logoimg',
2321 2326 default=b'hglogo.png',
2322 2327 )
2323 2328 coreconfigitem(
2324 2329 b'web',
2325 2330 b'logourl',
2326 2331 default=b'https://mercurial-scm.org/',
2327 2332 )
2328 2333 coreconfigitem(
2329 2334 b'web',
2330 2335 b'accesslog',
2331 2336 default=b'-',
2332 2337 )
2333 2338 coreconfigitem(
2334 2339 b'web',
2335 2340 b'address',
2336 2341 default=b'',
2337 2342 )
2338 2343 coreconfigitem(
2339 2344 b'web',
2340 2345 b'allow-archive',
2341 2346 alias=[(b'web', b'allow_archive')],
2342 2347 default=list,
2343 2348 )
2344 2349 coreconfigitem(
2345 2350 b'web',
2346 2351 b'allow_read',
2347 2352 default=list,
2348 2353 )
2349 2354 coreconfigitem(
2350 2355 b'web',
2351 2356 b'baseurl',
2352 2357 default=None,
2353 2358 )
2354 2359 coreconfigitem(
2355 2360 b'web',
2356 2361 b'cacerts',
2357 2362 default=None,
2358 2363 )
2359 2364 coreconfigitem(
2360 2365 b'web',
2361 2366 b'certificate',
2362 2367 default=None,
2363 2368 )
2364 2369 coreconfigitem(
2365 2370 b'web',
2366 2371 b'collapse',
2367 2372 default=False,
2368 2373 )
2369 2374 coreconfigitem(
2370 2375 b'web',
2371 2376 b'csp',
2372 2377 default=None,
2373 2378 )
2374 2379 coreconfigitem(
2375 2380 b'web',
2376 2381 b'deny_read',
2377 2382 default=list,
2378 2383 )
2379 2384 coreconfigitem(
2380 2385 b'web',
2381 2386 b'descend',
2382 2387 default=True,
2383 2388 )
2384 2389 coreconfigitem(
2385 2390 b'web',
2386 2391 b'description',
2387 2392 default=b"",
2388 2393 )
2389 2394 coreconfigitem(
2390 2395 b'web',
2391 2396 b'encoding',
2392 2397 default=lambda: encoding.encoding,
2393 2398 )
2394 2399 coreconfigitem(
2395 2400 b'web',
2396 2401 b'errorlog',
2397 2402 default=b'-',
2398 2403 )
2399 2404 coreconfigitem(
2400 2405 b'web',
2401 2406 b'ipv6',
2402 2407 default=False,
2403 2408 )
2404 2409 coreconfigitem(
2405 2410 b'web',
2406 2411 b'maxchanges',
2407 2412 default=10,
2408 2413 )
2409 2414 coreconfigitem(
2410 2415 b'web',
2411 2416 b'maxfiles',
2412 2417 default=10,
2413 2418 )
2414 2419 coreconfigitem(
2415 2420 b'web',
2416 2421 b'maxshortchanges',
2417 2422 default=60,
2418 2423 )
2419 2424 coreconfigitem(
2420 2425 b'web',
2421 2426 b'motd',
2422 2427 default=b'',
2423 2428 )
2424 2429 coreconfigitem(
2425 2430 b'web',
2426 2431 b'name',
2427 2432 default=dynamicdefault,
2428 2433 )
2429 2434 coreconfigitem(
2430 2435 b'web',
2431 2436 b'port',
2432 2437 default=8000,
2433 2438 )
2434 2439 coreconfigitem(
2435 2440 b'web',
2436 2441 b'prefix',
2437 2442 default=b'',
2438 2443 )
2439 2444 coreconfigitem(
2440 2445 b'web',
2441 2446 b'push_ssl',
2442 2447 default=True,
2443 2448 )
2444 2449 coreconfigitem(
2445 2450 b'web',
2446 2451 b'refreshinterval',
2447 2452 default=20,
2448 2453 )
2449 2454 coreconfigitem(
2450 2455 b'web',
2451 2456 b'server-header',
2452 2457 default=None,
2453 2458 )
2454 2459 coreconfigitem(
2455 2460 b'web',
2456 2461 b'static',
2457 2462 default=None,
2458 2463 )
2459 2464 coreconfigitem(
2460 2465 b'web',
2461 2466 b'staticurl',
2462 2467 default=None,
2463 2468 )
2464 2469 coreconfigitem(
2465 2470 b'web',
2466 2471 b'stripes',
2467 2472 default=1,
2468 2473 )
2469 2474 coreconfigitem(
2470 2475 b'web',
2471 2476 b'style',
2472 2477 default=b'paper',
2473 2478 )
2474 2479 coreconfigitem(
2475 2480 b'web',
2476 2481 b'templates',
2477 2482 default=None,
2478 2483 )
2479 2484 coreconfigitem(
2480 2485 b'web',
2481 2486 b'view',
2482 2487 default=b'served',
2483 2488 experimental=True,
2484 2489 )
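# The web items above only declare defaults; users override them from an
# hgrc file. A minimal illustrative example (values are arbitrary, not
# recommendations):
#
#   [web]
#   allow-archive = gz zip
#   port = 8080
#   style = gitweb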
2485 2490 coreconfigitem(
2486 2491 b'worker',
2487 2492 b'backgroundclose',
2488 2493 default=dynamicdefault,
2489 2494 )
2490 2495 # Windows defaults to a limit of 512 open files. A buffer of 128
2491 2496 # should give us enough headway.
2492 2497 coreconfigitem(
2493 2498 b'worker',
2494 2499 b'backgroundclosemaxqueue',
2495 2500 default=384,
2496 2501 )
2497 2502 coreconfigitem(
2498 2503 b'worker',
2499 2504 b'backgroundcloseminfilecount',
2500 2505 default=2048,
2501 2506 )
2502 2507 coreconfigitem(
2503 2508 b'worker',
2504 2509 b'backgroundclosethreadcount',
2505 2510 default=4,
2506 2511 )
2507 2512 coreconfigitem(
2508 2513 b'worker',
2509 2514 b'enabled',
2510 2515 default=True,
2511 2516 )
2512 2517 coreconfigitem(
2513 2518 b'worker',
2514 2519 b'numcpus',
2515 2520 default=None,
2516 2521 )
2517 2522
2518 2523 # Rebase-related configuration moved to core because other extensions are doing
2519 2524 # strange things. For example, shelve imports the extension to reuse some bits
2520 2525 # without formally loading it.
2521 2526 coreconfigitem(
2522 2527 b'commands',
2523 2528 b'rebase.requiredest',
2524 2529 default=False,
2525 2530 )
2526 2531 coreconfigitem(
2527 2532 b'experimental',
2528 2533 b'rebaseskipobsolete',
2529 2534 default=True,
2530 2535 )
2531 2536 coreconfigitem(
2532 2537 b'rebase',
2533 2538 b'singletransaction',
2534 2539 default=False,
2535 2540 )
2536 2541 coreconfigitem(
2537 2542 b'rebase',
2538 2543 b'experimental.inmemory',
2539 2544 default=False,
2540 2545 )
@@ -1,3602 +1,3617 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from .pycompat import (
27 27 delattr,
28 28 getattr,
29 29 )
30 30 from . import (
31 31 bookmarks,
32 32 branchmap,
33 33 bundle2,
34 34 bundlecaches,
35 35 changegroup,
36 36 color,
37 37 commit,
38 38 context,
39 39 dirstate,
40 40 dirstateguard,
41 41 discovery,
42 42 encoding,
43 43 error,
44 44 exchange,
45 45 extensions,
46 46 filelog,
47 47 hook,
48 48 lock as lockmod,
49 49 match as matchmod,
50 50 mergestate as mergestatemod,
51 51 mergeutil,
52 52 namespaces,
53 53 narrowspec,
54 54 obsolete,
55 55 pathutil,
56 56 phases,
57 57 pushkey,
58 58 pycompat,
59 59 rcutil,
60 60 repoview,
61 61 requirements as requirementsmod,
62 62 revset,
63 63 revsetlang,
64 64 scmutil,
65 65 sparse,
66 66 store as storemod,
67 67 subrepoutil,
68 68 tags as tagsmod,
69 69 transaction,
70 70 txnutil,
71 71 util,
72 72 vfs as vfsmod,
73 73 )
74 74
75 75 from .interfaces import (
76 76 repository,
77 77 util as interfaceutil,
78 78 )
79 79
80 80 from .utils import (
81 81 hashutil,
82 82 procutil,
83 83 stringutil,
84 84 )
85 85
86 86 from .revlogutils import constants as revlogconst
87 87
88 88 release = lockmod.release
89 89 urlerr = util.urlerr
90 90 urlreq = util.urlreq
91 91
92 92 # set of (path, vfs-location) tuples. vfs-location is:
93 93 # - 'plain' for vfs relative paths
94 94 # - '' for svfs relative paths
95 95 _cachedfiles = set()
96 96
97 97
98 98 class _basefilecache(scmutil.filecache):
99 99 """All filecache usage on repo are done for logic that should be unfiltered"""
100 100
101 101 def __get__(self, repo, type=None):
102 102 if repo is None:
103 103 return self
104 104 # proxy to unfiltered __dict__ since filtered repo has no entry
105 105 unfi = repo.unfiltered()
106 106 try:
107 107 return unfi.__dict__[self.sname]
108 108 except KeyError:
109 109 pass
110 110 return super(_basefilecache, self).__get__(unfi, type)
111 111
112 112 def set(self, repo, value):
113 113 return super(_basefilecache, self).set(repo.unfiltered(), value)
114 114
115 115
116 116 class repofilecache(_basefilecache):
117 117 """filecache for files in .hg but outside of .hg/store"""
118 118
119 119 def __init__(self, *paths):
120 120 super(repofilecache, self).__init__(*paths)
121 121 for path in paths:
122 122 _cachedfiles.add((path, b'plain'))
123 123
124 124 def join(self, obj, fname):
125 125 return obj.vfs.join(fname)
126 126
127 127
128 128 class storecache(_basefilecache):
129 129 """filecache for files in the store"""
130 130
131 131 def __init__(self, *paths):
132 132 super(storecache, self).__init__(*paths)
133 133 for path in paths:
134 134 _cachedfiles.add((path, b''))
135 135
136 136 def join(self, obj, fname):
137 137 return obj.sjoin(fname)
138 138
139 139
140 140 class mixedrepostorecache(_basefilecache):
141 141 """filecache for a mix files in .hg/store and outside"""
142 142
143 143 def __init__(self, *pathsandlocations):
144 144 # scmutil.filecache only uses the path for passing back into our
145 145 # join(), so we can safely pass a list of paths and locations
146 146 super(mixedrepostorecache, self).__init__(*pathsandlocations)
147 147 _cachedfiles.update(pathsandlocations)
148 148
149 149 def join(self, obj, fnameandlocation):
150 150 fname, location = fnameandlocation
151 151 if location == b'plain':
152 152 return obj.vfs.join(fname)
153 153 else:
154 154 if location != b'':
155 155 raise error.ProgrammingError(
156 156 b'unexpected location: %s' % location
157 157 )
158 158 return obj.sjoin(fname)
159 159
160 160
161 161 def isfilecached(repo, name):
162 162 """check if a repo has already cached "name" filecache-ed property
163 163
164 164 This returns (cachedobj-or-None, iscached) tuple.
165 165 """
166 166 cacheentry = repo.unfiltered()._filecache.get(name, None)
167 167 if not cacheentry:
168 168 return None, False
169 169 return cacheentry.obj, True
170 170
171 171
172 172 class unfilteredpropertycache(util.propertycache):
173 173 """propertycache that apply to unfiltered repo only"""
174 174
175 175 def __get__(self, repo, type=None):
176 176 unfi = repo.unfiltered()
177 177 if unfi is repo:
178 178 return super(unfilteredpropertycache, self).__get__(unfi)
179 179 return getattr(unfi, self.name)
180 180
181 181
182 182 class filteredpropertycache(util.propertycache):
183 183 """propertycache that must take filtering in account"""
184 184
185 185 def cachevalue(self, obj, value):
186 186 object.__setattr__(obj, self.name, value)
187 187
188 188
189 189 def hasunfilteredcache(repo, name):
190 190 """check if a repo has an unfilteredpropertycache value for <name>"""
191 191 return name in vars(repo.unfiltered())
192 192
193 193
194 194 def unfilteredmethod(orig):
195 195 """decorate method that always need to be run on unfiltered version"""
196 196
197 197 @functools.wraps(orig)
198 198 def wrapper(repo, *args, **kwargs):
199 199 return orig(repo.unfiltered(), *args, **kwargs)
200 200
201 201 return wrapper
202 202
203 203
204 204 moderncaps = {
205 205 b'lookup',
206 206 b'branchmap',
207 207 b'pushkey',
208 208 b'known',
209 209 b'getbundle',
210 210 b'unbundle',
211 211 }
212 212 legacycaps = moderncaps.union({b'changegroupsubset'})
213 213
214 214
215 215 @interfaceutil.implementer(repository.ipeercommandexecutor)
216 216 class localcommandexecutor(object):
217 217 def __init__(self, peer):
218 218 self._peer = peer
219 219 self._sent = False
220 220 self._closed = False
221 221
222 222 def __enter__(self):
223 223 return self
224 224
225 225 def __exit__(self, exctype, excvalue, exctb):
226 226 self.close()
227 227
228 228 def callcommand(self, command, args):
229 229 if self._sent:
230 230 raise error.ProgrammingError(
231 231 b'callcommand() cannot be used after sendcommands()'
232 232 )
233 233
234 234 if self._closed:
235 235 raise error.ProgrammingError(
236 236 b'callcommand() cannot be used after close()'
237 237 )
238 238
239 239 # We don't need to support anything fancy. Just call the named
240 240 # method on the peer and return a resolved future.
241 241 fn = getattr(self._peer, pycompat.sysstr(command))
242 242
243 243 f = pycompat.futures.Future()
244 244
245 245 try:
246 246 result = fn(**pycompat.strkwargs(args))
247 247 except Exception:
248 248 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
249 249 else:
250 250 f.set_result(result)
251 251
252 252 return f
253 253
254 254 def sendcommands(self):
255 255 self._sent = True
256 256
257 257 def close(self):
258 258 self._closed = True
259 259
260 260
261 261 @interfaceutil.implementer(repository.ipeercommands)
262 262 class localpeer(repository.peer):
263 263 '''peer for a local repo; reflects only the most recent API'''
264 264
265 265 def __init__(self, repo, caps=None):
266 266 super(localpeer, self).__init__()
267 267
268 268 if caps is None:
269 269 caps = moderncaps.copy()
270 270 self._repo = repo.filtered(b'served')
271 271 self.ui = repo.ui
272 272 self._caps = repo._restrictcapabilities(caps)
273 273
274 274 # Begin of _basepeer interface.
275 275
276 276 def url(self):
277 277 return self._repo.url()
278 278
279 279 def local(self):
280 280 return self._repo
281 281
282 282 def peer(self):
283 283 return self
284 284
285 285 def canpush(self):
286 286 return True
287 287
288 288 def close(self):
289 289 self._repo.close()
290 290
291 291 # End of _basepeer interface.
292 292
293 293 # Begin of _basewirecommands interface.
294 294
295 295 def branchmap(self):
296 296 return self._repo.branchmap()
297 297
298 298 def capabilities(self):
299 299 return self._caps
300 300
301 301 def clonebundles(self):
302 302 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
303 303
304 304 def debugwireargs(self, one, two, three=None, four=None, five=None):
305 305 """Used to test argument passing over the wire"""
306 306 return b"%s %s %s %s %s" % (
307 307 one,
308 308 two,
309 309 pycompat.bytestr(three),
310 310 pycompat.bytestr(four),
311 311 pycompat.bytestr(five),
312 312 )
313 313
314 314 def getbundle(
315 315 self, source, heads=None, common=None, bundlecaps=None, **kwargs
316 316 ):
317 317 chunks = exchange.getbundlechunks(
318 318 self._repo,
319 319 source,
320 320 heads=heads,
321 321 common=common,
322 322 bundlecaps=bundlecaps,
323 323 **kwargs
324 324 )[1]
325 325 cb = util.chunkbuffer(chunks)
326 326
327 327 if exchange.bundle2requested(bundlecaps):
328 328 # When requesting a bundle2, getbundle returns a stream to make the
329 329 # wire level function happier. We need to build a proper object
330 330 # from it in local peer.
331 331 return bundle2.getunbundler(self.ui, cb)
332 332 else:
333 333 return changegroup.getunbundler(b'01', cb, None)
334 334
335 335 def heads(self):
336 336 return self._repo.heads()
337 337
338 338 def known(self, nodes):
339 339 return self._repo.known(nodes)
340 340
341 341 def listkeys(self, namespace):
342 342 return self._repo.listkeys(namespace)
343 343
344 344 def lookup(self, key):
345 345 return self._repo.lookup(key)
346 346
347 347 def pushkey(self, namespace, key, old, new):
348 348 return self._repo.pushkey(namespace, key, old, new)
349 349
350 350 def stream_out(self):
351 351 raise error.Abort(_(b'cannot perform stream clone against local peer'))
352 352
353 353 def unbundle(self, bundle, heads, url):
354 354 """apply a bundle on a repo
355 355
356 356 This function handles the repo locking itself."""
357 357 try:
358 358 try:
359 359 bundle = exchange.readbundle(self.ui, bundle, None)
360 360 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
361 361 if util.safehasattr(ret, b'getchunks'):
362 362 # This is a bundle20 object, turn it into an unbundler.
363 363 # This little dance should be dropped eventually when the
364 364 # API is finally improved.
365 365 stream = util.chunkbuffer(ret.getchunks())
366 366 ret = bundle2.getunbundler(self.ui, stream)
367 367 return ret
368 368 except Exception as exc:
369 369 # If the exception contains output salvaged from a bundle2
370 370 # reply, we need to make sure it is printed before continuing
371 371 # to fail. So we build a bundle2 with such output and consume
372 372 # it directly.
373 373 #
374 374 # This is not very elegant but allows a "simple" solution for
375 375 # issue4594
376 376 output = getattr(exc, '_bundle2salvagedoutput', ())
377 377 if output:
378 378 bundler = bundle2.bundle20(self._repo.ui)
379 379 for out in output:
380 380 bundler.addpart(out)
381 381 stream = util.chunkbuffer(bundler.getchunks())
382 382 b = bundle2.getunbundler(self.ui, stream)
383 383 bundle2.processbundle(self._repo, b)
384 384 raise
385 385 except error.PushRaced as exc:
386 386 raise error.ResponseError(
387 387 _(b'push failed:'), stringutil.forcebytestr(exc)
388 388 )
389 389
390 390 # End of _basewirecommands interface.
391 391
392 392 # Begin of peer interface.
393 393
394 394 def commandexecutor(self):
395 395 return localcommandexecutor(self)
396 396
397 397 # End of peer interface.
398 398
399 399
400 400 @interfaceutil.implementer(repository.ipeerlegacycommands)
401 401 class locallegacypeer(localpeer):
402 402 """peer extension which implements legacy methods too; used for tests with
403 403 restricted capabilities"""
404 404
405 405 def __init__(self, repo):
406 406 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
407 407
408 408 # Begin of baselegacywirecommands interface.
409 409
410 410 def between(self, pairs):
411 411 return self._repo.between(pairs)
412 412
413 413 def branches(self, nodes):
414 414 return self._repo.branches(nodes)
415 415
416 416 def changegroup(self, nodes, source):
417 417 outgoing = discovery.outgoing(
418 418 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
419 419 )
420 420 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
421 421
422 422 def changegroupsubset(self, bases, heads, source):
423 423 outgoing = discovery.outgoing(
424 424 self._repo, missingroots=bases, ancestorsof=heads
425 425 )
426 426 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
427 427
428 428 # End of baselegacywirecommands interface.
429 429
430 430
431 431 # Functions receiving (ui, features) that extensions can register to impact
432 432 # the ability to load repositories with custom requirements. Only
433 433 # functions defined in loaded extensions are called.
434 434 #
435 435 # The function receives a set of requirement strings that the repository
436 436 # is capable of opening. Functions will typically add elements to the
437 437 # set to reflect that the extension knows how to handle those requirements.
438 438 featuresetupfuncs = set()
439 439
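# A hedged sketch of how an extension would typically use this hook; the
# extension and requirement names are illustrative only:
#
#   def featuresetup(ui, supported):
#       # advertise that this extension can open repos with this requirement
#       supported.add(b'exp-myfeature')
#
#   def extsetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)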
440 440
441 441 def _getsharedvfs(hgvfs, requirements):
442 442 """returns the vfs object pointing to root of shared source
443 443 repo for a shared repository
444 444
445 445 hgvfs is vfs pointing at .hg/ of current repo (shared one)
446 446 requirements is a set of requirements of current repo (shared one)
447 447 """
448 448 # The ``shared`` or ``relshared`` requirements indicate the
449 449 # store lives in the path contained in the ``.hg/sharedpath`` file.
450 450 # This is an absolute path for ``shared`` and relative to
451 451 # ``.hg/`` for ``relshared``.
452 452 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
453 453 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
454 454 sharedpath = hgvfs.join(sharedpath)
455 455
456 456 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
457 457
458 458 if not sharedvfs.exists():
459 459 raise error.RepoError(
460 460 _(b'.hg/sharedpath points to nonexistent directory %s')
461 461 % sharedvfs.base
462 462 )
463 463 return sharedvfs
464 464
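# For illustration (paths are hypothetical): after `hg share /src/repo dest`,
# dest/.hg/sharedpath contains b'/src/repo/.hg\n' and the 'shared'
# requirement is set; with `hg share --relative` the file holds a path
# relative to dest/.hg/ and 'relshared' is set instead.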
465 465
466 466 def _readrequires(vfs, allowmissing):
467 467 """reads the require file present at root of this vfs
468 468 and return a set of requirements
469 469
470 470 If allowmissing is True, we suppress ENOENT if raised"""
471 471 # requires file contains a newline-delimited list of
472 472 # features/capabilities the opener (us) must have in order to use
473 473 # the repository. This file was introduced in Mercurial 0.9.2,
474 474 # which means very old repositories may not have one. We assume
475 475 # a missing file translates to no requirements.
476 476 try:
477 477 requirements = set(vfs.read(b'requires').splitlines())
478 478 except IOError as e:
479 479 if not (allowmissing and e.errno == errno.ENOENT):
480 480 raise
481 481 requirements = set()
482 482 return requirements
483 483
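# For reference, a typical .hg/requires file is just a newline-delimited
# list; exact contents vary with the repository format, e.g.:
#
#   dotencode
#   fncache
#   generaldelta
#   revlogv1
#   sparserevlog
#   store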
484 484
485 485 def makelocalrepository(baseui, path, intents=None):
486 486 """Create a local repository object.
487 487
488 488 Given arguments needed to construct a local repository, this function
489 489 performs various early repository loading functionality (such as
490 490 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
491 491 the repository can be opened, derives a type suitable for representing
492 492 that repository, and returns an instance of it.
493 493
494 494 The returned object conforms to the ``repository.completelocalrepository``
495 495 interface.
496 496
497 497 The repository type is derived by calling a series of factory functions
498 498 for each aspect/interface of the final repository. These are defined by
499 499 ``REPO_INTERFACES``.
500 500
501 501 Each factory function is called to produce a type implementing a specific
502 502 interface. The cumulative list of returned types will be combined into a
503 503 new type and that type will be instantiated to represent the local
504 504 repository.
505 505
506 506 The factory functions each receive various state that may be consulted
507 507 as part of deriving a type.
508 508
509 509 Extensions should wrap these factory functions to customize repository type
510 510 creation. Note that an extension's wrapped function may be called even if
511 511 that extension is not loaded for the repo being constructed. Extensions
512 512 should check if their ``__name__`` appears in the
513 513 ``extensionmodulenames`` set passed to the factory function and no-op if
514 514 not.
515 515 """
516 516 ui = baseui.copy()
517 517 # Prevent copying repo configuration.
518 518 ui.copy = baseui.copy
519 519
520 520 # Working directory VFS rooted at repository root.
521 521 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
522 522
523 523 # Main VFS for .hg/ directory.
524 524 hgpath = wdirvfs.join(b'.hg')
525 525 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
526 526 # Whether this repository is a shared one or not
527 527 shared = False
528 528 # If this repository is shared, vfs pointing to shared repo
529 529 sharedvfs = None
530 530
531 531 # The .hg/ path should exist and should be a directory. All other
532 532 # cases are errors.
533 533 if not hgvfs.isdir():
534 534 try:
535 535 hgvfs.stat()
536 536 except OSError as e:
537 537 if e.errno != errno.ENOENT:
538 538 raise
539 539 except ValueError as e:
540 540 # Can be raised on Python 3.8 when path is invalid.
541 541 raise error.Abort(
542 542 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
543 543 )
544 544
545 545 raise error.RepoError(_(b'repository %s not found') % path)
546 546
547 547 requirements = _readrequires(hgvfs, True)
548 548 shared = (
549 549 requirementsmod.SHARED_REQUIREMENT in requirements
550 550 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
551 551 )
552 552 storevfs = None
553 553 if shared:
554 554 # This is a shared repo
555 555 sharedvfs = _getsharedvfs(hgvfs, requirements)
556 556 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
557 557 else:
558 558 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
559 559
560 560 # if .hg/requires contains the sharesafe requirement, it means
561 561 # there exists a `.hg/store/requires` too and we should read it
562 562 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
563 563 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
564 564 # is not present; refer to checkrequirementscompat() for that
565 565 #
566 566 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
567 567 # repository was shared the old way. We check the share source .hg/requires
568 568 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
569 569 # to be reshared
570 570 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
571 571
572 572 if (
573 573 shared
574 574 and requirementsmod.SHARESAFE_REQUIREMENT
575 575 not in _readrequires(sharedvfs, True)
576 576 ):
577 if ui.configbool(
578 b'experimental', b'sharesafe-auto-downgrade-shares'
579 ):
580 # prevent cyclic import localrepo -> upgrade -> localrepo
581 from . import upgrade
582
583 upgrade.downgrade_share_to_non_safe(
584 ui,
585 hgvfs,
586 sharedvfs,
587 requirements,
588 )
589 else:
577 590 raise error.Abort(
578 _(b"share source does not support exp-sharesafe requirement")
591 _(
592 b"share source does not support exp-sharesafe requirement"
579 593 )
580
594 )
595 else:
581 596 requirements |= _readrequires(storevfs, False)
582 597 elif shared:
583 598 sourcerequires = _readrequires(sharedvfs, False)
584 599 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
585 600 if ui.configbool(b'experimental', b'sharesafe-auto-upgrade-shares'):
586 601 # prevent cyclic import localrepo -> upgrade -> localrepo
587 602 from . import upgrade
588 603
589 604 upgrade.upgrade_share_to_safe(
590 605 ui,
591 606 hgvfs,
592 607 storevfs,
593 608 requirements,
594 609 )
595 610 else:
596 611 ui.warn(
597 612 _(
598 613 b'warning: source repository supports share-safe functionality.'
599 614 b' Reshare to upgrade.\n'
600 615 )
601 616 )
602 617
603 618 # The .hg/hgrc file may load extensions or contain config options
604 619 # that influence repository construction. Attempt to load it and
605 620 # process any new extensions that it may have pulled in.
606 621 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
607 622 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
608 623 extensions.loadall(ui)
609 624 extensions.populateui(ui)
610 625
611 626 # Set of module names of extensions loaded for this repository.
612 627 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
613 628
614 629 supportedrequirements = gathersupportedrequirements(ui)
615 630
616 631 # We first validate the requirements are known.
617 632 ensurerequirementsrecognized(requirements, supportedrequirements)
618 633
619 634 # Then we validate that the known set is reasonable to use together.
620 635 ensurerequirementscompatible(ui, requirements)
621 636
622 637 # TODO there are unhandled edge cases related to opening repositories with
623 638 # shared storage. If storage is shared, we should also test for requirements
624 639 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
625 640 # that repo, as that repo may load extensions needed to open it. This is a
626 641 # bit complicated because we don't want the other hgrc to overwrite settings
627 642 # in this hgrc.
628 643 #
629 644 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
630 645 # file when sharing repos. But if a requirement is added after the share is
631 646 # performed, thereby introducing a new requirement for the opener, we will
632 647 # not see that and could encounter a run-time error interacting with
633 648 # that shared store since it has an unknown-to-us requirement.
634 649
635 650 # At this point, we know we should be capable of opening the repository.
636 651 # Now get on with doing that.
637 652
638 653 features = set()
639 654
640 655 # The "store" part of the repository holds versioned data. How it is
641 656 # accessed is determined by various requirements. If `shared` or
642 657 # `relshared` requirements are present, this indicates the current repository
643 658 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
644 659 if shared:
645 660 storebasepath = sharedvfs.base
646 661 cachepath = sharedvfs.join(b'cache')
647 662 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
648 663 else:
649 664 storebasepath = hgvfs.base
650 665 cachepath = hgvfs.join(b'cache')
651 666 wcachepath = hgvfs.join(b'wcache')
652 667
653 668 # The store has changed over time and the exact layout is dictated by
654 669 # requirements. The store interface abstracts differences across all
655 670 # of them.
656 671 store = makestore(
657 672 requirements,
658 673 storebasepath,
659 674 lambda base: vfsmod.vfs(base, cacheaudited=True),
660 675 )
661 676 hgvfs.createmode = store.createmode
662 677
663 678 storevfs = store.vfs
664 679 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
665 680
666 681 # The cache vfs is used to manage cache files.
667 682 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
668 683 cachevfs.createmode = store.createmode
669 684 # The cache vfs is used to manage cache files related to the working copy
670 685 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
671 686 wcachevfs.createmode = store.createmode
672 687
673 688 # Now resolve the type for the repository object. We do this by repeatedly
674 689 # calling a factory function to produce types for specific aspects of the
675 690 # repo's operation. The aggregate returned types are used as base classes
676 691 # for a dynamically-derived type, which will represent our new repository.
677 692
678 693 bases = []
679 694 extrastate = {}
680 695
681 696 for iface, fn in REPO_INTERFACES:
682 697 # We pass all potentially useful state to give extensions tons of
683 698 # flexibility.
684 699 typ = fn()(
685 700 ui=ui,
686 701 intents=intents,
687 702 requirements=requirements,
688 703 features=features,
689 704 wdirvfs=wdirvfs,
690 705 hgvfs=hgvfs,
691 706 store=store,
692 707 storevfs=storevfs,
693 708 storeoptions=storevfs.options,
694 709 cachevfs=cachevfs,
695 710 wcachevfs=wcachevfs,
696 711 extensionmodulenames=extensionmodulenames,
697 712 extrastate=extrastate,
698 713 baseclasses=bases,
699 714 )
700 715
701 716 if not isinstance(typ, type):
702 717 raise error.ProgrammingError(
703 718 b'unable to construct type for %s' % iface
704 719 )
705 720
706 721 bases.append(typ)
707 722
708 723 # type() allows you to use characters in type names that wouldn't be
709 724 # recognized as Python symbols in source code. We abuse that to add
710 725 # rich information about our constructed repo.
711 726 name = pycompat.sysstr(
712 727 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
713 728 )
714 729
715 730 cls = type(name, tuple(bases), {})
716 731
717 732 return cls(
718 733 baseui=baseui,
719 734 ui=ui,
720 735 origroot=path,
721 736 wdirvfs=wdirvfs,
722 737 hgvfs=hgvfs,
723 738 requirements=requirements,
724 739 supportedrequirements=supportedrequirements,
725 740 sharedpath=storebasepath,
726 741 store=store,
727 742 cachevfs=cachevfs,
728 743 wcachevfs=wcachevfs,
729 744 features=features,
730 745 intents=intents,
731 746 )
732 747
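# A hedged sketch of the factory-wrapping pattern described in the docstring
# above; 'myext' and `myextmixin` are illustrative names supplied by the
# wrapping extension:
#
#   def wrapped_makefilestorage(orig, requirements, features, **kwargs):
#       typ = orig(requirements=requirements, features=features, **kwargs)
#       if 'myext' not in kwargs['extensionmodulenames']:
#           return typ  # no-op when this extension is not loaded here
#       return type('myextfilestorage', (myextmixin, typ), {})
#
#   def extsetup(ui):
#       extensions.wrapfunction(
#           localrepo, 'makefilestorage', wrapped_makefilestorage
#       )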
733 748
734 749 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
735 750 """Load hgrc files/content into a ui instance.
736 751
737 752 This is called during repository opening to load any additional
738 753 config files or settings relevant to the current repository.
739 754
740 755 Returns a bool indicating whether any additional configs were loaded.
741 756
742 757 Extensions should monkeypatch this function to modify how per-repo
743 758 configs are loaded. For example, an extension may wish to pull in
744 759 configs from alternate files or sources.
745 760
746 761 sharedvfs is vfs object pointing to source repo if the current one is a
747 762 shared one
748 763 """
749 764 if not rcutil.use_repo_hgrc():
750 765 return False
751 766
752 767 ret = False
753 768 # first load config from shared source if we have to
754 769 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
755 770 try:
756 771 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
757 772 ret = True
758 773 except IOError:
759 774 pass
760 775
761 776 try:
762 777 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
763 778 ret = True
764 779 except IOError:
765 780 pass
766 781
767 782 try:
768 783 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
769 784 ret = True
770 785 except IOError:
771 786 pass
772 787
773 788 return ret
774 789
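# A hedged sketch of the monkeypatching mentioned in the docstring above;
# the wrapper and the extra config file name are illustrative:
#
#   def wrappedloadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
#       ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
#       try:
#           # pull in one additional per-repo config file
#           ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#           ret = True
#       except IOError:
#           pass
#       return ret
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'loadhgrc', wrappedloadhgrc)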
775 790
776 791 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
777 792 """Perform additional actions after .hg/hgrc is loaded.
778 793
779 794 This function is called during repository loading immediately after
780 795 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
781 796
782 797 The function can be used to validate configs, automatically add
783 798 options (including extensions) based on requirements, etc.
784 799 """
785 800
786 801 # Map of requirements to list of extensions to load automatically when
787 802 # requirement is present.
788 803 autoextensions = {
789 804 b'git': [b'git'],
790 805 b'largefiles': [b'largefiles'],
791 806 b'lfs': [b'lfs'],
792 807 }
793 808
794 809 for requirement, names in sorted(autoextensions.items()):
795 810 if requirement not in requirements:
796 811 continue
797 812
798 813 for name in names:
799 814 if not ui.hasconfig(b'extensions', name):
800 815 ui.setconfig(b'extensions', name, b'', source=b'autoload')
801 816
802 817
803 818 def gathersupportedrequirements(ui):
804 819 """Determine the complete set of recognized requirements."""
805 820 # Start with all requirements supported by this file.
806 821 supported = set(localrepository._basesupported)
807 822
808 823 # Execute ``featuresetupfuncs`` entries if they belong to an extension
809 824 # relevant to this ui instance.
810 825 modules = {m.__name__ for n, m in extensions.extensions(ui)}
811 826
812 827 for fn in featuresetupfuncs:
813 828 if fn.__module__ in modules:
814 829 fn(ui, supported)
815 830
816 831 # Add derived requirements from registered compression engines.
817 832 for name in util.compengines:
818 833 engine = util.compengines[name]
819 834 if engine.available() and engine.revlogheader():
820 835 supported.add(b'exp-compression-%s' % name)
821 836 if engine.name() == b'zstd':
822 837 supported.add(b'revlog-compression-zstd')
823 838
824 839 return supported
825 840
826 841
827 842 def ensurerequirementsrecognized(requirements, supported):
828 843 """Validate that a set of local requirements is recognized.
829 844
830 845 Receives a set of requirements. Raises an ``error.RepoError`` if there
831 846 exists any requirement in that set that currently loaded code doesn't
832 847 recognize.
833 848
834 849 Returns a set of supported requirements.
835 850 """
836 851 missing = set()
837 852
838 853 for requirement in requirements:
839 854 if requirement in supported:
840 855 continue
841 856
842 857 if not requirement or not requirement[0:1].isalnum():
843 858 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
844 859
845 860 missing.add(requirement)
846 861
847 862 if missing:
848 863 raise error.RequirementError(
849 864 _(b'repository requires features unknown to this Mercurial: %s')
850 865 % b' '.join(sorted(missing)),
851 866 hint=_(
852 867 b'see https://mercurial-scm.org/wiki/MissingRequirement '
853 868 b'for more information'
854 869 ),
855 870 )
856 871
857 872
858 873 def ensurerequirementscompatible(ui, requirements):
859 874 """Validates that a set of recognized requirements is mutually compatible.
860 875
861 876 Some requirements may not be compatible with others or require
862 877 config options that aren't enabled. This function is called during
863 878 repository opening to ensure that the set of requirements needed
864 879 to open a repository is sane and compatible with config options.
865 880
866 881 Extensions can monkeypatch this function to perform additional
867 882 checking.
868 883
869 884 ``error.RepoError`` should be raised on failure.
870 885 """
871 886 if (
872 887 requirementsmod.SPARSE_REQUIREMENT in requirements
873 888 and not sparse.enabled
874 889 ):
875 890 raise error.RepoError(
876 891 _(
877 892 b'repository is using sparse feature but '
878 893 b'sparse is not enabled; enable the '
879 894 b'"sparse" extensions to access'
880 895 )
881 896 )
882 897
883 898
884 899 def makestore(requirements, path, vfstype):
885 900 """Construct a storage object for a repository."""
886 901 if b'store' in requirements:
887 902 if b'fncache' in requirements:
888 903 return storemod.fncachestore(
889 904 path, vfstype, b'dotencode' in requirements
890 905 )
891 906
892 907 return storemod.encodedstore(path, vfstype)
893 908
894 909 return storemod.basicstore(path, vfstype)
895 910
896 911
897 912 def resolvestorevfsoptions(ui, requirements, features):
898 913 """Resolve the options to pass to the store vfs opener.
899 914
900 915 The returned dict is used to influence behavior of the storage layer.
901 916 """
902 917 options = {}
903 918
904 919 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
905 920 options[b'treemanifest'] = True
906 921
907 922 # experimental config: format.manifestcachesize
908 923 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
909 924 if manifestcachesize is not None:
910 925 options[b'manifestcachesize'] = manifestcachesize
911 926
912 927 # In the absence of another requirement superseding a revlog-related
913 928 # requirement, we have to assume the repo is using revlog version 0.
914 929 # This revlog format is super old and we don't bother trying to parse
915 930 # opener options for it because those options wouldn't do anything
916 931 # meaningful on such old repos.
917 932 if (
918 933 b'revlogv1' in requirements
919 934 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
920 935 ):
921 936 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
922 937 else: # explicitly mark repo as using revlogv0
923 938 options[b'revlogv0'] = True
924 939
925 940 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
926 941 options[b'copies-storage'] = b'changeset-sidedata'
927 942 else:
928 943 writecopiesto = ui.config(b'experimental', b'copies.write-to')
929 944 copiesextramode = (b'changeset-only', b'compatibility')
930 945 if writecopiesto in copiesextramode:
931 946 options[b'copies-storage'] = b'extra'
932 947
933 948 return options
934 949
935 950
936 951 def resolverevlogstorevfsoptions(ui, requirements, features):
937 952 """Resolve opener options specific to revlogs."""
938 953
939 954 options = {}
940 955 options[b'flagprocessors'] = {}
941 956
942 957 if b'revlogv1' in requirements:
943 958 options[b'revlogv1'] = True
944 959 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
945 960 options[b'revlogv2'] = True
946 961
947 962 if b'generaldelta' in requirements:
948 963 options[b'generaldelta'] = True
949 964
950 965 # experimental config: format.chunkcachesize
951 966 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
952 967 if chunkcachesize is not None:
953 968 options[b'chunkcachesize'] = chunkcachesize
954 969
955 970 deltabothparents = ui.configbool(
956 971 b'storage', b'revlog.optimize-delta-parent-choice'
957 972 )
958 973 options[b'deltabothparents'] = deltabothparents
959 974
960 975 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
961 976 lazydeltabase = False
962 977 if lazydelta:
963 978 lazydeltabase = ui.configbool(
964 979 b'storage', b'revlog.reuse-external-delta-parent'
965 980 )
966 981 if lazydeltabase is None:
967 982 lazydeltabase = not scmutil.gddeltaconfig(ui)
968 983 options[b'lazydelta'] = lazydelta
969 984 options[b'lazydeltabase'] = lazydeltabase
970 985
971 986 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
972 987 if 0 <= chainspan:
973 988 options[b'maxdeltachainspan'] = chainspan
974 989
975 990 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
976 991 if mmapindexthreshold is not None:
977 992 options[b'mmapindexthreshold'] = mmapindexthreshold
978 993
979 994 withsparseread = ui.configbool(b'experimental', b'sparse-read')
980 995 srdensitythres = float(
981 996 ui.config(b'experimental', b'sparse-read.density-threshold')
982 997 )
983 998 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
984 999 options[b'with-sparse-read'] = withsparseread
985 1000 options[b'sparse-read-density-threshold'] = srdensitythres
986 1001 options[b'sparse-read-min-gap-size'] = srmingapsize
987 1002
988 1003 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
989 1004 options[b'sparse-revlog'] = sparserevlog
990 1005 if sparserevlog:
991 1006 options[b'generaldelta'] = True
992 1007
993 1008 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
994 1009 options[b'side-data'] = sidedata
995 1010
996 1011 maxchainlen = None
997 1012 if sparserevlog:
998 1013 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
999 1014 # experimental config: format.maxchainlen
1000 1015 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1001 1016 if maxchainlen is not None:
1002 1017 options[b'maxchainlen'] = maxchainlen
1003 1018
1004 1019 for r in requirements:
1005 1020 # we allow multiple compression engine requirements to co-exist because
1006 1021 # strictly speaking, revlog seems to support mixed compression styles.
1007 1022 #
1008 1023 # The compression used for new entries will be "the last one"
1009 1024 prefix = r.startswith
1010 1025 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1011 1026 options[b'compengine'] = r.split(b'-', 2)[2]
1012 1027
1013 1028 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1014 1029 if options[b'zlib.level'] is not None:
1015 1030 if not (0 <= options[b'zlib.level'] <= 9):
1016 1031 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1017 1032 raise error.Abort(msg % options[b'zlib.level'])
1018 1033 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1019 1034 if options[b'zstd.level'] is not None:
1020 1035 if not (0 <= options[b'zstd.level'] <= 22):
1021 1036 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1022 1037 raise error.Abort(msg % options[b'zstd.level'])
1023 1038
1024 1039 if requirementsmod.NARROW_REQUIREMENT in requirements:
1025 1040 options[b'enableellipsis'] = True
1026 1041
1027 1042 if ui.configbool(b'experimental', b'rust.index'):
1028 1043 options[b'rust.index'] = True
1029 1044 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1030 1045 options[b'persistent-nodemap'] = True
1031 1046 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
1032 1047 options[b'persistent-nodemap.mmap'] = True
1033 1048 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
1034 1049 options[b'persistent-nodemap.mode'] = epnm
1035 1050 if ui.configbool(b'devel', b'persistent-nodemap'):
1036 1051 options[b'devel-force-nodemap'] = True
1037 1052
1038 1053 return options
1039 1054
1040 1055
1041 1056 def makemain(**kwargs):
1042 1057 """Produce a type conforming to ``ilocalrepositorymain``."""
1043 1058 return localrepository
1044 1059
1045 1060
1046 1061 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1047 1062 class revlogfilestorage(object):
1048 1063 """File storage when using revlogs."""
1049 1064
1050 1065 def file(self, path):
1051 1066 if path[0] == b'/':
1052 1067 path = path[1:]
1053 1068
1054 1069 return filelog.filelog(self.svfs, path)
1055 1070
1056 1071
1057 1072 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1058 1073 class revlognarrowfilestorage(object):
1059 1074 """File storage when using revlogs and narrow files."""
1060 1075
1061 1076 def file(self, path):
1062 1077 if path[0] == b'/':
1063 1078 path = path[1:]
1064 1079
1065 1080 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1066 1081
1067 1082
1068 1083 def makefilestorage(requirements, features, **kwargs):
1069 1084 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1070 1085 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1071 1086 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1072 1087
1073 1088 if requirementsmod.NARROW_REQUIREMENT in requirements:
1074 1089 return revlognarrowfilestorage
1075 1090 else:
1076 1091 return revlogfilestorage
1077 1092
1078 1093
1079 1094 # List of repository interfaces and factory functions for them. Each
1080 1095 # will be called in order during ``makelocalrepository()`` to iteratively
1081 1096 # derive the final type for a local repository instance. We capture the
1082 1097 # function as a lambda so we don't hold a reference and the module-level
1083 1098 # functions can be wrapped.
1084 1099 REPO_INTERFACES = [
1085 1100 (repository.ilocalrepositorymain, lambda: makemain),
1086 1101 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1087 1102 ]
1088 1103
1089 1104
1090 1105 @interfaceutil.implementer(repository.ilocalrepositorymain)
1091 1106 class localrepository(object):
1092 1107 """Main class for representing local repositories.
1093 1108
1094 1109 All local repositories are instances of this class.
1095 1110
1096 1111 Constructed on its own, instances of this class are not usable as
1097 1112 repository objects. To obtain a usable repository object, call
1098 1113 ``hg.repository()``, ``localrepo.instance()``, or
1099 1114 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1100 1115 ``instance()`` adds support for creating new repositories.
1101 1116 ``hg.repository()`` adds more extension integration, including calling
1102 1117 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1103 1118 used.
1104 1119 """
1105 1120
1106 1121 # obsolete experimental requirements:
1107 1122 # - manifestv2: An experimental new manifest format that allowed
1108 1123 # for stem compression of long paths. Experiment ended up not
1109 1124 # being successful (repository sizes went up due to worse delta
1110 1125 # chains), and the code was deleted in 4.6.
1111 1126 supportedformats = {
1112 1127 b'revlogv1',
1113 1128 b'generaldelta',
1114 1129 requirementsmod.TREEMANIFEST_REQUIREMENT,
1115 1130 requirementsmod.COPIESSDC_REQUIREMENT,
1116 1131 requirementsmod.REVLOGV2_REQUIREMENT,
1117 1132 requirementsmod.SIDEDATA_REQUIREMENT,
1118 1133 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1119 1134 requirementsmod.NODEMAP_REQUIREMENT,
1120 1135 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1121 1136 requirementsmod.SHARESAFE_REQUIREMENT,
1122 1137 }
1123 1138 _basesupported = supportedformats | {
1124 1139 b'store',
1125 1140 b'fncache',
1126 1141 requirementsmod.SHARED_REQUIREMENT,
1127 1142 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1128 1143 b'dotencode',
1129 1144 requirementsmod.SPARSE_REQUIREMENT,
1130 1145 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1131 1146 }
1132 1147
1133 1148 # list of prefixes for files which can be written without 'wlock'
1134 1149 # Extensions should extend this list when needed
1135 1150 _wlockfreeprefix = {
1136 1151 # We might consider requiring 'wlock' for the next
1137 1152 # two, but pretty much all the existing code assumes
1138 1153 # wlock is not needed so we keep them excluded for
1139 1154 # now.
1140 1155 b'hgrc',
1141 1156 b'requires',
1142 1157 # XXX cache is a complicated business; someone
1143 1158 # should investigate this in depth at some point
1144 1159 b'cache/',
1145 1160 # XXX shouldn't be dirstate covered by the wlock?
1146 1161 b'dirstate',
1147 1162 # XXX bisect was still a bit too messy at the time
1148 1163 # this changeset was introduced. Someone should fix
1149 1164 # the remainig bit and drop this line
1150 1165 b'bisect.state',
1151 1166 }
1152 1167
1153 1168 def __init__(
1154 1169 self,
1155 1170 baseui,
1156 1171 ui,
1157 1172 origroot,
1158 1173 wdirvfs,
1159 1174 hgvfs,
1160 1175 requirements,
1161 1176 supportedrequirements,
1162 1177 sharedpath,
1163 1178 store,
1164 1179 cachevfs,
1165 1180 wcachevfs,
1166 1181 features,
1167 1182 intents=None,
1168 1183 ):
1169 1184 """Create a new local repository instance.
1170 1185
1171 1186 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1172 1187 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1173 1188 object.
1174 1189
1175 1190 Arguments:
1176 1191
1177 1192 baseui
1178 1193 ``ui.ui`` instance that ``ui`` argument was based off of.
1179 1194
1180 1195 ui
1181 1196 ``ui.ui`` instance for use by the repository.
1182 1197
1183 1198 origroot
1184 1199 ``bytes`` path to working directory root of this repository.
1185 1200
1186 1201 wdirvfs
1187 1202 ``vfs.vfs`` rooted at the working directory.
1188 1203
1189 1204 hgvfs
1190 1205 ``vfs.vfs`` rooted at .hg/
1191 1206
1192 1207 requirements
1193 1208 ``set`` of bytestrings representing repository opening requirements.
1194 1209
1195 1210 supportedrequirements
1196 1211 ``set`` of bytestrings representing repository requirements that we
1197 1212 know how to open. May be a supetset of ``requirements``.
1198 1213
1199 1214 sharedpath
1200 1215 ``bytes`` defining the path to the storage base directory. Points to a
1201 1216 ``.hg/`` directory somewhere.
1202 1217
1203 1218 store
1204 1219 ``store.basicstore`` (or derived) instance providing access to
1205 1220 versioned storage.
1206 1221
1207 1222 cachevfs
1208 1223 ``vfs.vfs`` used for cache files.
1209 1224
1210 1225 wcachevfs
1211 1226 ``vfs.vfs`` used for cache files related to the working copy.
1212 1227
1213 1228 features
1214 1229 ``set`` of bytestrings defining features/capabilities of this
1215 1230 instance.
1216 1231
1217 1232 intents
1218 1233 ``set`` of system strings indicating what this repo will be used
1219 1234 for.
1220 1235 """
1221 1236 self.baseui = baseui
1222 1237 self.ui = ui
1223 1238 self.origroot = origroot
1224 1239 # vfs rooted at working directory.
1225 1240 self.wvfs = wdirvfs
1226 1241 self.root = wdirvfs.base
1227 1242 # vfs rooted at .hg/. Used to access most non-store paths.
1228 1243 self.vfs = hgvfs
1229 1244 self.path = hgvfs.base
1230 1245 self.requirements = requirements
1231 1246 self.supported = supportedrequirements
1232 1247 self.sharedpath = sharedpath
1233 1248 self.store = store
1234 1249 self.cachevfs = cachevfs
1235 1250 self.wcachevfs = wcachevfs
1236 1251 self.features = features
1237 1252
1238 1253 self.filtername = None
1239 1254
1240 1255 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1241 1256 b'devel', b'check-locks'
1242 1257 ):
1243 1258 self.vfs.audit = self._getvfsward(self.vfs.audit)
1244 1259 # A list of callbacks to shape the phase if no data were found.
1245 1260 # Callbacks are in the form: func(repo, roots) --> processed root.
1246 1261 # This list is to be filled by extensions during repo setup
1247 1262 self._phasedefaults = []
1248 1263
1249 1264 color.setup(self.ui)
1250 1265
1251 1266 self.spath = self.store.path
1252 1267 self.svfs = self.store.vfs
1253 1268 self.sjoin = self.store.join
1254 1269 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1255 1270 b'devel', b'check-locks'
1256 1271 ):
1257 1272 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1258 1273 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1259 1274 else: # standard vfs
1260 1275 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1261 1276
1262 1277 self._dirstatevalidatewarned = False
1263 1278
1264 1279 self._branchcaches = branchmap.BranchMapCache()
1265 1280 self._revbranchcache = None
1266 1281 self._filterpats = {}
1267 1282 self._datafilters = {}
1268 1283 self._transref = self._lockref = self._wlockref = None
1269 1284
1270 1285 # A cache for various files under .hg/ that tracks file changes,
1271 1286 # (used by the filecache decorator)
1272 1287 #
1273 1288 # Maps a property name to its util.filecacheentry
1274 1289 self._filecache = {}
1275 1290
1276 1291 # hold sets of revisions to be filtered
1277 1292 # should be cleared when something might have changed the filter value:
1278 1293 # - new changesets,
1279 1294 # - phase change,
1280 1295 # - new obsolescence marker,
1281 1296 # - working directory parent change,
1282 1297 # - bookmark changes
1283 1298 self.filteredrevcache = {}
1284 1299
1285 1300 # post-dirstate-status hooks
1286 1301 self._postdsstatus = []
1287 1302
1288 1303 # generic mapping between names and nodes
1289 1304 self.names = namespaces.namespaces()
1290 1305
1291 1306 # Key to signature value.
1292 1307 self._sparsesignaturecache = {}
1293 1308 # Signature to cached matcher instance.
1294 1309 self._sparsematchercache = {}
1295 1310
1296 1311 self._extrafilterid = repoview.extrafilter(ui)
1297 1312
1298 1313 self.filecopiesmode = None
1299 1314 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1300 1315 self.filecopiesmode = b'changeset-sidedata'
1301 1316
1302 1317 def _getvfsward(self, origfunc):
1303 1318 """build a ward for self.vfs"""
1304 1319 rref = weakref.ref(self)
1305 1320
1306 1321 def checkvfs(path, mode=None):
1307 1322 ret = origfunc(path, mode=mode)
1308 1323 repo = rref()
1309 1324 if (
1310 1325 repo is None
1311 1326 or not util.safehasattr(repo, b'_wlockref')
1312 1327 or not util.safehasattr(repo, b'_lockref')
1313 1328 ):
1314 1329 return
1315 1330 if mode in (None, b'r', b'rb'):
1316 1331 return
1317 1332 if path.startswith(repo.path):
1318 1333 # truncate name relative to the repository (.hg)
1319 1334 path = path[len(repo.path) + 1 :]
1320 1335 if path.startswith(b'cache/'):
1321 1336 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1322 1337 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1323 1338 # path prefixes covered by 'lock'
1324 1339 vfs_path_prefixes = (
1325 1340 b'journal.',
1326 1341 b'undo.',
1327 1342 b'strip-backup/',
1328 1343 b'cache/',
1329 1344 )
1330 1345 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1331 1346 if repo._currentlock(repo._lockref) is None:
1332 1347 repo.ui.develwarn(
1333 1348 b'write with no lock: "%s"' % path,
1334 1349 stacklevel=3,
1335 1350 config=b'check-locks',
1336 1351 )
1337 1352 elif repo._currentlock(repo._wlockref) is None:
1338 1353 # rest of vfs files are covered by 'wlock'
1339 1354 #
1340 1355 # exclude special files
1341 1356 for prefix in self._wlockfreeprefix:
1342 1357 if path.startswith(prefix):
1343 1358 return
1344 1359 repo.ui.develwarn(
1345 1360 b'write with no wlock: "%s"' % path,
1346 1361 stacklevel=3,
1347 1362 config=b'check-locks',
1348 1363 )
1349 1364 return ret
1350 1365
1351 1366 return checkvfs
1352 1367
1353 1368 def _getsvfsward(self, origfunc):
1354 1369 """build a ward for self.svfs"""
1355 1370 rref = weakref.ref(self)
1356 1371
1357 1372 def checksvfs(path, mode=None):
1358 1373 ret = origfunc(path, mode=mode)
1359 1374 repo = rref()
1360 1375 if repo is None or not util.safehasattr(repo, b'_lockref'):
1361 1376 return
1362 1377 if mode in (None, b'r', b'rb'):
1363 1378 return
1364 1379 if path.startswith(repo.sharedpath):
1365 1380 # truncate name relative to the repository (.hg)
1366 1381 path = path[len(repo.sharedpath) + 1 :]
1367 1382 if repo._currentlock(repo._lockref) is None:
1368 1383 repo.ui.develwarn(
1369 1384 b'write with no lock: "%s"' % path, stacklevel=4
1370 1385 )
1371 1386 return ret
1372 1387
1373 1388 return checksvfs
1374 1389
1375 1390 def close(self):
1376 1391 self._writecaches()
1377 1392
1378 1393 def _writecaches(self):
1379 1394 if self._revbranchcache:
1380 1395 self._revbranchcache.write()
1381 1396
1382 1397 def _restrictcapabilities(self, caps):
1383 1398 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1384 1399 caps = set(caps)
1385 1400 capsblob = bundle2.encodecaps(
1386 1401 bundle2.getrepocaps(self, role=b'client')
1387 1402 )
1388 1403 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1389 1404 return caps
1390 1405
1391 1406 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1392 1407 # self -> auditor -> self._checknested -> self
1393 1408
1394 1409 @property
1395 1410 def auditor(self):
1396 1411 # This is only used by context.workingctx.match in order to
1397 1412 # detect files in subrepos.
1398 1413 return pathutil.pathauditor(self.root, callback=self._checknested)
1399 1414
1400 1415 @property
1401 1416 def nofsauditor(self):
1402 1417 # This is only used by context.basectx.match in order to detect
1403 1418 # files in subrepos.
1404 1419 return pathutil.pathauditor(
1405 1420 self.root, callback=self._checknested, realfs=False, cached=True
1406 1421 )
1407 1422
1408 1423 def _checknested(self, path):
1409 1424 """Determine if path is a legal nested repository."""
1410 1425 if not path.startswith(self.root):
1411 1426 return False
1412 1427 subpath = path[len(self.root) + 1 :]
1413 1428 normsubpath = util.pconvert(subpath)
1414 1429
1415 1430 # XXX: Checking against the current working copy is wrong in
1416 1431 # the sense that it can reject things like
1417 1432 #
1418 1433 # $ hg cat -r 10 sub/x.txt
1419 1434 #
1420 1435 # if sub/ is no longer a subrepository in the working copy
1421 1436 # parent revision.
1422 1437 #
1423 1438 # However, it can of course also allow things that would have
1424 1439 # been rejected before, such as the above cat command if sub/
1425 1440 # is a subrepository now, but was a normal directory before.
1426 1441 # The old path auditor would have rejected by mistake since it
1427 1442 # panics when it sees sub/.hg/.
1428 1443 #
1429 1444 # All in all, checking against the working copy seems sensible
1430 1445 # since we want to prevent access to nested repositories on
1431 1446 # the filesystem *now*.
1432 1447 ctx = self[None]
1433 1448 parts = util.splitpath(subpath)
1434 1449 while parts:
1435 1450 prefix = b'/'.join(parts)
1436 1451 if prefix in ctx.substate:
1437 1452 if prefix == normsubpath:
1438 1453 return True
1439 1454 else:
1440 1455 sub = ctx.sub(prefix)
1441 1456 return sub.checknested(subpath[len(prefix) + 1 :])
1442 1457 else:
1443 1458 parts.pop()
1444 1459 return False
1445 1460
1446 1461 def peer(self):
1447 1462 return localpeer(self) # not cached to avoid reference cycle
1448 1463
1449 1464 def unfiltered(self):
1450 1465 """Return unfiltered version of the repository
1451 1466
1452 1467 Intended to be overwritten by filtered repo."""
1453 1468 return self
1454 1469
1455 1470 def filtered(self, name, visibilityexceptions=None):
1456 1471 """Return a filtered version of a repository
1457 1472
1458 1473 The `name` parameter is the identifier of the requested view. This
1459 1474 will return a repoview object set "exactly" to the specified view.
1460 1475
1461 1476 This function does not apply recursive filtering to a repository. For
1462 1477 example calling `repo.filtered("served")` will return a repoview using
1463 1478 the "served" view, regardless of the initial view used by `repo`.
1464 1479
1465 1480 In other words, there is always only one level of `repoview` "filtering".
1466 1481 """
1467 1482 if self._extrafilterid is not None and b'%' not in name:
1468 1483 name = name + b'%' + self._extrafilterid
1469 1484
1470 1485 cls = repoview.newtype(self.unfiltered().__class__)
1471 1486 return cls(self, name, visibilityexceptions)
1472 1487
1473 1488 @mixedrepostorecache(
1474 1489 (b'bookmarks', b'plain'),
1475 1490 (b'bookmarks.current', b'plain'),
1476 1491 (b'bookmarks', b''),
1477 1492 (b'00changelog.i', b''),
1478 1493 )
1479 1494 def _bookmarks(self):
1480 1495 # Since the multiple files involved in the transaction cannot be
1481 1496 # written atomically (with current repository format), there is a race
1482 1497 # condition here.
1483 1498 #
1484 1499 # 1) changelog content A is read
1485 1500 # 2) outside transaction update changelog to content B
1486 1501 # 3) outside transaction update bookmark file referring to content B
1487 1502 # 4) bookmarks file content is read and filtered against changelog-A
1488 1503 #
1489 1504 # When this happens, bookmarks against nodes missing from A are dropped.
1490 1505 #
1491 1506 # Having this happen during a read is not great, but it becomes worse
1492 1507 # when it happens during a write, because the bookmarks pointing to the
1493 1508 # "unknown" nodes will be dropped for good. However, writes happen
1494 1509 # within locks. This locking makes it possible to have a race-free
1495 1510 # consistent read. For this purpose, data read from disk before locking
1496 1511 # is "invalidated" right after the locks are taken. These invalidations
1497 1512 # are "light": the `filecache` mechanism keeps the data in memory and
1498 1513 # will reuse it if the underlying files did not change. Not parsing the
1499 1514 # same data multiple times helps performance.
1500 1515 #
1501 1516 # Unfortunately, in the case described above, the files tracked by the
1502 1517 # bookmarks file cache might not have changed, but the in-memory
1503 1518 # content is still "wrong" because we used an older changelog content
1504 1519 # to process the on-disk data. So after locking, the changelog would be
1505 1520 # refreshed but `_bookmarks` would be preserved.
1506 1521 # Adding `00changelog.i` to the list of tracked files is not
1507 1522 # enough, because at the time we build the content for `_bookmarks` in
1508 1523 # (4), the changelog file has already diverged from the content used
1509 1524 # for loading `changelog` in (1).
1510 1525 #
1511 1526 # To prevent the issue, we force the changelog to be explicitly
1512 1527 # reloaded while computing `_bookmarks`. The data race can still happen
1513 1528 # without the lock (with a narrower window), but it would no longer go
1514 1529 # undetected during the lock-time refresh.
1515 1530 #
1516 1531 # The new schedule is as follows:
1517 1532 #
1518 1533 # 1) filecache logic detects that `_bookmarks` needs to be computed
1519 1534 # 2) cachestats for `bookmarks` and `changelog` are captured (for book)
1520 1535 # 3) we force the `changelog` filecache to be tested
1521 1536 # 4) cachestats for `changelog` are captured (for changelog)
1522 1537 # 5) `_bookmarks` is computed and cached
1523 1538 #
1524 1539 # The step in (3) ensures we have a changelog at least as recent as the
1525 1540 # cachestat computed in (1). As a result, at locking time:
1526 1541 # * if the changelog did not change since (1) -> we can reuse the data
1527 1542 # * otherwise -> the bookmarks get refreshed.
1528 1543 self._refreshchangelog()
1529 1544 return bookmarks.bmstore(self)
1530 1545
1531 1546 def _refreshchangelog(self):
1532 1547 """make sure the in memory changelog match the on-disk one"""
1533 1548 if 'changelog' in vars(self) and self.currenttransaction() is None:
1534 1549 del self.changelog
1535 1550
1536 1551 @property
1537 1552 def _activebookmark(self):
1538 1553 return self._bookmarks.active
1539 1554
1540 1555 # _phasesets depend on changelog. what we need is to call
1541 1556 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1542 1557 # can't be easily expressed in filecache mechanism.
1543 1558 @storecache(b'phaseroots', b'00changelog.i')
1544 1559 def _phasecache(self):
1545 1560 return phases.phasecache(self, self._phasedefaults)
1546 1561
1547 1562 @storecache(b'obsstore')
1548 1563 def obsstore(self):
1549 1564 return obsolete.makestore(self.ui, self)
1550 1565
1551 1566 @storecache(b'00changelog.i')
1552 1567 def changelog(self):
1553 1568 # load dirstate before changelog to avoid a race (see issue6303)
1554 1569 self.dirstate.prefetch_parents()
1555 1570 return self.store.changelog(txnutil.mayhavepending(self.root))
1556 1571
1557 1572 @storecache(b'00manifest.i')
1558 1573 def manifestlog(self):
1559 1574 return self.store.manifestlog(self, self._storenarrowmatch)
1560 1575
1561 1576 @repofilecache(b'dirstate')
1562 1577 def dirstate(self):
1563 1578 return self._makedirstate()
1564 1579
1565 1580 def _makedirstate(self):
1566 1581 """Extension point for wrapping the dirstate per-repo."""
1567 1582 sparsematchfn = lambda: sparse.matcher(self)
1568 1583
1569 1584 return dirstate.dirstate(
1570 1585 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1571 1586 )
1572 1587
1573 1588 def _dirstatevalidate(self, node):
1574 1589 try:
1575 1590 self.changelog.rev(node)
1576 1591 return node
1577 1592 except error.LookupError:
1578 1593 if not self._dirstatevalidatewarned:
1579 1594 self._dirstatevalidatewarned = True
1580 1595 self.ui.warn(
1581 1596 _(b"warning: ignoring unknown working parent %s!\n")
1582 1597 % short(node)
1583 1598 )
1584 1599 return nullid
1585 1600
1586 1601 @storecache(narrowspec.FILENAME)
1587 1602 def narrowpats(self):
1588 1603 """matcher patterns for this repository's narrowspec
1589 1604
1590 1605 A tuple of (includes, excludes).
1591 1606 """
1592 1607 return narrowspec.load(self)
1593 1608
1594 1609 @storecache(narrowspec.FILENAME)
1595 1610 def _storenarrowmatch(self):
1596 1611 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1597 1612 return matchmod.always()
1598 1613 include, exclude = self.narrowpats
1599 1614 return narrowspec.match(self.root, include=include, exclude=exclude)
1600 1615
1601 1616 @storecache(narrowspec.FILENAME)
1602 1617 def _narrowmatch(self):
1603 1618 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1604 1619 return matchmod.always()
1605 1620 narrowspec.checkworkingcopynarrowspec(self)
1606 1621 include, exclude = self.narrowpats
1607 1622 return narrowspec.match(self.root, include=include, exclude=exclude)
1608 1623
1609 1624 def narrowmatch(self, match=None, includeexact=False):
1610 1625 """matcher corresponding the the repo's narrowspec
1611 1626
1612 1627 If `match` is given, then that will be intersected with the narrow
1613 1628 matcher.
1614 1629
1615 1630 If `includeexact` is True, then any exact matches from `match` will
1616 1631 be included even if they're outside the narrowspec.
1617 1632 """
1618 1633 if match:
1619 1634 if includeexact and not self._narrowmatch.always():
1620 1635 # do not exclude explicitly-specified paths so that they can
1621 1636 # be warned about later on
1622 1637 em = matchmod.exact(match.files())
1623 1638 nm = matchmod.unionmatcher([self._narrowmatch, em])
1624 1639 return matchmod.intersectmatchers(match, nm)
1625 1640 return matchmod.intersectmatchers(match, self._narrowmatch)
1626 1641 return self._narrowmatch
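# Illustrative sketch (hypothetical matcher; the match helpers are the
# real ones used above):
#
#   m = matchmod.match(repo.root, b'', [b'glob:src/**'])
#   nm = repo.narrowmatch(m, includeexact=True)
#   # nm accepts a file only if both `m` and the narrowspec accept it,
#   # except that paths listed exactly in `m` always pass, so callers
#   # can emit warnings for them later.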
1627 1642
1628 1643 def setnarrowpats(self, newincludes, newexcludes):
1629 1644 narrowspec.save(self, newincludes, newexcludes)
1630 1645 self.invalidate(clearfilecache=True)
1631 1646
1632 1647 @unfilteredpropertycache
1633 1648 def _quick_access_changeid_null(self):
1634 1649 return {
1635 1650 b'null': (nullrev, nullid),
1636 1651 nullrev: (nullrev, nullid),
1637 1652 nullid: (nullrev, nullid),
1638 1653 }
1639 1654
1640 1655 @unfilteredpropertycache
1641 1656 def _quick_access_changeid_wc(self):
1642 1657 # also fast path access to the working copy parents
1643 1658 # however, only do it for filters that ensure the wc is visible.
1644 1659 quick = self._quick_access_changeid_null.copy()
1645 1660 cl = self.unfiltered().changelog
1646 1661 for node in self.dirstate.parents():
1647 1662 if node == nullid:
1648 1663 continue
1649 1664 rev = cl.index.get_rev(node)
1650 1665 if rev is None:
1651 1666 # unknown working copy parent case:
1652 1667 #
1653 1668 # skip the fast path and let higher code deal with it
1654 1669 continue
1655 1670 pair = (rev, node)
1656 1671 quick[rev] = pair
1657 1672 quick[node] = pair
1658 1673 # also add the parents of the parents
1659 1674 for r in cl.parentrevs(rev):
1660 1675 if r == nullrev:
1661 1676 continue
1662 1677 n = cl.node(r)
1663 1678 pair = (r, n)
1664 1679 quick[r] = pair
1665 1680 quick[n] = pair
1666 1681 p1node = self.dirstate.p1()
1667 1682 if p1node != nullid:
1668 1683 quick[b'.'] = quick[p1node]
1669 1684 return quick
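# Shape of the resulting mapping, with a hypothetical rev 42 as p1
# (added for illustration): every key resolves to a (rev, node) pair.
#
#   {b'null': (-1, nullid), -1: (-1, nullid), nullid: (-1, nullid),
#    42: (42, node42), node42: (42, node42), b'.': (42, node42)}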
1670 1685
1671 1686 @unfilteredmethod
1672 1687 def _quick_access_changeid_invalidate(self):
1673 1688 if '_quick_access_changeid_wc' in vars(self):
1674 1689 del self.__dict__['_quick_access_changeid_wc']
1675 1690
1676 1691 @property
1677 1692 def _quick_access_changeid(self):
1678 1693 """an helper dictionnary for __getitem__ calls
1679 1694
1680 1695 This contains a list of symbol we can recognise right away without
1681 1696 further processing.
1682 1697 """
1683 1698 if self.filtername in repoview.filter_has_wc:
1684 1699 return self._quick_access_changeid_wc
1685 1700 return self._quick_access_changeid_null
1686 1701
1687 1702 def __getitem__(self, changeid):
1688 1703 # dealing with special cases
1689 1704 if changeid is None:
1690 1705 return context.workingctx(self)
1691 1706 if isinstance(changeid, context.basectx):
1692 1707 return changeid
1693 1708
1694 1709 # dealing with multiple revisions
1695 1710 if isinstance(changeid, slice):
1696 1711 # wdirrev isn't contiguous so the slice shouldn't include it
1697 1712 return [
1698 1713 self[i]
1699 1714 for i in pycompat.xrange(*changeid.indices(len(self)))
1700 1715 if i not in self.changelog.filteredrevs
1701 1716 ]
1702 1717
1703 1718 # dealing with some special values
1704 1719 quick_access = self._quick_access_changeid.get(changeid)
1705 1720 if quick_access is not None:
1706 1721 rev, node = quick_access
1707 1722 return context.changectx(self, rev, node, maybe_filtered=False)
1708 1723 if changeid == b'tip':
1709 1724 node = self.changelog.tip()
1710 1725 rev = self.changelog.rev(node)
1711 1726 return context.changectx(self, rev, node)
1712 1727
1713 1728 # dealing with arbitrary values
1714 1729 try:
1715 1730 if isinstance(changeid, int):
1716 1731 node = self.changelog.node(changeid)
1717 1732 rev = changeid
1718 1733 elif changeid == b'.':
1719 1734 # this is a hack to delay/avoid loading obsmarkers
1720 1735 # when we know that '.' won't be hidden
1721 1736 node = self.dirstate.p1()
1722 1737 rev = self.unfiltered().changelog.rev(node)
1723 1738 elif len(changeid) == 20:
1724 1739 try:
1725 1740 node = changeid
1726 1741 rev = self.changelog.rev(changeid)
1727 1742 except error.FilteredLookupError:
1728 1743 changeid = hex(changeid) # for the error message
1729 1744 raise
1730 1745 except LookupError:
1731 1746 # check if it might have come from damaged dirstate
1732 1747 #
1733 1748 # XXX we could avoid the unfiltered if we had a recognizable
1734 1749 # exception for filtered changeset access
1735 1750 if (
1736 1751 self.local()
1737 1752 and changeid in self.unfiltered().dirstate.parents()
1738 1753 ):
1739 1754 msg = _(b"working directory has unknown parent '%s'!")
1740 1755 raise error.Abort(msg % short(changeid))
1741 1756 changeid = hex(changeid) # for the error message
1742 1757 raise
1743 1758
1744 1759 elif len(changeid) == 40:
1745 1760 node = bin(changeid)
1746 1761 rev = self.changelog.rev(node)
1747 1762 else:
1748 1763 raise error.ProgrammingError(
1749 1764 b"unsupported changeid '%s' of type %s"
1750 1765 % (changeid, pycompat.bytestr(type(changeid)))
1751 1766 )
1752 1767
1753 1768 return context.changectx(self, rev, node)
1754 1769
1755 1770 except (error.FilteredIndexError, error.FilteredLookupError):
1756 1771 raise error.FilteredRepoLookupError(
1757 1772 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1758 1773 )
1759 1774 except (IndexError, LookupError):
1760 1775 raise error.RepoLookupError(
1761 1776 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1762 1777 )
1763 1778 except error.WdirUnsupported:
1764 1779 return context.workingctx(self)
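# Illustrative usage (hypothetical calls exercising the lookup paths
# implemented above):
#
#   repo[None]    # workingctx for the working directory
#   repo[b'tip']  # changectx for the tip changeset
#   repo[0]       # changectx by integer revision
#   repo[b'.']    # first parent of the working directory
#   repo[0:5]     # list of changectx, skipping filtered revisions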
1765 1780
1766 1781 def __contains__(self, changeid):
1767 1782 """True if the given changeid exists"""
1768 1783 try:
1769 1784 self[changeid]
1770 1785 return True
1771 1786 except error.RepoLookupError:
1772 1787 return False
1773 1788
1774 1789 def __nonzero__(self):
1775 1790 return True
1776 1791
1777 1792 __bool__ = __nonzero__
1778 1793
1779 1794 def __len__(self):
1780 1795 # no need to pay the cost of repoview.changelog
1781 1796 unfi = self.unfiltered()
1782 1797 return len(unfi.changelog)
1783 1798
1784 1799 def __iter__(self):
1785 1800 return iter(self.changelog)
1786 1801
1787 1802 def revs(self, expr, *args):
1788 1803 """Find revisions matching a revset.
1789 1804
1790 1805 The revset is specified as a string ``expr`` that may contain
1791 1806 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1792 1807
1793 1808 Revset aliases from the configuration are not expanded. To expand
1794 1809 user aliases, consider calling ``scmutil.revrange()`` or
1795 1810 ``repo.anyrevs([expr], user=True)``.
1796 1811
1797 1812 Returns a smartset.abstractsmartset, which is a list-like interface
1798 1813 that contains integer revisions.
1799 1814 """
1800 1815 tree = revsetlang.spectree(expr, *args)
1801 1816 return revset.makematcher(tree)(self)
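# Illustrative sketch (hypothetical revsets): %-formatting escapes the
# arguments, e.g. %d for an integer revision and %ld for a list of
# integers (see ``revsetlang.formatspec``):
#
#   repo.revs(b'ancestors(%d)', 42)
#   repo.revs(b'%ld and public()', [1, 2, 3])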
1802 1817
1803 1818 def set(self, expr, *args):
1804 1819 """Find revisions matching a revset and emit changectx instances.
1805 1820
1806 1821 This is a convenience wrapper around ``revs()`` that iterates the
1807 1822 result and is a generator of changectx instances.
1808 1823
1809 1824 Revset aliases from the configuration are not expanded. To expand
1810 1825 user aliases, consider calling ``scmutil.revrange()``.
1811 1826 """
1812 1827 for r in self.revs(expr, *args):
1813 1828 yield self[r]
1814 1829
1815 1830 def anyrevs(self, specs, user=False, localalias=None):
1816 1831 """Find revisions matching one of the given revsets.
1817 1832
1818 1833 Revset aliases from the configuration are not expanded by default. To
1819 1834 expand user aliases, specify ``user=True``. To provide some local
1820 1835 definitions overriding user aliases, set ``localalias`` to
1821 1836 ``{name: definitionstring}``.
1822 1837 """
1823 1838 if specs == [b'null']:
1824 1839 return revset.baseset([nullrev])
1825 1840 if specs == [b'.']:
1826 1841 quick_data = self._quick_access_changeid.get(b'.')
1827 1842 if quick_data is not None:
1828 1843 return revset.baseset([quick_data[0]])
1829 1844 if user:
1830 1845 m = revset.matchany(
1831 1846 self.ui,
1832 1847 specs,
1833 1848 lookup=revset.lookupfn(self),
1834 1849 localalias=localalias,
1835 1850 )
1836 1851 else:
1837 1852 m = revset.matchany(None, specs, localalias=localalias)
1838 1853 return m(self)
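# Illustrative sketch (hypothetical alias name and definition):
#
#   repo.anyrevs([b'recent'], user=True,
#                localalias={b'recent': b'last(all(), 3)'})
#   # expands user-configured aliases, with b'recent' locally overridden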
1839 1854
1840 1855 def url(self):
1841 1856 return b'file:' + self.root
1842 1857
1843 1858 def hook(self, name, throw=False, **args):
1844 1859 """Call a hook, passing this repo instance.
1845 1860
1846 1861 This is a convenience method to aid invoking hooks. Extensions likely
1847 1862 won't call this unless they have registered a custom hook or are
1848 1863 replacing code that is expected to call a hook.
1849 1864 """
1850 1865 return hook.hook(self.ui, self, name, throw, **args)
1851 1866
1852 1867 @filteredpropertycache
1853 1868 def _tagscache(self):
1854 1869 """Returns a tagscache object that contains various tags related
1855 1870 caches."""
1856 1871
1857 1872 # This simplifies its cache management by having one decorated
1858 1873 # function (this one) and the rest simply fetch things from it.
1859 1874 class tagscache(object):
1860 1875 def __init__(self):
1861 1876 # These two define the set of tags for this repository. tags
1862 1877 # maps tag name to node; tagtypes maps tag name to 'global' or
1863 1878 # 'local'. (Global tags are defined by .hgtags across all
1864 1879 # heads, and local tags are defined in .hg/localtags.)
1865 1880 # They constitute the in-memory cache of tags.
1866 1881 self.tags = self.tagtypes = None
1867 1882
1868 1883 self.nodetagscache = self.tagslist = None
1869 1884
1870 1885 cache = tagscache()
1871 1886 cache.tags, cache.tagtypes = self._findtags()
1872 1887
1873 1888 return cache
1874 1889
1875 1890 def tags(self):
1876 1891 '''return a mapping of tag to node'''
1877 1892 t = {}
1878 1893 if self.changelog.filteredrevs:
1879 1894 tags, tt = self._findtags()
1880 1895 else:
1881 1896 tags = self._tagscache.tags
1882 1897 rev = self.changelog.rev
1883 1898 for k, v in pycompat.iteritems(tags):
1884 1899 try:
1885 1900 # ignore tags to unknown nodes
1886 1901 rev(v)
1887 1902 t[k] = v
1888 1903 except (error.LookupError, ValueError):
1889 1904 pass
1890 1905 return t
1891 1906
1892 1907 def _findtags(self):
1893 1908 """Do the hard work of finding tags. Return a pair of dicts
1894 1909 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1895 1910 maps tag name to a string like \'global\' or \'local\'.
1896 1911 Subclasses or extensions are free to add their own tags, but
1897 1912 should be aware that the returned dicts will be retained for the
1898 1913 duration of the localrepo object."""
1899 1914
1900 1915 # XXX what tagtype should subclasses/extensions use? Currently
1901 1916 # mq and bookmarks add tags, but do not set the tagtype at all.
1902 1917 # Should each extension invent its own tag type? Should there
1903 1918 # be one tagtype for all such "virtual" tags? Or is the status
1904 1919 # quo fine?
1905 1920
1906 1921 # map tag name to (node, hist)
1907 1922 alltags = tagsmod.findglobaltags(self.ui, self)
1908 1923 # map tag name to tag type
1909 1924 tagtypes = {tag: b'global' for tag in alltags}
1910 1925
1911 1926 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1912 1927
1913 1928 # Build the return dicts. Have to re-encode tag names because
1914 1929 # the tags module always uses UTF-8 (in order not to lose info
1915 1930 # writing to the cache), but the rest of Mercurial wants them in
1916 1931 # local encoding.
1917 1932 tags = {}
1918 1933 for (name, (node, hist)) in pycompat.iteritems(alltags):
1919 1934 if node != nullid:
1920 1935 tags[encoding.tolocal(name)] = node
1921 1936 tags[b'tip'] = self.changelog.tip()
1922 1937 tagtypes = {
1923 1938 encoding.tolocal(name): value
1924 1939 for (name, value) in pycompat.iteritems(tagtypes)
1925 1940 }
1926 1941 return (tags, tagtypes)
1927 1942
1928 1943 def tagtype(self, tagname):
1929 1944 """
1930 1945 return the type of the given tag. Result can be:
1931 1946
1932 1947 'local' : a local tag
1933 1948 'global' : a global tag
1934 1949 None : tag does not exist
1935 1950 """
1936 1951
1937 1952 return self._tagscache.tagtypes.get(tagname)
1938 1953
1939 1954 def tagslist(self):
1940 1955 '''return a list of tags ordered by revision'''
1941 1956 if not self._tagscache.tagslist:
1942 1957 l = []
1943 1958 for t, n in pycompat.iteritems(self.tags()):
1944 1959 l.append((self.changelog.rev(n), t, n))
1945 1960 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1946 1961
1947 1962 return self._tagscache.tagslist
1948 1963
1949 1964 def nodetags(self, node):
1950 1965 '''return the tags associated with a node'''
1951 1966 if not self._tagscache.nodetagscache:
1952 1967 nodetagscache = {}
1953 1968 for t, n in pycompat.iteritems(self._tagscache.tags):
1954 1969 nodetagscache.setdefault(n, []).append(t)
1955 1970 for tags in pycompat.itervalues(nodetagscache):
1956 1971 tags.sort()
1957 1972 self._tagscache.nodetagscache = nodetagscache
1958 1973 return self._tagscache.nodetagscache.get(node, [])
1959 1974
1960 1975 def nodebookmarks(self, node):
1961 1976 """return the list of bookmarks pointing to the specified node"""
1962 1977 return self._bookmarks.names(node)
1963 1978
1964 1979 def branchmap(self):
1965 1980 """returns a dictionary {branch: [branchheads]} with branchheads
1966 1981 ordered by increasing revision number"""
1967 1982 return self._branchcaches[self]
1968 1983
1969 1984 @unfilteredmethod
1970 1985 def revbranchcache(self):
1971 1986 if not self._revbranchcache:
1972 1987 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1973 1988 return self._revbranchcache
1974 1989
1975 1990 def branchtip(self, branch, ignoremissing=False):
1976 1991 """return the tip node for a given branch
1977 1992
1978 1993 If ignoremissing is True, then this method will not raise an error.
1979 1994 This is helpful for callers that only expect None for a missing branch
1980 1995 (e.g. namespace).
1981 1996
1982 1997 """
1983 1998 try:
1984 1999 return self.branchmap().branchtip(branch)
1985 2000 except KeyError:
1986 2001 if not ignoremissing:
1987 2002 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1988 2003 else:
1989 2004 pass
1990 2005
1991 2006 def lookup(self, key):
1992 2007 node = scmutil.revsymbol(self, key).node()
1993 2008 if node is None:
1994 2009 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1995 2010 return node
1996 2011
1997 2012 def lookupbranch(self, key):
1998 2013 if self.branchmap().hasbranch(key):
1999 2014 return key
2000 2015
2001 2016 return scmutil.revsymbol(self, key).branch()
2002 2017
2003 2018 def known(self, nodes):
2004 2019 cl = self.changelog
2005 2020 get_rev = cl.index.get_rev
2006 2021 filtered = cl.filteredrevs
2007 2022 result = []
2008 2023 for n in nodes:
2009 2024 r = get_rev(n)
2010 2025 resp = not (r is None or r in filtered)
2011 2026 result.append(resp)
2012 2027 return result
2013 2028
2014 2029 def local(self):
2015 2030 return self
2016 2031
2017 2032 def publishing(self):
2018 2033 # it's safe (and desirable) to trust the publish flag unconditionally
2019 2034 # so that we don't finalize changes shared between users via ssh or nfs
2020 2035 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2021 2036
2022 2037 def cancopy(self):
2023 2038 # so statichttprepo's override of local() works
2024 2039 if not self.local():
2025 2040 return False
2026 2041 if not self.publishing():
2027 2042 return True
2028 2043 # if publishing we can't copy if there is filtered content
2029 2044 return not self.filtered(b'visible').changelog.filteredrevs
2030 2045
2031 2046 def shared(self):
2032 2047 '''the type of shared repository (None if not shared)'''
2033 2048 if self.sharedpath != self.path:
2034 2049 return b'store'
2035 2050 return None
2036 2051
2037 2052 def wjoin(self, f, *insidef):
2038 2053 return self.vfs.reljoin(self.root, f, *insidef)
2039 2054
2040 2055 def setparents(self, p1, p2=nullid):
2041 2056 self[None].setparents(p1, p2)
2042 2057 self._quick_access_changeid_invalidate()
2043 2058
2044 2059 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2045 2060 """changeid must be a changeset revision, if specified.
2046 2061 fileid can be a file revision or node."""
2047 2062 return context.filectx(
2048 2063 self, path, changeid, fileid, changectx=changectx
2049 2064 )
2050 2065
2051 2066 def getcwd(self):
2052 2067 return self.dirstate.getcwd()
2053 2068
2054 2069 def pathto(self, f, cwd=None):
2055 2070 return self.dirstate.pathto(f, cwd)
2056 2071
2057 2072 def _loadfilter(self, filter):
2058 2073 if filter not in self._filterpats:
2059 2074 l = []
2060 2075 for pat, cmd in self.ui.configitems(filter):
2061 2076 if cmd == b'!':
2062 2077 continue
2063 2078 mf = matchmod.match(self.root, b'', [pat])
2064 2079 fn = None
2065 2080 params = cmd
2066 2081 for name, filterfn in pycompat.iteritems(self._datafilters):
2067 2082 if cmd.startswith(name):
2068 2083 fn = filterfn
2069 2084 params = cmd[len(name) :].lstrip()
2070 2085 break
2071 2086 if not fn:
2072 2087 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2073 2088 fn.__name__ = 'commandfilter'
2074 2089 # Wrap old filters not supporting keyword arguments
2075 2090 if not pycompat.getargspec(fn)[2]:
2076 2091 oldfn = fn
2077 2092 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2078 2093 fn.__name__ = 'compat-' + oldfn.__name__
2079 2094 l.append((mf, fn, params))
2080 2095 self._filterpats[filter] = l
2081 2096 return self._filterpats[filter]
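# Illustrative hgrc sketch for the [encode]/[decode] sections read above
# (the gzip entries are examples only; "!" disables filtering for
# matching files, and other commands are run as shell filters):
#
#   [encode]
#   *.gz = pipe: gunzip
#
#   [decode]
#   *.gz = pipe: gzip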
2082 2097
2083 2098 def _filter(self, filterpats, filename, data):
2084 2099 for mf, fn, cmd in filterpats:
2085 2100 if mf(filename):
2086 2101 self.ui.debug(
2087 2102 b"filtering %s through %s\n"
2088 2103 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2089 2104 )
2090 2105 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2091 2106 break
2092 2107
2093 2108 return data
2094 2109
2095 2110 @unfilteredpropertycache
2096 2111 def _encodefilterpats(self):
2097 2112 return self._loadfilter(b'encode')
2098 2113
2099 2114 @unfilteredpropertycache
2100 2115 def _decodefilterpats(self):
2101 2116 return self._loadfilter(b'decode')
2102 2117
2103 2118 def adddatafilter(self, name, filter):
2104 2119 self._datafilters[name] = filter
2105 2120
2106 2121 def wread(self, filename):
2107 2122 if self.wvfs.islink(filename):
2108 2123 data = self.wvfs.readlink(filename)
2109 2124 else:
2110 2125 data = self.wvfs.read(filename)
2111 2126 return self._filter(self._encodefilterpats, filename, data)
2112 2127
2113 2128 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2114 2129 """write ``data`` into ``filename`` in the working directory
2115 2130
2116 2131 This returns the length of the written (possibly decoded) data.
2117 2132 """
2118 2133 data = self._filter(self._decodefilterpats, filename, data)
2119 2134 if b'l' in flags:
2120 2135 self.wvfs.symlink(data, filename)
2121 2136 else:
2122 2137 self.wvfs.write(
2123 2138 filename, data, backgroundclose=backgroundclose, **kwargs
2124 2139 )
2125 2140 if b'x' in flags:
2126 2141 self.wvfs.setflags(filename, False, True)
2127 2142 else:
2128 2143 self.wvfs.setflags(filename, False, False)
2129 2144 return len(data)
2130 2145
2131 2146 def wwritedata(self, filename, data):
2132 2147 return self._filter(self._decodefilterpats, filename, data)
2133 2148
2134 2149 def currenttransaction(self):
2135 2150 """return the current transaction or None if non exists"""
2136 2151 if self._transref:
2137 2152 tr = self._transref()
2138 2153 else:
2139 2154 tr = None
2140 2155
2141 2156 if tr and tr.running():
2142 2157 return tr
2143 2158 return None
2144 2159
2145 2160 def transaction(self, desc, report=None):
2146 2161 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2147 2162 b'devel', b'check-locks'
2148 2163 ):
2149 2164 if self._currentlock(self._lockref) is None:
2150 2165 raise error.ProgrammingError(b'transaction requires locking')
2151 2166 tr = self.currenttransaction()
2152 2167 if tr is not None:
2153 2168 return tr.nest(name=desc)
2154 2169
2155 2170 # abort here if the journal already exists
2156 2171 if self.svfs.exists(b"journal"):
2157 2172 raise error.RepoError(
2158 2173 _(b"abandoned transaction found"),
2159 2174 hint=_(b"run 'hg recover' to clean up transaction"),
2160 2175 )
2161 2176
2162 2177 idbase = b"%.40f#%f" % (random.random(), time.time())
2163 2178 ha = hex(hashutil.sha1(idbase).digest())
2164 2179 txnid = b'TXN:' + ha
2165 2180 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2166 2181
2167 2182 self._writejournal(desc)
2168 2183 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2169 2184 if report:
2170 2185 rp = report
2171 2186 else:
2172 2187 rp = self.ui.warn
2173 2188 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2174 2189 # we must avoid cyclic reference between repo and transaction.
2175 2190 reporef = weakref.ref(self)
2176 2191 # Code to track tag movement
2177 2192 #
2178 2193 # Since tags are all handled as file content, it is actually quite hard
2179 2194 # to track these movements from a code perspective. So we fall back to
2180 2195 # tracking at the repository level. One could envision tracking changes
2181 2196 # to the '.hgtags' file through changegroup application, but that
2182 2197 # fails to cope with cases where a transaction exposes new heads
2183 2198 # without a changegroup being involved (e.g. phase movement).
2184 2199 #
2185 2200 # For now, we gate the feature behind a flag since it likely comes
2186 2201 # with performance impacts. The current code runs more often than
2187 2202 # needed and does not use caches as much as it could. The current focus is on
2188 2203 # the behavior of the feature so we disable it by default. The flag
2189 2204 # will be removed when we are happy with the performance impact.
2190 2205 #
2191 2206 # Once this feature is no longer experimental move the following
2192 2207 # documentation to the appropriate help section:
2193 2208 #
2194 2209 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2195 2210 # tags (new or changed or deleted tags). In addition the details of
2196 2211 # these changes are made available in a file at:
2197 2212 # ``REPOROOT/.hg/changes/tags.changes``.
2198 2213 # Make sure you check for HG_TAG_MOVED before reading that file as it
2199 2214 # might exist from a previous transaction even if no tag were touched
2200 2215 # in this one. Changes are recorded in a line base format::
2201 2216 #
2202 2217 # <action> <hex-node> <tag-name>\n
2203 2218 #
2204 2219 # Actions are defined as follows:
2205 2220 # "-R": tag is removed,
2206 2221 # "+A": tag is added,
2207 2222 # "-M": tag is moved (old value),
2208 2223 # "+M": tag is moved (new value),
2209 2224 tracktags = lambda x: None
2210 2225 # experimental config: experimental.hook-track-tags
2211 2226 shouldtracktags = self.ui.configbool(
2212 2227 b'experimental', b'hook-track-tags'
2213 2228 )
2214 2229 if desc != b'strip' and shouldtracktags:
2215 2230 oldheads = self.changelog.headrevs()
2216 2231
2217 2232 def tracktags(tr2):
2218 2233 repo = reporef()
2219 2234 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2220 2235 newheads = repo.changelog.headrevs()
2221 2236 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2222 2237 # notes: we compare lists here.
2223 2238 # As we do it only once, building sets would not be cheaper
2224 2239 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2225 2240 if changes:
2226 2241 tr2.hookargs[b'tag_moved'] = b'1'
2227 2242 with repo.vfs(
2228 2243 b'changes/tags.changes', b'w', atomictemp=True
2229 2244 ) as changesfile:
2230 2245 # note: we do not register the file with the transaction
2231 2246 # because we need it to still exist when the transaction
2232 2247 # is closed (for txnclose hooks)
2233 2248 tagsmod.writediff(changesfile, changes)
2234 2249
2235 2250 def validate(tr2):
2236 2251 """will run pre-closing hooks"""
2237 2252 # XXX the transaction API is a bit lacking here so we take a hacky
2238 2253 # path for now
2239 2254 #
2240 2255 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2241 2256 # dict is copied before these run. In addition we need the data
2242 2257 # available to in-memory hooks too.
2243 2258 #
2244 2259 # Moreover, we also need to make sure this runs before txnclose
2245 2260 # hooks and there is no "pending" mechanism that would execute
2246 2261 # logic only if hooks are about to run.
2247 2262 #
2248 2263 # Fixing this limitation of the transaction is also needed to track
2249 2264 # other families of changes (bookmarks, phases, obsolescence).
2250 2265 #
2251 2266 # This will have to be fixed before we remove the experimental
2252 2267 # gating.
2253 2268 tracktags(tr2)
2254 2269 repo = reporef()
2255 2270
2256 2271 singleheadopt = (b'experimental', b'single-head-per-branch')
2257 2272 singlehead = repo.ui.configbool(*singleheadopt)
2258 2273 if singlehead:
2259 2274 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2260 2275 accountclosed = singleheadsub.get(
2261 2276 b"account-closed-heads", False
2262 2277 )
2263 2278 if singleheadsub.get(b"public-changes-only", False):
2264 2279 filtername = b"immutable"
2265 2280 else:
2266 2281 filtername = b"visible"
2267 2282 scmutil.enforcesinglehead(
2268 2283 repo, tr2, desc, accountclosed, filtername
2269 2284 )
2270 2285 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2271 2286 for name, (old, new) in sorted(
2272 2287 tr.changes[b'bookmarks'].items()
2273 2288 ):
2274 2289 args = tr.hookargs.copy()
2275 2290 args.update(bookmarks.preparehookargs(name, old, new))
2276 2291 repo.hook(
2277 2292 b'pretxnclose-bookmark',
2278 2293 throw=True,
2279 2294 **pycompat.strkwargs(args)
2280 2295 )
2281 2296 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2282 2297 cl = repo.unfiltered().changelog
2283 2298 for revs, (old, new) in tr.changes[b'phases']:
2284 2299 for rev in revs:
2285 2300 args = tr.hookargs.copy()
2286 2301 node = hex(cl.node(rev))
2287 2302 args.update(phases.preparehookargs(node, old, new))
2288 2303 repo.hook(
2289 2304 b'pretxnclose-phase',
2290 2305 throw=True,
2291 2306 **pycompat.strkwargs(args)
2292 2307 )
2293 2308
2294 2309 repo.hook(
2295 2310 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2296 2311 )
2297 2312
2298 2313 def releasefn(tr, success):
2299 2314 repo = reporef()
2300 2315 if repo is None:
2301 2316 # If the repo has been GC'd (and this release function is being
2302 2317 # called from transaction.__del__), there's not much we can do,
2303 2318 # so just leave the unfinished transaction there and let the
2304 2319 # user run `hg recover`.
2305 2320 return
2306 2321 if success:
2307 2322 # this should be explicitly invoked here, because
2308 2323 # in-memory changes aren't written out when the
2309 2324 # transaction closes if tr.addfilegenerator (via
2310 2325 # dirstate.write or so) wasn't invoked while the
2311 2326 # transaction was running
2312 2327 repo.dirstate.write(None)
2313 2328 else:
2314 2329 # discard all changes (including ones already written
2315 2330 # out) in this transaction
2316 2331 narrowspec.restorebackup(self, b'journal.narrowspec')
2317 2332 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2318 2333 repo.dirstate.restorebackup(None, b'journal.dirstate')
2319 2334
2320 2335 repo.invalidate(clearfilecache=True)
2321 2336
2322 2337 tr = transaction.transaction(
2323 2338 rp,
2324 2339 self.svfs,
2325 2340 vfsmap,
2326 2341 b"journal",
2327 2342 b"undo",
2328 2343 aftertrans(renames),
2329 2344 self.store.createmode,
2330 2345 validator=validate,
2331 2346 releasefn=releasefn,
2332 2347 checkambigfiles=_cachedfiles,
2333 2348 name=desc,
2334 2349 )
2335 2350 tr.changes[b'origrepolen'] = len(self)
2336 2351 tr.changes[b'obsmarkers'] = set()
2337 2352 tr.changes[b'phases'] = []
2338 2353 tr.changes[b'bookmarks'] = {}
2339 2354
2340 2355 tr.hookargs[b'txnid'] = txnid
2341 2356 tr.hookargs[b'txnname'] = desc
2342 2357 tr.hookargs[b'changes'] = tr.changes
2343 2358 # note: writing the fncache only during finalize means that the file is
2344 2359 # outdated when running hooks. As fncache is used for streaming clone,
2345 2360 # this is not expected to break anything that happens during the hooks.
2346 2361 tr.addfinalize(b'flush-fncache', self.store.write)
2347 2362
2348 2363 def txnclosehook(tr2):
2349 2364 """To be run if transaction is successful, will schedule a hook run"""
2350 2365 # Don't reference tr2 in hook() so we don't hold a reference.
2351 2366 # This reduces memory consumption when there are multiple
2352 2367 # transactions per lock. This can likely go away if issue5045
2353 2368 # fixes the function accumulation.
2354 2369 hookargs = tr2.hookargs
2355 2370
2356 2371 def hookfunc(unused_success):
2357 2372 repo = reporef()
2358 2373 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2359 2374 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2360 2375 for name, (old, new) in bmchanges:
2361 2376 args = tr.hookargs.copy()
2362 2377 args.update(bookmarks.preparehookargs(name, old, new))
2363 2378 repo.hook(
2364 2379 b'txnclose-bookmark',
2365 2380 throw=False,
2366 2381 **pycompat.strkwargs(args)
2367 2382 )
2368 2383
2369 2384 if hook.hashook(repo.ui, b'txnclose-phase'):
2370 2385 cl = repo.unfiltered().changelog
2371 2386 phasemv = sorted(
2372 2387 tr.changes[b'phases'], key=lambda r: r[0][0]
2373 2388 )
2374 2389 for revs, (old, new) in phasemv:
2375 2390 for rev in revs:
2376 2391 args = tr.hookargs.copy()
2377 2392 node = hex(cl.node(rev))
2378 2393 args.update(phases.preparehookargs(node, old, new))
2379 2394 repo.hook(
2380 2395 b'txnclose-phase',
2381 2396 throw=False,
2382 2397 **pycompat.strkwargs(args)
2383 2398 )
2384 2399
2385 2400 repo.hook(
2386 2401 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2387 2402 )
2388 2403
2389 2404 reporef()._afterlock(hookfunc)
2390 2405
2391 2406 tr.addfinalize(b'txnclose-hook', txnclosehook)
2392 2407 # Include a leading "-" to make it happen before the transaction summary
2393 2408 # reports registered via scmutil.registersummarycallback() whose names
2394 2409 # are 00-txnreport etc. That way, the caches will be warm when the
2395 2410 # callbacks run.
2396 2411 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2397 2412
2398 2413 def txnaborthook(tr2):
2399 2414 """To be run if transaction is aborted"""
2400 2415 reporef().hook(
2401 2416 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2402 2417 )
2403 2418
2404 2419 tr.addabort(b'txnabort-hook', txnaborthook)
2405 2420 # avoid eager cache invalidation. in-memory data should be identical
2406 2421 # to stored data if transaction has no error.
2407 2422 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2408 2423 self._transref = weakref.ref(tr)
2409 2424 scmutil.registersummarycallback(self, tr, desc)
2410 2425 return tr
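# Illustrative usage sketch (hypothetical description string; the lock
# ordering and context-manager use mirror real call sites in this file,
# e.g. commit() below):
#
#   with repo.wlock(), repo.lock():
#       with repo.transaction(b'my-operation') as tr:
#           ...  # mutate the store; leaving the block cleanly closes tr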
2411 2426
2412 2427 def _journalfiles(self):
2413 2428 return (
2414 2429 (self.svfs, b'journal'),
2415 2430 (self.svfs, b'journal.narrowspec'),
2416 2431 (self.vfs, b'journal.narrowspec.dirstate'),
2417 2432 (self.vfs, b'journal.dirstate'),
2418 2433 (self.vfs, b'journal.branch'),
2419 2434 (self.vfs, b'journal.desc'),
2420 2435 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2421 2436 (self.svfs, b'journal.phaseroots'),
2422 2437 )
2423 2438
2424 2439 def undofiles(self):
2425 2440 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2426 2441
2427 2442 @unfilteredmethod
2428 2443 def _writejournal(self, desc):
2429 2444 self.dirstate.savebackup(None, b'journal.dirstate')
2430 2445 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2431 2446 narrowspec.savebackup(self, b'journal.narrowspec')
2432 2447 self.vfs.write(
2433 2448 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2434 2449 )
2435 2450 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2436 2451 bookmarksvfs = bookmarks.bookmarksvfs(self)
2437 2452 bookmarksvfs.write(
2438 2453 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2439 2454 )
2440 2455 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2441 2456
2442 2457 def recover(self):
2443 2458 with self.lock():
2444 2459 if self.svfs.exists(b"journal"):
2445 2460 self.ui.status(_(b"rolling back interrupted transaction\n"))
2446 2461 vfsmap = {
2447 2462 b'': self.svfs,
2448 2463 b'plain': self.vfs,
2449 2464 }
2450 2465 transaction.rollback(
2451 2466 self.svfs,
2452 2467 vfsmap,
2453 2468 b"journal",
2454 2469 self.ui.warn,
2455 2470 checkambigfiles=_cachedfiles,
2456 2471 )
2457 2472 self.invalidate()
2458 2473 return True
2459 2474 else:
2460 2475 self.ui.warn(_(b"no interrupted transaction available\n"))
2461 2476 return False
2462 2477
2463 2478 def rollback(self, dryrun=False, force=False):
2464 2479 wlock = lock = dsguard = None
2465 2480 try:
2466 2481 wlock = self.wlock()
2467 2482 lock = self.lock()
2468 2483 if self.svfs.exists(b"undo"):
2469 2484 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2470 2485
2471 2486 return self._rollback(dryrun, force, dsguard)
2472 2487 else:
2473 2488 self.ui.warn(_(b"no rollback information available\n"))
2474 2489 return 1
2475 2490 finally:
2476 2491 release(dsguard, lock, wlock)
2477 2492
2478 2493 @unfilteredmethod # Until we get smarter cache management
2479 2494 def _rollback(self, dryrun, force, dsguard):
2480 2495 ui = self.ui
2481 2496 try:
2482 2497 args = self.vfs.read(b'undo.desc').splitlines()
2483 2498 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2484 2499 if len(args) >= 3:
2485 2500 detail = args[2]
2486 2501 oldtip = oldlen - 1
2487 2502
2488 2503 if detail and ui.verbose:
2489 2504 msg = _(
2490 2505 b'repository tip rolled back to revision %d'
2491 2506 b' (undo %s: %s)\n'
2492 2507 ) % (oldtip, desc, detail)
2493 2508 else:
2494 2509 msg = _(
2495 2510 b'repository tip rolled back to revision %d (undo %s)\n'
2496 2511 ) % (oldtip, desc)
2497 2512 except IOError:
2498 2513 msg = _(b'rolling back unknown transaction\n')
2499 2514 desc = None
2500 2515
2501 2516 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2502 2517 raise error.Abort(
2503 2518 _(
2504 2519 b'rollback of last commit while not checked out '
2505 2520 b'may lose data'
2506 2521 ),
2507 2522 hint=_(b'use -f to force'),
2508 2523 )
2509 2524
2510 2525 ui.status(msg)
2511 2526 if dryrun:
2512 2527 return 0
2513 2528
2514 2529 parents = self.dirstate.parents()
2515 2530 self.destroying()
2516 2531 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2517 2532 transaction.rollback(
2518 2533 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2519 2534 )
2520 2535 bookmarksvfs = bookmarks.bookmarksvfs(self)
2521 2536 if bookmarksvfs.exists(b'undo.bookmarks'):
2522 2537 bookmarksvfs.rename(
2523 2538 b'undo.bookmarks', b'bookmarks', checkambig=True
2524 2539 )
2525 2540 if self.svfs.exists(b'undo.phaseroots'):
2526 2541 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2527 2542 self.invalidate()
2528 2543
2529 2544 has_node = self.changelog.index.has_node
2530 2545 parentgone = any(not has_node(p) for p in parents)
2531 2546 if parentgone:
2532 2547 # prevent dirstateguard from overwriting the already-restored one
2533 2548 dsguard.close()
2534 2549
2535 2550 narrowspec.restorebackup(self, b'undo.narrowspec')
2536 2551 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2537 2552 self.dirstate.restorebackup(None, b'undo.dirstate')
2538 2553 try:
2539 2554 branch = self.vfs.read(b'undo.branch')
2540 2555 self.dirstate.setbranch(encoding.tolocal(branch))
2541 2556 except IOError:
2542 2557 ui.warn(
2543 2558 _(
2544 2559 b'named branch could not be reset: '
2545 2560 b'current branch is still \'%s\'\n'
2546 2561 )
2547 2562 % self.dirstate.branch()
2548 2563 )
2549 2564
2550 2565 parents = tuple([p.rev() for p in self[None].parents()])
2551 2566 if len(parents) > 1:
2552 2567 ui.status(
2553 2568 _(
2554 2569 b'working directory now based on '
2555 2570 b'revisions %d and %d\n'
2556 2571 )
2557 2572 % parents
2558 2573 )
2559 2574 else:
2560 2575 ui.status(
2561 2576 _(b'working directory now based on revision %d\n') % parents
2562 2577 )
2563 2578 mergestatemod.mergestate.clean(self)
2564 2579
2565 2580 # TODO: if we know which new heads may result from this rollback, pass
2566 2581 # them to destroy(), which will prevent the branchhead cache from being
2567 2582 # invalidated.
2568 2583 self.destroyed()
2569 2584 return 0
2570 2585
2571 2586 def _buildcacheupdater(self, newtransaction):
2572 2587 """called during transaction to build the callback updating cache
2573 2588
2574 2589 Lives on the repository to help extensions that might want to augment
2575 2590 this logic. For this purpose, the created transaction is passed to the
2576 2591 method.
2577 2592 """
2578 2593 # we must avoid a cyclic reference between repo and transaction.
2579 2594 reporef = weakref.ref(self)
2580 2595
2581 2596 def updater(tr):
2582 2597 repo = reporef()
2583 2598 repo.updatecaches(tr)
2584 2599
2585 2600 return updater
2586 2601
2587 2602 @unfilteredmethod
2588 2603 def updatecaches(self, tr=None, full=False):
2589 2604 """warm appropriate caches
2590 2605
2591 2606 If this function is called after a transaction has closed, the transaction
2592 2607 will be available in the 'tr' argument. This can be used to selectively
2593 2608 update caches relevant to the changes in that transaction.
2594 2609
2595 2610 If 'full' is set, make sure all caches the function knows about have
2596 2611 up-to-date data, even the ones usually loaded more lazily.
2597 2612 """
2598 2613 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2599 2614 # During strip, many caches are invalid but
2600 2615 # later call to `destroyed` will refresh them.
2601 2616 return
2602 2617
2603 2618 if tr is None or tr.changes[b'origrepolen'] < len(self):
2604 2619 # accessing the 'served' branchmap should refresh all the others,
2605 2620 self.ui.debug(b'updating the branch cache\n')
2606 2621 self.filtered(b'served').branchmap()
2607 2622 self.filtered(b'served.hidden').branchmap()
2608 2623
2609 2624 if full:
2610 2625 unfi = self.unfiltered()
2611 2626
2612 2627 self.changelog.update_caches(transaction=tr)
2613 2628 self.manifestlog.update_caches(transaction=tr)
2614 2629
2615 2630 rbc = unfi.revbranchcache()
2616 2631 for r in unfi.changelog:
2617 2632 rbc.branchinfo(r)
2618 2633 rbc.write()
2619 2634
2620 2635 # ensure the working copy parents are in the manifestfulltextcache
2621 2636 for ctx in self[b'.'].parents():
2622 2637 ctx.manifest() # accessing the manifest is enough
2623 2638
2624 2639 # accessing fnode cache warms the cache
2625 2640 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2626 2641 # accessing tags warm the cache
2627 2642 self.tags()
2628 2643 self.filtered(b'served').tags()
2629 2644
2630 2645 # The `full` arg is documented as updating even the lazily-loaded
2631 2646 # caches immediately, so we're forcing a write to cause these caches
2632 2647 # to be warmed up even if they haven't explicitly been requested
2633 2648 # yet (if they've never been used by hg, they won't ever have been
2634 2649 # written, even if they're a subset of another kind of cache that
2635 2650 # *has* been used).
2636 2651 for filt in repoview.filtertable.keys():
2637 2652 filtered = self.filtered(filt)
2638 2653 filtered.branchmap().write(filtered)
2639 2654
2640 2655 def invalidatecaches(self):
2641 2656
2642 2657 if '_tagscache' in vars(self):
2643 2658 # can't use delattr on proxy
2644 2659 del self.__dict__['_tagscache']
2645 2660
2646 2661 self._branchcaches.clear()
2647 2662 self.invalidatevolatilesets()
2648 2663 self._sparsesignaturecache.clear()
2649 2664
2650 2665 def invalidatevolatilesets(self):
2651 2666 self.filteredrevcache.clear()
2652 2667 obsolete.clearobscaches(self)
2653 2668 self._quick_access_changeid_invalidate()
2654 2669
2655 2670 def invalidatedirstate(self):
2656 2671 """Invalidates the dirstate, causing the next call to dirstate
2657 2672 to check if it was modified since the last time it was read,
2658 2673 rereading it if it has.
2659 2674
2660 2675 This is different from dirstate.invalidate() in that it doesn't always
2661 2676 reread the dirstate. Use dirstate.invalidate() if you want to
2662 2677 explicitly read the dirstate again (i.e. restoring it to a previous
2663 2678 known good state)."""
2664 2679 if hasunfilteredcache(self, 'dirstate'):
2665 2680 for k in self.dirstate._filecache:
2666 2681 try:
2667 2682 delattr(self.dirstate, k)
2668 2683 except AttributeError:
2669 2684 pass
2670 2685 delattr(self.unfiltered(), 'dirstate')
2671 2686
2672 2687 def invalidate(self, clearfilecache=False):
2673 2688 """Invalidates both store and non-store parts other than dirstate
2674 2689
2675 2690 If a transaction is running, invalidation of store is omitted,
2676 2691 because discarding in-memory changes might cause inconsistency
2677 2692 (e.g. incomplete fncache causes unintentional failure, but
2678 2693 redundant one doesn't).
2679 2694 """
2680 2695 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2681 2696 for k in list(self._filecache.keys()):
2682 2697 # dirstate is invalidated separately in invalidatedirstate()
2683 2698 if k == b'dirstate':
2684 2699 continue
2685 2700 if (
2686 2701 k == b'changelog'
2687 2702 and self.currenttransaction()
2688 2703 and self.changelog._delayed
2689 2704 ):
2690 2705 # The changelog object may store unwritten revisions. We don't
2691 2706 # want to lose them.
2692 2707 # TODO: Solve the problem instead of working around it.
2693 2708 continue
2694 2709
2695 2710 if clearfilecache:
2696 2711 del self._filecache[k]
2697 2712 try:
2698 2713 delattr(unfiltered, k)
2699 2714 except AttributeError:
2700 2715 pass
2701 2716 self.invalidatecaches()
2702 2717 if not self.currenttransaction():
2703 2718 # TODO: Changing contents of store outside transaction
2704 2719 # causes inconsistency. We should make in-memory store
2705 2720 # changes detectable, and abort if changed.
2706 2721 self.store.invalidatecaches()
2707 2722
2708 2723 def invalidateall(self):
2709 2724 """Fully invalidates both store and non-store parts, causing the
2710 2725 subsequent operation to reread any outside changes."""
2711 2726 # extension should hook this to invalidate its caches
2712 2727 self.invalidate()
2713 2728 self.invalidatedirstate()
2714 2729
2715 2730 @unfilteredmethod
2716 2731 def _refreshfilecachestats(self, tr):
2717 2732 """Reload stats of cached files so that they are flagged as valid"""
2718 2733 for k, ce in self._filecache.items():
2719 2734 k = pycompat.sysstr(k)
2720 2735 if k == 'dirstate' or k not in self.__dict__:
2721 2736 continue
2722 2737 ce.refresh()
2723 2738
2724 2739 def _lock(
2725 2740 self,
2726 2741 vfs,
2727 2742 lockname,
2728 2743 wait,
2729 2744 releasefn,
2730 2745 acquirefn,
2731 2746 desc,
2732 2747 ):
2733 2748 timeout = 0
2734 2749 warntimeout = 0
2735 2750 if wait:
2736 2751 timeout = self.ui.configint(b"ui", b"timeout")
2737 2752 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2738 2753 # internal config: ui.signal-safe-lock
2739 2754 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2740 2755
2741 2756 l = lockmod.trylock(
2742 2757 self.ui,
2743 2758 vfs,
2744 2759 lockname,
2745 2760 timeout,
2746 2761 warntimeout,
2747 2762 releasefn=releasefn,
2748 2763 acquirefn=acquirefn,
2749 2764 desc=desc,
2750 2765 signalsafe=signalsafe,
2751 2766 )
2752 2767 return l
2753 2768
2754 2769 def _afterlock(self, callback):
2755 2770 """add a callback to be run when the repository is fully unlocked
2756 2771
2757 2772 The callback will be executed when the outermost lock is released
2758 2773 (with wlock being higher level than 'lock')."""
2759 2774 for ref in (self._wlockref, self._lockref):
2760 2775 l = ref and ref()
2761 2776 if l and l.held:
2762 2777 l.postrelease.append(callback)
2763 2778 break
2764 2779 else: # no lock has been found.
2765 2780 callback(True)
2766 2781
2767 2782 def lock(self, wait=True):
2768 2783 """Lock the repository store (.hg/store) and return a weak reference
2769 2784 to the lock. Use this before modifying the store (e.g. committing or
2770 2785 stripping). If you are opening a transaction, get a lock as well.
2771 2786
2772 2787 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2773 2788 'wlock' first to avoid a dead-lock hazard."""
2774 2789 l = self._currentlock(self._lockref)
2775 2790 if l is not None:
2776 2791 l.lock()
2777 2792 return l
2778 2793
2779 2794 l = self._lock(
2780 2795 vfs=self.svfs,
2781 2796 lockname=b"lock",
2782 2797 wait=wait,
2783 2798 releasefn=None,
2784 2799 acquirefn=self.invalidate,
2785 2800 desc=_(b'repository %s') % self.origroot,
2786 2801 )
2787 2802 self._lockref = weakref.ref(l)
2788 2803 return l
2789 2804
2790 2805 def wlock(self, wait=True):
2791 2806 """Lock the non-store parts of the repository (everything under
2792 2807 .hg except .hg/store) and return a weak reference to the lock.
2793 2808
2794 2809 Use this before modifying files in .hg.
2795 2810
2796 2811 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2797 2812 'wlock' first to avoid a dead-lock hazard."""
2798 2813 l = self._wlockref and self._wlockref()
2799 2814 if l is not None and l.held:
2800 2815 l.lock()
2801 2816 return l
2802 2817
2803 2818 # We do not need to check for non-waiting lock acquisition. Such
2804 2819 # an acquisition would not cause a dead-lock, as it would just fail.
2805 2820 if wait and (
2806 2821 self.ui.configbool(b'devel', b'all-warnings')
2807 2822 or self.ui.configbool(b'devel', b'check-locks')
2808 2823 ):
2809 2824 if self._currentlock(self._lockref) is not None:
2810 2825 self.ui.develwarn(b'"wlock" acquired after "lock"')
2811 2826
2812 2827 def unlock():
2813 2828 if self.dirstate.pendingparentchange():
2814 2829 self.dirstate.invalidate()
2815 2830 else:
2816 2831 self.dirstate.write(None)
2817 2832
2818 2833 self._filecache[b'dirstate'].refresh()
2819 2834
2820 2835 l = self._lock(
2821 2836 self.vfs,
2822 2837 b"wlock",
2823 2838 wait,
2824 2839 unlock,
2825 2840 self.invalidatedirstate,
2826 2841 _(b'working directory of %s') % self.origroot,
2827 2842 )
2828 2843 self._wlockref = weakref.ref(l)
2829 2844 return l
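# Illustrative sketch of the lock-ordering rule from the docstrings
# above (hypothetical operation):
#
#   with repo.wlock():     # working-copy lock first...
#       with repo.lock():  # ...then the store lock, never the reverse
#           ...            # safe to modify both .hg and .hg/store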
2830 2845
2831 2846 def _currentlock(self, lockref):
2832 2847 """Returns the lock if it's held, or None if it's not."""
2833 2848 if lockref is None:
2834 2849 return None
2835 2850 l = lockref()
2836 2851 if l is None or not l.held:
2837 2852 return None
2838 2853 return l
2839 2854
2840 2855 def currentwlock(self):
2841 2856 """Returns the wlock if it's held, or None if it's not."""
2842 2857 return self._currentlock(self._wlockref)
2843 2858
2844 2859 def checkcommitpatterns(self, wctx, match, status, fail):
2845 2860 """check for commit arguments that aren't committable"""
2846 2861 if match.isexact() or match.prefix():
2847 2862 matched = set(status.modified + status.added + status.removed)
2848 2863
2849 2864 for f in match.files():
2850 2865 f = self.dirstate.normalize(f)
2851 2866 if f == b'.' or f in matched or f in wctx.substate:
2852 2867 continue
2853 2868 if f in status.deleted:
2854 2869 fail(f, _(b'file not found!'))
2855 2870 # Is it a directory that exists or used to exist?
2856 2871 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2857 2872 d = f + b'/'
2858 2873 for mf in matched:
2859 2874 if mf.startswith(d):
2860 2875 break
2861 2876 else:
2862 2877 fail(f, _(b"no match under directory!"))
2863 2878 elif f not in self.dirstate:
2864 2879 fail(f, _(b"file not tracked!"))
2865 2880
2866 2881 @unfilteredmethod
2867 2882 def commit(
2868 2883 self,
2869 2884 text=b"",
2870 2885 user=None,
2871 2886 date=None,
2872 2887 match=None,
2873 2888 force=False,
2874 2889 editor=None,
2875 2890 extra=None,
2876 2891 ):
2877 2892 """Add a new revision to current repository.
2878 2893
2879 2894 Revision information is gathered from the working directory,
2880 2895 match can be used to filter the committed files. If editor is
2881 2896 supplied, it is called to get a commit message.
2882 2897 """
2883 2898 if extra is None:
2884 2899 extra = {}
2885 2900
2886 2901 def fail(f, msg):
2887 2902 raise error.InputError(b'%s: %s' % (f, msg))
2888 2903
2889 2904 if not match:
2890 2905 match = matchmod.always()
2891 2906
2892 2907 if not force:
2893 2908 match.bad = fail
2894 2909
2895 2910 # lock() for recent changelog (see issue4368)
2896 2911 with self.wlock(), self.lock():
2897 2912 wctx = self[None]
2898 2913 merge = len(wctx.parents()) > 1
2899 2914
2900 2915 if not force and merge and not match.always():
2901 2916 raise error.Abort(
2902 2917 _(
2903 2918 b'cannot partially commit a merge '
2904 2919 b'(do not specify files or patterns)'
2905 2920 )
2906 2921 )
2907 2922
2908 2923 status = self.status(match=match, clean=force)
2909 2924 if force:
2910 2925 status.modified.extend(
2911 2926 status.clean
2912 2927 ) # mq may commit clean files
2913 2928
2914 2929 # check subrepos
2915 2930 subs, commitsubs, newstate = subrepoutil.precommit(
2916 2931 self.ui, wctx, status, match, force=force
2917 2932 )
2918 2933
2919 2934 # make sure all explicit patterns are matched
2920 2935 if not force:
2921 2936 self.checkcommitpatterns(wctx, match, status, fail)
2922 2937
2923 2938 cctx = context.workingcommitctx(
2924 2939 self, status, text, user, date, extra
2925 2940 )
2926 2941
2927 2942 ms = mergestatemod.mergestate.read(self)
2928 2943 mergeutil.checkunresolved(ms)
2929 2944
2930 2945 # internal config: ui.allowemptycommit
2931 2946 if cctx.isempty() and not self.ui.configbool(
2932 2947 b'ui', b'allowemptycommit'
2933 2948 ):
2934 2949 self.ui.debug(b'nothing to commit, clearing merge state\n')
2935 2950 ms.reset()
2936 2951 return None
2937 2952
2938 2953 if merge and cctx.deleted():
2939 2954 raise error.Abort(_(b"cannot commit merge with missing files"))
2940 2955
2941 2956 if editor:
2942 2957 cctx._text = editor(self, cctx, subs)
2943 2958 edited = text != cctx._text
2944 2959
2945 2960 # Save commit message in case this transaction gets rolled back
2946 2961 # (e.g. by a pretxncommit hook). Leave the content alone on
2947 2962 # the assumption that the user will use the same editor again.
2948 2963 msgfn = self.savecommitmessage(cctx._text)
2949 2964
2950 2965 # commit subs and write new state
2951 2966 if subs:
2952 2967 uipathfn = scmutil.getuipathfn(self)
2953 2968 for s in sorted(commitsubs):
2954 2969 sub = wctx.sub(s)
2955 2970 self.ui.status(
2956 2971 _(b'committing subrepository %s\n')
2957 2972 % uipathfn(subrepoutil.subrelpath(sub))
2958 2973 )
2959 2974 sr = sub.commit(cctx._text, user, date)
2960 2975 newstate[s] = (newstate[s][0], sr)
2961 2976 subrepoutil.writestate(self, newstate)
2962 2977
2963 2978 p1, p2 = self.dirstate.parents()
2964 2979 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2965 2980 try:
2966 2981 self.hook(
2967 2982 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2968 2983 )
2969 2984 with self.transaction(b'commit'):
2970 2985 ret = self.commitctx(cctx, True)
2971 2986 # update bookmarks, dirstate and mergestate
2972 2987 bookmarks.update(self, [p1, p2], ret)
2973 2988 cctx.markcommitted(ret)
2974 2989 ms.reset()
2975 2990 except: # re-raises
2976 2991 if edited:
2977 2992 self.ui.write(
2978 2993 _(b'note: commit message saved in %s\n') % msgfn
2979 2994 )
2980 2995 self.ui.write(
2981 2996 _(
2982 2997 b"note: use 'hg commit --logfile "
2983 2998 b".hg/last-message.txt --edit' to reuse it\n"
2984 2999 )
2985 3000 )
2986 3001 raise
2987 3002
2988 3003 def commithook(unused_success):
2989 3004 # hack for commands that use a temporary commit (e.g. histedit):
2990 3005 # the temporary commit may have been stripped before the afterlock hook runs
2991 3006 if self.changelog.hasnode(ret):
2992 3007 self.hook(
2993 3008 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2994 3009 )
2995 3010
2996 3011 self._afterlock(commithook)
2997 3012 return ret
2998 3013
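For orientation, here is a minimal sketch of driving this commit API from an extension or script; the helper name and the single-file scenario are hypothetical, and `repo` is assumed to be an existing localrepository instance:

    from mercurial import match as matchmod

    def commit_single_file(repo, path, message):
        # Hedged sketch: restrict the commit to one explicitly named
        # file; path and message are bytes, as throughout Mercurial.
        m = matchmod.exact([path])
        node = repo.commit(text=message, match=m)
        if node is None:
            repo.ui.status(b'nothing to commit\n')
        return node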
2999 3014 @unfilteredmethod
3000 3015 def commitctx(self, ctx, error=False, origctx=None):
3001 3016 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3002 3017
3003 3018 @unfilteredmethod
3004 3019 def destroying(self):
3005 3020 """Inform the repository that nodes are about to be destroyed.
3006 3021 Intended for use by strip and rollback, so there's a common
3007 3022 place for anything that has to be done before destroying history.
3008 3023
3009 3024 This is mostly useful for saving state that is in memory and waiting
3010 3025 to be flushed when the current lock is released. Because a call to
3011 3026 destroyed is imminent, the repo will be invalidated causing those
3012 3027 changes to stay in memory (waiting for the next unlock), or vanish
3013 3028 completely.
3014 3029 """
3015 3030 # When using the same lock to commit and strip, the phasecache is left
3016 3031 # dirty after committing. Then when we strip, the repo is invalidated,
3017 3032 # causing those changes to disappear.
3018 3033 if '_phasecache' in vars(self):
3019 3034 self._phasecache.write()
3020 3035
3021 3036 @unfilteredmethod
3022 3037 def destroyed(self):
3023 3038 """Inform the repository that nodes have been destroyed.
3024 3039 Intended for use by strip and rollback, so there's a common
3025 3040 place for anything that has to be done after destroying history.
3026 3041 """
3027 3042 # When one tries to:
3028 3043 # 1) destroy nodes thus calling this method (e.g. strip)
3029 3044 # 2) use phasecache somewhere (e.g. commit)
3030 3045 #
3031 3046 # then 2) will fail because the phasecache contains nodes that were
3032 3047 # removed. We can either remove phasecache from the filecache,
3033 3048 # causing it to reload next time it is accessed, or simply filter
3034 3049 # the removed nodes now and write the updated cache.
3035 3050 self._phasecache.filterunknown(self)
3036 3051 self._phasecache.write()
3037 3052
3038 3053 # refresh all repository caches
3039 3054 self.updatecaches()
3040 3055
3041 3056 # Ensure the persistent tag cache is updated. Doing it now
3042 3057 # means that the tag cache only has to worry about destroyed
3043 3058 # heads immediately after a strip/rollback. That in turn
3044 3059 # guarantees that "cachetip == currenttip" (comparing both rev
3045 3060 # and node) always means no nodes have been added or destroyed.
3046 3061
3047 3062 # XXX this is suboptimal when qrefresh'ing: we strip the current
3048 3063 # head, refresh the tag cache, then immediately add a new head.
3049 3064 # But I think doing it this way is necessary for the "instant
3050 3065 # tag cache retrieval" case to work.
3051 3066 self.invalidate()
3052 3067
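As a rough illustration of the calling convention these two methods document (the operation body is a placeholder), a history-destroying operation brackets its work like this:

    def strip_like_operation(repo, do_destroy):
        # Hedged sketch: destructive operations are expected to call
        # destroying() before and destroyed() after mutating history.
        with repo.lock():
            repo.destroying()   # flush pending in-memory state (phasecache)
            do_destroy(repo)    # hypothetical callback removing nodes
            repo.destroyed()    # filter stale cache entries and refresh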
3053 3068 def status(
3054 3069 self,
3055 3070 node1=b'.',
3056 3071 node2=None,
3057 3072 match=None,
3058 3073 ignored=False,
3059 3074 clean=False,
3060 3075 unknown=False,
3061 3076 listsubrepos=False,
3062 3077 ):
3063 3078 '''a convenience method that calls node1.status(node2)'''
3064 3079 return self[node1].status(
3065 3080 node2, match, ignored, clean, unknown, listsubrepos
3066 3081 )
3067 3082
3068 3083 def addpostdsstatus(self, ps):
3069 3084 """Add a callback to run within the wlock, at the point at which status
3070 3085 fixups happen.
3071 3086
3072 3087 On status completion, callback(wctx, status) will be called with the
3073 3088 wlock held, unless the dirstate has changed from underneath or the wlock
3074 3089 couldn't be grabbed.
3075 3090
3076 3091 Callbacks should not capture and use a cached copy of the dirstate --
3077 3092 it might change in the meanwhile. Instead, they should access the
3078 3093 dirstate via wctx.repo().dirstate.
3079 3094
3080 3095 This list is emptied out after each status run -- extensions should
3081 3096 make sure they add to this list each time dirstate.status is called.
3082 3097 Extensions should also make sure they don't call this for statuses
3083 3098 that don't involve the dirstate.
3084 3099 """
3085 3100
3086 3101 # The list is located here for uniqueness reasons -- it is actually
3087 3102 # managed by the workingctx, but that isn't unique per-repo.
3088 3103 self._postdsstatus.append(ps)
3089 3104
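A hedged sketch of registering such a callback; the fixup body is a placeholder and follows the dirstate-access rule from the docstring above:

    def register_status_fixup(repo):
        def fixup(wctx, status):
            # Called with the wlock held; always go through
            # wctx.repo().dirstate rather than a cached copy.
            dirstate = wctx.repo().dirstate
            for f in status.modified:
                pass  # extension-specific fixups would go here
        repo.addpostdsstatus(fixup)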
3090 3105 def postdsstatus(self):
3091 3106 """Used by workingctx to get the list of post-dirstate-status hooks."""
3092 3107 return self._postdsstatus
3093 3108
3094 3109 def clearpostdsstatus(self):
3095 3110 """Used by workingctx to clear post-dirstate-status hooks."""
3096 3111 del self._postdsstatus[:]
3097 3112
3098 3113 def heads(self, start=None):
3099 3114 if start is None:
3100 3115 cl = self.changelog
3101 3116 headrevs = reversed(cl.headrevs())
3102 3117 return [cl.node(rev) for rev in headrevs]
3103 3118
3104 3119 heads = self.changelog.heads(start)
3105 3120 # sort the output in rev descending order
3106 3121 return sorted(heads, key=self.changelog.rev, reverse=True)
3107 3122
3108 3123 def branchheads(self, branch=None, start=None, closed=False):
3109 3124 """return a (possibly filtered) list of heads for the given branch
3110 3125
3111 3126 Heads are returned in topological order, from newest to oldest.
3112 3127 If branch is None, use the dirstate branch.
3113 3128 If start is not None, return only heads reachable from start.
3114 3129 If closed is True, return heads that are marked as closed as well.
3115 3130 """
3116 3131 if branch is None:
3117 3132 branch = self[None].branch()
3118 3133 branches = self.branchmap()
3119 3134 if not branches.hasbranch(branch):
3120 3135 return []
3121 3136 # the cache returns heads ordered lowest to highest
3122 3137 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3123 3138 if start is not None:
3124 3139 # filter out the heads that cannot be reached from startrev
3125 3140 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3126 3141 bheads = [h for h in bheads if h in fbheads]
3127 3142 return bheads
3128 3143
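For example, a caller wanting every head of the default branch, including closed ones, newest first, might write (a sketch, assuming `repo` is a local repository):

    heads = repo.branchheads(b'default', closed=True)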
3129 3144 def branches(self, nodes):
3130 3145 if not nodes:
3131 3146 nodes = [self.changelog.tip()]
3132 3147 b = []
3133 3148 for n in nodes:
3134 3149 t = n
3135 3150 while True:
3136 3151 p = self.changelog.parents(n)
3137 3152 if p[1] != nullid or p[0] == nullid:
3138 3153 b.append((t, n, p[0], p[1]))
3139 3154 break
3140 3155 n = p[0]
3141 3156 return b
3142 3157
3143 3158 def between(self, pairs):
3144 3159 r = []
3145 3160
3146 3161 for top, bottom in pairs:
3147 3162 n, l, i = top, [], 0
3148 3163 f = 1
3149 3164
3150 3165 while n != bottom and n != nullid:
3151 3166 p = self.changelog.parents(n)[0]
3152 3167 if i == f:
3153 3168 l.append(n)
3154 3169 f = f * 2
3155 3170 n = p
3156 3171 i += 1
3157 3172
3158 3173 r.append(l)
3159 3174
3160 3175 return r
3161 3176
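For readers unfamiliar with this undocumented helper: for each (top, bottom) pair it walks the first-parent chain down from top and keeps nodes at exponentially spaced distances (1, 2, 4, 8, ...), which the legacy wire-protocol discovery uses to bisect history. A standalone sketch of the sampling pattern:

    def sample_distances(chain_length):
        # Mirrors the i/f bookkeeping above: keep distances 1, 2, 4, ...
        kept, f = [], 1
        for i in range(1, chain_length):
            if i == f:
                kept.append(i)
                f *= 2
        return kept

    print(sample_distances(10))  # [1, 2, 4, 8]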
3162 3177 def checkpush(self, pushop):
3163 3178 """Extensions can override this function if additional checks have
3164 3179 to be performed before pushing, or call it if they override push
3165 3180 command.
3166 3181 """
3167 3182
3168 3183 @unfilteredpropertycache
3169 3184 def prepushoutgoinghooks(self):
3170 3185 """Return a util.hooks object whose hooks receive a pushop (with repo,
3171 3186 remote and outgoing) and are called before pushing changesets.
3172 3187 """
3173 3188 return util.hooks()
3174 3189
3175 3190 def pushkey(self, namespace, key, old, new):
3176 3191 try:
3177 3192 tr = self.currenttransaction()
3178 3193 hookargs = {}
3179 3194 if tr is not None:
3180 3195 hookargs.update(tr.hookargs)
3181 3196 hookargs = pycompat.strkwargs(hookargs)
3182 3197 hookargs['namespace'] = namespace
3183 3198 hookargs['key'] = key
3184 3199 hookargs['old'] = old
3185 3200 hookargs['new'] = new
3186 3201 self.hook(b'prepushkey', throw=True, **hookargs)
3187 3202 except error.HookAbort as exc:
3188 3203 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3189 3204 if exc.hint:
3190 3205 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3191 3206 return False
3192 3207 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3193 3208 ret = pushkey.push(self, namespace, key, old, new)
3194 3209
3195 3210 def runhook(unused_success):
3196 3211 self.hook(
3197 3212 b'pushkey',
3198 3213 namespace=namespace,
3199 3214 key=key,
3200 3215 old=old,
3201 3216 new=new,
3202 3217 ret=ret,
3203 3218 )
3204 3219
3205 3220 self._afterlock(runhook)
3206 3221 return ret
3207 3222
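As a usage sketch (bookmark name and node are hypothetical), creating a bookmark through the pushkey machinery looks like this; b'bookmarks' is one of the standard namespaces, and an empty old value means the key is being created:

    from mercurial.node import hex

    def push_bookmark(repo, name, node):
        # Returns False if the prepushkey hook aborted the operation.
        return repo.pushkey(b'bookmarks', name, b'', hex(node))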
3208 3223 def listkeys(self, namespace):
3209 3224 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3210 3225 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3211 3226 values = pushkey.list(self, namespace)
3212 3227 self.hook(b'listkeys', namespace=namespace, values=values)
3213 3228 return values
3214 3229
3215 3230 def debugwireargs(self, one, two, three=None, four=None, five=None):
3216 3231 '''used to test argument passing over the wire'''
3217 3232 return b"%s %s %s %s %s" % (
3218 3233 one,
3219 3234 two,
3220 3235 pycompat.bytestr(three),
3221 3236 pycompat.bytestr(four),
3222 3237 pycompat.bytestr(five),
3223 3238 )
3224 3239
3225 3240 def savecommitmessage(self, text):
3226 3241 fp = self.vfs(b'last-message.txt', b'wb')
3227 3242 try:
3228 3243 fp.write(text)
3229 3244 finally:
3230 3245 fp.close()
3231 3246 return self.pathto(fp.name[len(self.root) + 1 :])
3232 3247
3233 3248
3234 3249 # used to avoid circular references so destructors work
3235 3250 def aftertrans(files):
3236 3251 renamefiles = [tuple(t) for t in files]
3237 3252
3238 3253 def a():
3239 3254 for vfs, src, dest in renamefiles:
3240 3255 # if src and dest refer to the same file, vfs.rename is a no-op,
3241 3256 # leaving both src and dest on disk. delete dest to make sure
3242 3257 # the rename couldn't be such a no-op.
3243 3258 vfs.tryunlink(dest)
3244 3259 try:
3245 3260 vfs.rename(src, dest)
3246 3261 except OSError: # journal file does not yet exist
3247 3262 pass
3248 3263
3249 3264 return a
3250 3265
3251 3266
3252 3267 def undoname(fn):
3253 3268 base, name = os.path.split(fn)
3254 3269 assert name.startswith(b'journal')
3255 3270 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3256 3271
3257 3272
3258 3273 def instance(ui, path, create, intents=None, createopts=None):
3259 3274 localpath = util.urllocalpath(path)
3260 3275 if create:
3261 3276 createrepository(ui, localpath, createopts=createopts)
3262 3277
3263 3278 return makelocalrepository(ui, localpath, intents=intents)
3264 3279
3265 3280
3266 3281 def islocal(path):
3267 3282 return True
3268 3283
3269 3284
3270 3285 def defaultcreateopts(ui, createopts=None):
3271 3286 """Populate the default creation options for a repository.
3272 3287
3273 3288 A dictionary of explicitly requested creation options can be passed
3274 3289 in. Missing keys will be populated.
3275 3290 """
3276 3291 createopts = dict(createopts or {})
3277 3292
3278 3293 if b'backend' not in createopts:
3279 3294 # experimental config: storage.new-repo-backend
3280 3295 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3281 3296
3282 3297 return createopts
3283 3298
3284 3299
3285 3300 def newreporequirements(ui, createopts):
3286 3301 """Determine the set of requirements for a new local repository.
3287 3302
3288 3303 Extensions can wrap this function to specify custom requirements for
3289 3304 new repositories.
3290 3305 """
3291 3306 # If the repo is being created from a shared repository, we copy
3292 3307 # its requirements.
3293 3308 if b'sharedrepo' in createopts:
3294 3309 requirements = set(createopts[b'sharedrepo'].requirements)
3295 3310 if createopts.get(b'sharedrelative'):
3296 3311 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3297 3312 else:
3298 3313 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3299 3314
3300 3315 return requirements
3301 3316
3302 3317 if b'backend' not in createopts:
3303 3318 raise error.ProgrammingError(
3304 3319 b'backend key not present in createopts; '
3305 3320 b'was defaultcreateopts() called?'
3306 3321 )
3307 3322
3308 3323 if createopts[b'backend'] != b'revlogv1':
3309 3324 raise error.Abort(
3310 3325 _(
3311 3326 b'unable to determine repository requirements for '
3312 3327 b'storage backend: %s'
3313 3328 )
3314 3329 % createopts[b'backend']
3315 3330 )
3316 3331
3317 3332 requirements = {b'revlogv1'}
3318 3333 if ui.configbool(b'format', b'usestore'):
3319 3334 requirements.add(b'store')
3320 3335 if ui.configbool(b'format', b'usefncache'):
3321 3336 requirements.add(b'fncache')
3322 3337 if ui.configbool(b'format', b'dotencode'):
3323 3338 requirements.add(b'dotencode')
3324 3339
3325 3340 compengines = ui.configlist(b'format', b'revlog-compression')
3326 3341 for compengine in compengines:
3327 3342 if compengine in util.compengines:
3328 3343 break
3329 3344 else:
3330 3345 raise error.Abort(
3331 3346 _(
3332 3347 b'compression engines %s defined by '
3333 3348 b'format.revlog-compression not available'
3334 3349 )
3335 3350 % b', '.join(b'"%s"' % e for e in compengines),
3336 3351 hint=_(
3337 3352 b'run "hg debuginstall" to list available '
3338 3353 b'compression engines'
3339 3354 ),
3340 3355 )
3341 3356
3342 3357 # zlib is the historical default and doesn't need an explicit requirement.
3343 3358 if compengine == b'zstd':
3344 3359 requirements.add(b'revlog-compression-zstd')
3345 3360 elif compengine != b'zlib':
3346 3361 requirements.add(b'exp-compression-%s' % compengine)
3347 3362
3348 3363 if scmutil.gdinitconfig(ui):
3349 3364 requirements.add(b'generaldelta')
3350 3365 if ui.configbool(b'format', b'sparse-revlog'):
3351 3366 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3352 3367
3353 3368 # experimental config: format.exp-use-side-data
3354 3369 if ui.configbool(b'format', b'exp-use-side-data'):
3355 3370 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3356 3371 # experimental config: format.exp-use-copies-side-data-changeset
3357 3372 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3358 3373 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3359 3374 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3360 3375 if ui.configbool(b'experimental', b'treemanifest'):
3361 3376 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3362 3377
3363 3378 revlogv2 = ui.config(b'experimental', b'revlogv2')
3364 3379 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3365 3380 requirements.remove(b'revlogv1')
3366 3381 # generaldelta is implied by revlogv2.
3367 3382 requirements.discard(b'generaldelta')
3368 3383 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3369 3384 # experimental config: format.internal-phase
3370 3385 if ui.configbool(b'format', b'internal-phase'):
3371 3386 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3372 3387
3373 3388 if createopts.get(b'narrowfiles'):
3374 3389 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3375 3390
3376 3391 if createopts.get(b'lfs'):
3377 3392 requirements.add(b'lfs')
3378 3393
3379 3394 if ui.configbool(b'format', b'bookmarks-in-store'):
3380 3395 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3381 3396
3382 3397 if ui.configbool(b'format', b'use-persistent-nodemap'):
3383 3398 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3384 3399
3385 3400 # if share-safe is enabled, let's create the new repository with the new
3386 3401 # requirement
3387 3402 if ui.configbool(b'format', b'exp-share-safe'):
3388 3403 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3389 3404
3390 3405 return requirements
3391 3406
3392 3407
3393 3408 def checkrequirementscompat(ui, requirements):
3394 3409 """Checks compatibility of repository requirements enabled and disabled.
3395 3410
3396 3411 Returns a set of requirements which need to be dropped because the
3397 3412 requirements they depend on are not enabled. Also warns users about it."""
3398 3413
3399 3414 dropped = set()
3400 3415
3401 3416 if b'store' not in requirements:
3402 3417 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3403 3418 ui.warn(
3404 3419 _(
3405 3420 b'ignoring enabled \'format.bookmarks-in-store\' config '
3406 3421 b'because it is incompatible with disabled '
3407 3422 b'\'format.usestore\' config\n'
3408 3423 )
3409 3424 )
3410 3425 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3411 3426
3412 3427 if (
3413 3428 requirementsmod.SHARED_REQUIREMENT in requirements
3414 3429 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3415 3430 ):
3416 3431 raise error.Abort(
3417 3432 _(
3418 3433 b"cannot create shared repository as source was created"
3419 3434 b" with 'format.usestore' config disabled"
3420 3435 )
3421 3436 )
3422 3437
3423 3438 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3424 3439 ui.warn(
3425 3440 _(
3426 3441 b"ignoring enabled 'format.exp-share-safe' config because "
3427 3442 b"it is incompatible with disabled 'format.usestore'"
3428 3443 b" config\n"
3429 3444 )
3430 3445 )
3431 3446 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3432 3447
3433 3448 return dropped
3434 3449
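A hedged sketch of how these helpers combine, mirroring the flow createrepository() uses further below (the wrapper name is hypothetical):

    from mercurial import localrepo

    def compute_new_repo_requirements(ui, createopts=None):
        # Fill in defaults, derive requirements, then drop anything
        # checkrequirementscompat() flags as incompatible.
        opts = localrepo.defaultcreateopts(ui, createopts=createopts)
        reqs = localrepo.newreporequirements(ui, opts)
        reqs -= localrepo.checkrequirementscompat(ui, reqs)
        return reqs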
3435 3450
3436 3451 def filterknowncreateopts(ui, createopts):
3437 3452 """Filters a dict of repo creation options against options that are known.
3438 3453
3439 3454 Receives a dict of repo creation options and returns a dict of those
3440 3455 options that we don't know how to handle.
3441 3456
3442 3457 This function is called as part of repository creation. If the
3443 3458 returned dict contains any items, repository creation will not
3444 3459 be allowed, as it means there was a request to create a repository
3445 3460 with options not recognized by loaded code.
3446 3461
3447 3462 Extensions can wrap this function to filter out creation options
3448 3463 they know how to handle.
3449 3464 """
3450 3465 known = {
3451 3466 b'backend',
3452 3467 b'lfs',
3453 3468 b'narrowfiles',
3454 3469 b'sharedrepo',
3455 3470 b'sharedrelative',
3456 3471 b'shareditems',
3457 3472 b'shallowfilestore',
3458 3473 }
3459 3474
3460 3475 return {k: v for k, v in createopts.items() if k not in known}
3461 3476
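A hedged sketch of an extension wrapping this hook to claim a creation option it handles; the extension and the b'mybackendopt' key are hypothetical:

    from mercurial import extensions, localrepo

    def _filterknowncreateopts(orig, ui, createopts):
        unknown = orig(ui, createopts)
        # Remove our hypothetical option so core no longer rejects it.
        unknown.pop(b'mybackendopt', None)
        return unknown

    def extsetup(ui):
        extensions.wrapfunction(
            localrepo, 'filterknowncreateopts', _filterknowncreateopts
        )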
3462 3477
3463 3478 def createrepository(ui, path, createopts=None):
3464 3479 """Create a new repository in a vfs.
3465 3480
3466 3481 ``path`` path to the new repo's working directory.
3467 3482 ``createopts`` options for the new repository.
3468 3483
3469 3484 The following keys for ``createopts`` are recognized:
3470 3485
3471 3486 backend
3472 3487 The storage backend to use.
3473 3488 lfs
3474 3489 Repository will be created with ``lfs`` requirement. The lfs extension
3475 3490 will automatically be loaded when the repository is accessed.
3476 3491 narrowfiles
3477 3492 Set up repository to support narrow file storage.
3478 3493 sharedrepo
3479 3494 Repository object from which storage should be shared.
3480 3495 sharedrelative
3481 3496 Boolean indicating if the path to the shared repo should be
3482 3497 stored as relative. By default, the pointer to the "parent" repo
3483 3498 is stored as an absolute path.
3484 3499 shareditems
3485 3500 Set of items to share to the new repository (in addition to storage).
3486 3501 shallowfilestore
3487 3502 Indicates that storage for files should be shallow (not all ancestor
3488 3503 revisions are known).
3489 3504 """
3490 3505 createopts = defaultcreateopts(ui, createopts=createopts)
3491 3506
3492 3507 unknownopts = filterknowncreateopts(ui, createopts)
3493 3508
3494 3509 if not isinstance(unknownopts, dict):
3495 3510 raise error.ProgrammingError(
3496 3511 b'filterknowncreateopts() did not return a dict'
3497 3512 )
3498 3513
3499 3514 if unknownopts:
3500 3515 raise error.Abort(
3501 3516 _(
3502 3517 b'unable to create repository because of unknown '
3503 3518 b'creation option: %s'
3504 3519 )
3505 3520 % b', '.join(sorted(unknownopts)),
3506 3521 hint=_(b'is a required extension not loaded?'),
3507 3522 )
3508 3523
3509 3524 requirements = newreporequirements(ui, createopts=createopts)
3510 3525 requirements -= checkrequirementscompat(ui, requirements)
3511 3526
3512 3527 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3513 3528
3514 3529 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3515 3530 if hgvfs.exists():
3516 3531 raise error.RepoError(_(b'repository %s already exists') % path)
3517 3532
3518 3533 if b'sharedrepo' in createopts:
3519 3534 sharedpath = createopts[b'sharedrepo'].sharedpath
3520 3535
3521 3536 if createopts.get(b'sharedrelative'):
3522 3537 try:
3523 3538 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3524 3539 except (IOError, ValueError) as e:
3525 3540 # ValueError is raised on Windows if the drive letters differ
3526 3541 # on each path.
3527 3542 raise error.Abort(
3528 3543 _(b'cannot calculate relative path'),
3529 3544 hint=stringutil.forcebytestr(e),
3530 3545 )
3531 3546
3532 3547 if not wdirvfs.exists():
3533 3548 wdirvfs.makedirs()
3534 3549
3535 3550 hgvfs.makedir(notindexed=True)
3536 3551 if b'sharedrepo' not in createopts:
3537 3552 hgvfs.mkdir(b'cache')
3538 3553 hgvfs.mkdir(b'wcache')
3539 3554
3540 3555 if b'store' in requirements and b'sharedrepo' not in createopts:
3541 3556 hgvfs.mkdir(b'store')
3542 3557
3543 3558 # We create an invalid changelog outside the store so very old
3544 3559 # Mercurial versions (which didn't know about the requirements
3545 3560 # file) encounter an error on reading the changelog. This
3546 3561 # effectively locks out old clients and prevents them from
3547 3562 # mucking with a repo in an unknown format.
3548 3563 #
3549 3564 # The revlog header has version 2, which won't be recognized by
3550 3565 # such old clients.
3551 3566 hgvfs.append(
3552 3567 b'00changelog.i',
3553 3568 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3554 3569 b'layout',
3555 3570 )
3556 3571
3557 3572 # Filter the requirements into working copy and store ones
3558 3573 wcreq, storereq = scmutil.filterrequirements(requirements)
3559 3574 # write working copy ones
3560 3575 scmutil.writerequires(hgvfs, wcreq)
3561 3576 # If there are store requirements and the current repository
3562 3577 # is not a shared one, write the store requirements.
3563 3578 # For a new shared repository, we don't need to write the store
3564 3579 # requirements as they are already present in the store's requires
3565 3580 if storereq and b'sharedrepo' not in createopts:
3566 3581 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3567 3582 scmutil.writerequires(storevfs, storereq)
3568 3583
3569 3584 # Write out file telling readers where to find the shared store.
3570 3585 if b'sharedrepo' in createopts:
3571 3586 hgvfs.write(b'sharedpath', sharedpath)
3572 3587
3573 3588 if createopts.get(b'shareditems'):
3574 3589 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3575 3590 hgvfs.write(b'shared', shared)
3576 3591
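A hedged usage sketch, creating a repository that shares an existing repository's store with a relative pointer (the helper name is hypothetical):

    from mercurial import localrepo

    def create_relative_share(source_repo, path):
        localrepo.createrepository(
            source_repo.ui,
            path,
            createopts={
                b'sharedrepo': source_repo,
                b'sharedrelative': True,
            },
        )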
3577 3592
3578 3593 def poisonrepository(repo):
3579 3594 """Poison a repository instance so it can no longer be used."""
3580 3595 # Perform any cleanup on the instance.
3581 3596 repo.close()
3582 3597
3583 3598 # Our strategy is to replace the type of the object with one that
3584 3599 # has all attribute lookups result in error.
3585 3600 #
3586 3601 # But we have to allow the close() method because some constructors
3587 3602 # of repos call close() on repo references.
3588 3603 class poisonedrepository(object):
3589 3604 def __getattribute__(self, item):
3590 3605 if item == 'close':
3591 3606 return object.__getattribute__(self, item)
3592 3607
3593 3608 raise error.ProgrammingError(
3594 3609 b'repo instances should not be used after unshare'
3595 3610 )
3596 3611
3597 3612 def close(self):
3598 3613 pass
3599 3614
3600 3615 # We may have a repoview, which intercepts __setattr__. So be sure
3601 3616 # we operate at the lowest level possible.
3602 3617 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,266 +1,297 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 hg,
14 14 localrepo,
15 15 lock as lockmod,
16 16 pycompat,
17 17 requirements as requirementsmod,
18 18 scmutil,
19 19 )
20 20
21 21 from .upgrade_utils import (
22 22 actions as upgrade_actions,
23 23 engine as upgrade_engine,
24 24 )
25 25
26 26 from .utils import (
27 27 stringutil,
28 28 )
29 29
30 30 allformatvariant = upgrade_actions.allformatvariant
31 31
32 32
33 33 def upgraderepo(
34 34 ui,
35 35 repo,
36 36 run=False,
37 37 optimize=None,
38 38 backup=True,
39 39 manifest=None,
40 40 changelog=None,
41 41 filelogs=None,
42 42 ):
43 43 """Upgrade a repository in place."""
44 44 if optimize is None:
45 45 optimize = {}
46 46 repo = repo.unfiltered()
47 47
48 48 revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
49 49 specentries = (
50 50 (upgrade_engine.UPGRADE_CHANGELOG, changelog),
51 51 (upgrade_engine.UPGRADE_MANIFEST, manifest),
52 52 (upgrade_engine.UPGRADE_FILELOGS, filelogs),
53 53 )
54 54 specified = [(y, x) for (y, x) in specentries if x is not None]
55 55 if specified:
56 56 # the user restricted which revlogs should be recloned
57 57 if any(x for y, x in specified):
58 58 revlogs = set()
59 59 for upgrade, enabled in specified:
60 60 if enabled:
61 61 revlogs.add(upgrade)
62 62 else:
63 63 # none are enabled
64 64 for upgrade, __ in specified:
65 65 revlogs.discard(upgrade)
66 66
67 67 # Ensure the repository can be upgraded.
68 68 upgrade_actions.check_source_requirements(repo)
69 69
70 70 default_options = localrepo.defaultcreateopts(repo.ui)
71 71 newreqs = localrepo.newreporequirements(repo.ui, default_options)
72 72 newreqs.update(upgrade_actions.preservedrequirements(repo))
73 73
74 74 upgrade_actions.check_requirements_changes(repo, newreqs)
75 75
76 76 # Find and validate all improvements that can be made.
77 77 alloptimizations = upgrade_actions.findoptimizations(repo)
78 78
79 79 # Apply and validate arguments.
80 80 optimizations = []
81 81 for o in alloptimizations:
82 82 if o.name in optimize:
83 83 optimizations.append(o)
84 84 optimize.discard(o.name)
85 85
86 86 if optimize: # anything left is unknown
87 87 raise error.Abort(
88 88 _(b'unknown optimization action requested: %s')
89 89 % b', '.join(sorted(optimize)),
90 90 hint=_(b'run without arguments to see valid optimizations'),
91 91 )
92 92
93 93 format_upgrades = upgrade_actions.find_format_upgrades(repo)
94 94 up_actions = upgrade_actions.determine_upgrade_actions(
95 95 repo, format_upgrades, optimizations, repo.requirements, newreqs
96 96 )
97 97 removed_actions = upgrade_actions.find_format_downgrades(repo)
98 98
99 99 removedreqs = repo.requirements - newreqs
100 100 addedreqs = newreqs - repo.requirements
101 101
102 102 if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
103 103 incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
104 104 removedreqs | addedreqs
105 105 )
106 106 if incompatible:
107 107 msg = _(
108 108 b'ignoring revlogs selection flags, format requirements '
109 109 b'change: %s\n'
110 110 )
111 111 ui.warn(msg % b', '.join(sorted(incompatible)))
112 112 revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
113 113
114 114 upgrade_op = upgrade_actions.UpgradeOperation(
115 115 ui,
116 116 newreqs,
117 117 repo.requirements,
118 118 up_actions,
119 119 removed_actions,
120 120 revlogs,
121 121 )
122 122
123 123 if not run:
124 124 fromconfig = []
125 125 onlydefault = []
126 126
127 127 for d in format_upgrades:
128 128 if d.fromconfig(repo):
129 129 fromconfig.append(d)
130 130 elif d.default:
131 131 onlydefault.append(d)
132 132
133 133 if fromconfig or onlydefault:
134 134
135 135 if fromconfig:
136 136 ui.status(
137 137 _(
138 138 b'repository lacks features recommended by '
139 139 b'current config options:\n\n'
140 140 )
141 141 )
142 142 for i in fromconfig:
143 143 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
144 144
145 145 if onlydefault:
146 146 ui.status(
147 147 _(
148 148 b'repository lacks features used by the default '
149 149 b'config options:\n\n'
150 150 )
151 151 )
152 152 for i in onlydefault:
153 153 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
154 154
155 155 ui.status(b'\n')
156 156 else:
157 157 ui.status(_(b'(no format upgrades found in existing repository)\n'))
158 158
159 159 ui.status(
160 160 _(
161 161 b'performing an upgrade with "--run" will make the following '
162 162 b'changes:\n\n'
163 163 )
164 164 )
165 165
166 166 upgrade_op.print_requirements()
167 167 upgrade_op.print_optimisations()
168 168 upgrade_op.print_upgrade_actions()
169 169 upgrade_op.print_affected_revlogs()
170 170
171 171 if upgrade_op.unused_optimizations:
172 172 ui.status(
173 173 _(
174 174 b'additional optimizations are available by specifying '
175 175 b'"--optimize <name>":\n\n'
176 176 )
177 177 )
178 178 upgrade_op.print_unused_optimizations()
179 179 return
180 180
181 181 if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions):
182 182 ui.status(_(b'nothing to do\n'))
183 183 return
184 184 # Else we're in the run=true case.
185 185 ui.write(_(b'upgrade will perform the following actions:\n\n'))
186 186 upgrade_op.print_requirements()
187 187 upgrade_op.print_optimisations()
188 188 upgrade_op.print_upgrade_actions()
189 189 upgrade_op.print_affected_revlogs()
190 190
191 191 ui.status(_(b'beginning upgrade...\n'))
192 192 with repo.wlock(), repo.lock():
193 193 ui.status(_(b'repository locked and read-only\n'))
194 194 # Our strategy for upgrading the repository is to create a new,
195 195 # temporary repository, write data to it, then do a swap of the
196 196 # data. There are less heavyweight ways to do this, but it is easier
197 197 # to create a new repo object than to instantiate all the components
198 198 # (like the store) separately.
199 199 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
200 200 backuppath = None
201 201 try:
202 202 ui.status(
203 203 _(
204 204 b'creating temporary repository to stage upgraded '
205 205 b'data: %s\n'
206 206 )
207 207 % tmppath
208 208 )
209 209
210 210 # clone ui without using ui.copy because repo.ui is protected
211 211 repoui = repo.ui.__class__(repo.ui)
212 212 dstrepo = hg.repository(repoui, path=tmppath, create=True)
213 213
214 214 with dstrepo.wlock(), dstrepo.lock():
215 215 backuppath = upgrade_engine.upgrade(
216 216 ui, repo, dstrepo, upgrade_op
217 217 )
218 218 if not backup:
219 219 ui.status(
220 220 _(b'removing old repository content %s\n') % backuppath
221 221 )
222 222 repo.vfs.rmtree(backuppath, forcibly=True)
223 223 backuppath = None
224 224
225 225 finally:
226 226 ui.status(_(b'removing temporary repository %s\n') % tmppath)
227 227 repo.vfs.rmtree(tmppath, forcibly=True)
228 228
229 229 if backuppath and not ui.quiet:
230 230 ui.warn(
231 231 _(b'copy of old repository backed up at %s\n') % backuppath
232 232 )
233 233 ui.warn(
234 234 _(
235 235 b'the old repository will not be deleted; remove '
236 236 b'it to free up disk space once the upgraded '
237 237 b'repository is verified\n'
238 238 )
239 239 )
240 240
241 241 upgrade_op.print_post_op_messages()
242 242
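A hedged sketch of calling this entry point programmatically: without run=True the function only prints the plan, and with run=True it performs the upgrade, keeping a backup unless told otherwise:

    from mercurial import upgrade

    def preview_then_upgrade(ui, repo):
        upgrade.upgraderepo(ui, repo, run=False)   # dry run: print the plan
        upgrade.upgraderepo(ui, repo, run=True, backup=True)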
243 243
244 244 def upgrade_share_to_safe(ui, hgvfs, storevfs, current_requirements):
245 245 """Upgrades a share to use share-safe mechanism"""
246 246 wlock = None
247 247 try:
248 248 wlock = lockmod.trylock(ui, hgvfs, b'wlock', 0, 0)
249 249 store_requirements = localrepo._readrequires(storevfs, False)
250 250 # after upgrade, store requires will be shared, so lets find
251 251 # after upgrade, store requires will be shared, so let's find
252 252 # write them to share's .hg/requires
253 253 diffrequires = current_requirements - store_requirements
254 254 # add share-safe requirement as it will mark the share as share-safe
255 255 diffrequires.add(requirementsmod.SHARESAFE_REQUIREMENT)
256 256 scmutil.writerequires(hgvfs, diffrequires)
257 257 current_requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
258 258 ui.warn(_(b'repository upgraded to use share-safe mode\n'))
259 259 except error.LockError as e:
260 260 ui.warn(
261 261 _(b'failed to upgrade share, got error: %s\n')
262 262 % stringutil.forcebytestr(e.strerror)
263 263 )
264 264 finally:
265 265 if wlock:
266 266 wlock.release()
267
268
269 def downgrade_share_to_non_safe(
270 ui,
271 hgvfs,
272 sharedvfs,
273 current_requirements,
274 ):
275 """Downgrades a share which uses share-safe so that it no longer does"""
276 wlock = None
277 try:
278 wlock = lockmod.trylock(ui, hgvfs, b'wlock', 0, 0)
279 source_requirements = localrepo._readrequires(sharedvfs, True)
280 # we cannot be 100% sure which requirements were present in the store when
281 # the source supported share-safe. However, we do know that working
282 # directory requirements were not there. Hence we remove them
283 source_requirements -= requirementsmod.WORKING_DIR_REQUIREMENTS
284 current_requirements |= source_requirements
285 current_requirements.remove(requirementsmod.SHARESAFE_REQUIREMENT)
286 scmutil.writerequires(hgvfs, current_requirements)
287 ui.warn(_(b'repository downgraded to not use share-safe mode\n'))
288 except error.LockError as e:
289 # raise the error right away because if the downgrade failed, we cannot
290 # load the repository: it does not have a complete set of requirements
291 raise error.Abort(
292 _(b'failed to downgrade share, got error: %s')
293 % stringutil.forcebytestr(e.strerror)
294 )
295 finally:
296 if wlock:
297 wlock.release()
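A hedged sketch of the call site this new helper enables; the guard matches the experimental config exercised by the tests below, and the surrounding function is hypothetical:

    from mercurial import upgrade as upgrademod

    def maybe_auto_downgrade(ui, hgvfs, sharedvfs, requirements):
        if ui.configbool(
            b'experimental', b'sharesafe-auto-downgrade-shares'
        ):
            upgrademod.downgrade_share_to_non_safe(
                ui, hgvfs, sharedvfs, requirements
            )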
@@ -1,541 +1,562 b''
1 1 setup
2 2
3 3 $ cat >> $HGRCPATH <<EOF
4 4 > [extensions]
5 5 > share =
6 6 > [format]
7 7 > exp-share-safe = True
8 8 > EOF
9 9
10 10 prepare source repo
11 11
12 12 $ hg init source
13 13 $ cd source
14 14 $ cat .hg/requires
15 15 exp-sharesafe
16 16 $ cat .hg/store/requires
17 17 dotencode
18 18 fncache
19 19 generaldelta
20 20 revlogv1
21 21 sparserevlog
22 22 store
23 23 $ hg debugrequirements
24 24 dotencode
25 25 exp-sharesafe
26 26 fncache
27 27 generaldelta
28 28 revlogv1
29 29 sparserevlog
30 30 store
31 31
32 32 $ echo a > a
33 33 $ hg ci -Aqm "added a"
34 34 $ echo b > b
35 35 $ hg ci -Aqm "added b"
36 36
37 37 $ HGEDITOR=cat hg config --shared
38 38 abort: repository is not shared; can't use --shared
39 39 [10]
40 40 $ cd ..
41 41
42 42 Create a shared repo and check the requirements are shared and read correctly
43 43 $ hg share source shared1
44 44 updating working directory
45 45 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
46 46 $ cd shared1
47 47 $ cat .hg/requires
48 48 exp-sharesafe
49 49 shared
50 50
51 51 $ hg debugrequirements -R ../source
52 52 dotencode
53 53 exp-sharesafe
54 54 fncache
55 55 generaldelta
56 56 revlogv1
57 57 sparserevlog
58 58 store
59 59
60 60 $ hg debugrequirements
61 61 dotencode
62 62 exp-sharesafe
63 63 fncache
64 64 generaldelta
65 65 revlogv1
66 66 shared
67 67 sparserevlog
68 68 store
69 69
70 70 $ echo c > c
71 71 $ hg ci -Aqm "added c"
72 72
73 73 Check that config of the source repository is also loaded
74 74
75 75 $ hg showconfig ui.curses
76 76 [1]
77 77
78 78 $ echo "[ui]" >> ../source/.hg/hgrc
79 79 $ echo "curses=true" >> ../source/.hg/hgrc
80 80
81 81 $ hg showconfig ui.curses
82 82 true
83 83
84 84 Test that extensions of source repository are also loaded
85 85
86 86 $ hg debugextensions
87 87 share
88 88 $ hg extdiff -p echo
89 89 hg: unknown command 'extdiff'
90 90 'extdiff' is provided by the following extension:
91 91
92 92 extdiff command to allow external programs to compare revisions
93 93
94 94 (use 'hg help extensions' for information on enabling extensions)
95 95 [255]
96 96
97 97 $ echo "[extensions]" >> ../source/.hg/hgrc
98 98 $ echo "extdiff=" >> ../source/.hg/hgrc
99 99
100 100 $ hg debugextensions -R ../source
101 101 extdiff
102 102 share
103 103 $ hg extdiff -R ../source -p echo
104 104
105 105 BROKEN: the command below will not work if the config of the shared source is
106 106 not loaded on dispatch, even though debugextensions says that the extension
107 107 is loaded
108 108 $ hg debugextensions
109 109 extdiff
110 110 share
111 111
112 112 $ hg extdiff -p echo
113 113
114 114 However, local .hg/hgrc should override the config set by share source
115 115
116 116 $ echo "[ui]" >> .hg/hgrc
117 117 $ echo "curses=false" >> .hg/hgrc
118 118
119 119 $ hg showconfig ui.curses
120 120 false
121 121
122 122 $ HGEDITOR=cat hg config --shared
123 123 [ui]
124 124 curses=true
125 125 [extensions]
126 126 extdiff=
127 127
128 128 $ HGEDITOR=cat hg config --local
129 129 [ui]
130 130 curses=false
131 131
132 132 Testing that hooks set in the source repository also run in the shared repo
133 133
134 134 $ cd ../source
135 135 $ cat <<EOF >> .hg/hgrc
136 136 > [extensions]
137 137 > hooklib=
138 138 > [hooks]
139 139 > pretxnchangegroup.reject_merge_commits = \
140 140 > python:hgext.hooklib.reject_merge_commits.hook
141 141 > EOF
142 142
143 143 $ cd ..
144 144 $ hg clone source cloned
145 145 updating to branch default
146 146 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
147 147 $ cd cloned
148 148 $ hg up 0
149 149 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
150 150 $ echo bar > bar
151 151 $ hg ci -Aqm "added bar"
152 152 $ hg merge
153 153 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
154 154 (branch merge, don't forget to commit)
155 155 $ hg ci -m "merge commit"
156 156
157 157 $ hg push ../source
158 158 pushing to ../source
159 159 searching for changes
160 160 adding changesets
161 161 adding manifests
162 162 adding file changes
163 163 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
164 164 transaction abort!
165 165 rollback completed
166 166 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
167 167 [255]
168 168
169 169 $ hg push ../shared1
170 170 pushing to ../shared1
171 171 searching for changes
172 172 adding changesets
173 173 adding manifests
174 174 adding file changes
175 175 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
176 176 transaction abort!
177 177 rollback completed
178 178 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
179 179 [255]
180 180
181 181 Test that if the share source config is untrusted, we don't read it
182 182
183 183 $ cd ../shared1
184 184
185 185 $ cat << EOF > $TESTTMP/untrusted.py
186 186 > from mercurial import scmutil, util
187 187 > def uisetup(ui):
188 188 > class untrustedui(ui.__class__):
189 189 > def _trusted(self, fp, f):
190 190 > if util.normpath(fp.name).endswith(b'source/.hg/hgrc'):
191 191 > return False
192 192 > return super(untrustedui, self)._trusted(fp, f)
193 193 > ui.__class__ = untrustedui
194 194 > EOF
195 195
196 196 $ hg showconfig hooks
197 197 hooks.pretxnchangegroup.reject_merge_commits=python:hgext.hooklib.reject_merge_commits.hook
198 198
199 199 $ hg showconfig hooks --config extensions.untrusted=$TESTTMP/untrusted.py
200 200 [1]
201 201
202 202 Update the source repository format and check that shared repo works
203 203
204 204 $ cd ../source
205 205
206 206 Disable zstd-related tests because it's not present in the pure version
207 207 #if zstd
208 208 $ echo "[format]" >> .hg/hgrc
209 209 $ echo "revlog-compression=zstd" >> .hg/hgrc
210 210
211 211 $ hg debugupgraderepo --run -q
212 212 upgrade will perform the following actions:
213 213
214 214 requirements
215 215 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store
216 216 added: revlog-compression-zstd
217 217
218 218 processed revlogs:
219 219 - all-filelogs
220 220 - changelog
221 221 - manifest
222 222
223 223 $ hg log -r .
224 224 changeset: 1:5f6d8a4bf34a
225 225 user: test
226 226 date: Thu Jan 01 00:00:00 1970 +0000
227 227 summary: added b
228 228
229 229 #endif
230 230 $ echo "[format]" >> .hg/hgrc
231 231 $ echo "use-persistent-nodemap=True" >> .hg/hgrc
232 232
233 233 $ hg debugupgraderepo --run -q -R ../shared1
234 234 abort: cannot upgrade repository; unsupported source requirement: shared
235 235 [255]
236 236
237 237 $ hg debugupgraderepo --run -q
238 238 upgrade will perform the following actions:
239 239
240 240 requirements
241 241 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
242 242 preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
243 243 added: persistent-nodemap
244 244
245 245 processed revlogs:
246 246 - all-filelogs
247 247 - changelog
248 248 - manifest
249 249
250 250 $ hg log -r .
251 251 changeset: 1:5f6d8a4bf34a
252 252 user: test
253 253 date: Thu Jan 01 00:00:00 1970 +0000
254 254 summary: added b
255 255
256 256
257 257 Shared one should work
258 258 $ cd ../shared1
259 259 $ hg log -r .
260 260 changeset: 2:155349b645be
261 261 tag: tip
262 262 user: test
263 263 date: Thu Jan 01 00:00:00 1970 +0000
264 264 summary: added c
265 265
266 266
267 267 Testing that the non-shared config is loaded for the source and not the share
268 268
269 269 $ cd ../source
270 270 $ touch .hg/hgrc-not-shared
271 271 $ echo "[ui]" >> .hg/hgrc-not-shared
272 272 $ echo "traceback=true" >> .hg/hgrc-not-shared
273 273
274 274 $ hg showconfig ui.traceback
275 275 true
276 276
277 277 $ HGEDITOR=cat hg config --non-shared
278 278 [ui]
279 279 traceback=true
280 280
281 281 $ cd ../shared1
282 282 $ hg showconfig ui.traceback
283 283 [1]
284 284
285 285 Unsharing works
286 286
287 287 $ hg unshare
288 288
289 289 Test that the source config is added to the shared one after unshare, and that
290 290 the current repo's config is still respected over the config which came from the source
291 291 $ cd ../cloned
292 292 $ hg push ../shared1
293 293 pushing to ../shared1
294 294 searching for changes
295 295 adding changesets
296 296 adding manifests
297 297 adding file changes
298 298 error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
299 299 transaction abort!
300 300 rollback completed
301 301 abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
302 302 [255]
303 303 $ hg showconfig ui.curses -R ../shared1
304 304 false
305 305
306 306 $ cd ../
307 307
308 308 Test that upgrading using debugupgraderepo works
309 309 =================================================
310 310
311 311 $ hg init non-share-safe --config format.exp-share-safe=false
312 312 $ cd non-share-safe
313 313 $ hg debugrequirements
314 314 dotencode
315 315 fncache
316 316 generaldelta
317 317 revlogv1
318 318 sparserevlog
319 319 store
320 320 $ echo foo > foo
321 321 $ hg ci -Aqm 'added foo'
322 322 $ echo bar > bar
323 323 $ hg ci -Aqm 'added bar'
324 324
325 325 Create a share before upgrading
326 326
327 327 $ cd ..
328 328 $ hg share non-share-safe nss-share
329 329 updating working directory
330 330 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
331 331 $ hg debugrequirements -R nss-share
332 332 dotencode
333 333 fncache
334 334 generaldelta
335 335 revlogv1
336 336 shared
337 337 sparserevlog
338 338 store
339 339 $ cd non-share-safe
340 340
341 341 Upgrade
342 342
343 343 $ hg debugupgraderepo -q
344 344 requirements
345 345 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
346 346 added: exp-sharesafe
347 347
348 348 processed revlogs:
349 349 - all-filelogs
350 350 - changelog
351 351 - manifest
352 352
353 353 $ hg debugupgraderepo --run -q
354 354 upgrade will perform the following actions:
355 355
356 356 requirements
357 357 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
358 358 added: exp-sharesafe
359 359
360 360 processed revlogs:
361 361 - all-filelogs
362 362 - changelog
363 363 - manifest
364 364
365 365 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
366 366
367 367 $ hg debugrequirements
368 368 dotencode
369 369 exp-sharesafe
370 370 fncache
371 371 generaldelta
372 372 revlogv1
373 373 sparserevlog
374 374 store
375 375
376 376 $ cat .hg/requires
377 377 exp-sharesafe
378 378
379 379 $ cat .hg/store/requires
380 380 dotencode
381 381 fncache
382 382 generaldelta
383 383 revlogv1
384 384 sparserevlog
385 385 store
386 386
387 387 $ hg log -GT "{node}: {desc}\n"
388 388 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
389 389 |
390 390 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
391 391
392 392
393 393 Make sure existing shares still work
394 394
395 395 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
396 396 warning: source repository supports share-safe functionality. Reshare to upgrade.
397 397 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
398 398 |
399 399 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
400 400
401 401
402 402
403 403 Create a safe share from the upgraded one
404 404
405 405 $ cd ..
406 406 $ hg share non-share-safe ss-share
407 407 updating working directory
408 408 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
409 409 $ cd ss-share
410 410 $ hg log -GT "{node}: {desc}\n"
411 411 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
412 412 |
413 413 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
414 414
415 415 $ cd ../non-share-safe
416 416
417 417 Test that downgrading works too
418 418
419 419 $ cat >> $HGRCPATH <<EOF
420 420 > [extensions]
421 421 > share =
422 422 > [format]
423 423 > exp-share-safe = False
424 424 > EOF
425 425
426 426 $ hg debugupgraderepo -q
427 427 requirements
428 428 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
429 429 removed: exp-sharesafe
430 430
431 431 processed revlogs:
432 432 - all-filelogs
433 433 - changelog
434 434 - manifest
435 435
436 436 $ hg debugupgraderepo -q --run
437 437 upgrade will perform the following actions:
438 438
439 439 requirements
440 440 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
441 441 removed: exp-sharesafe
442 442
443 443 processed revlogs:
444 444 - all-filelogs
445 445 - changelog
446 446 - manifest
447 447
448 448 repository downgraded to not use share safe mode, existing shares will not work and needs to be reshared.
449 449
450 450 $ hg debugrequirements
451 451 dotencode
452 452 fncache
453 453 generaldelta
454 454 revlogv1
455 455 sparserevlog
456 456 store
457 457
458 458 $ cat .hg/requires
459 459 dotencode
460 460 fncache
461 461 generaldelta
462 462 revlogv1
463 463 sparserevlog
464 464 store
465 465
466 466 $ test -f .hg/store/requires
467 467 [1]
468 468
469 469 $ hg log -GT "{node}: {desc}\n"
470 470 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
471 471 |
472 472 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
473 473
474 474
475 475 Make sure existing shares still work
476 476
477 477 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
478 478 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
479 479 |
480 480 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
481 481
482 482
483 483 $ hg log -GT "{node}: {desc}\n" -R ../ss-share
484 484 abort: share source does not support exp-sharesafe requirement
485 485 [255]
486 486
487 Testing automatic downgrade of shares when config is set
488
489 $ touch ../ss-share/.hg/wlock
490 $ hg log -GT "{node}: {desc}\n" -R ../ss-share --config experimental.sharesafe-auto-downgrade-shares=true
491 abort: failed to downgrade share, got error: Lock held
492 [255]
493 $ rm ../ss-share/.hg/wlock
494
495 $ hg log -GT "{node}: {desc}\n" -R ../ss-share --config experimental.sharesafe-auto-downgrade-shares=true
496 repository downgraded to not use share-safe mode
497 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
498 |
499 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
500
501
502 $ hg log -GT "{node}: {desc}\n" -R ../ss-share
503 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
504 |
505 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
506
507
487 508
488 509 Testing automatic upgrade of shares when config is set
489 510
490 511 $ hg debugupgraderepo -q --run --config format.exp-share-safe=True
491 512 upgrade will perform the following actions:
492 513
493 514 requirements
494 515 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
495 516 added: exp-sharesafe
496 517
497 518 processed revlogs:
498 519 - all-filelogs
499 520 - changelog
500 521 - manifest
501 522
502 523 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
503 524 $ hg debugrequirements
504 525 dotencode
505 526 exp-sharesafe
506 527 fncache
507 528 generaldelta
508 529 revlogv1
509 530 sparserevlog
510 531 store
511 532 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
512 533 warning: source repository supports share-safe functionality. Reshare to upgrade.
513 534 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
514 535 |
515 536 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
516 537
517 538
518 539 Check that if the lock is taken, the upgrade fails but read operations are successful
519 540 $ touch ../nss-share/.hg/wlock
520 541 $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true
521 542 failed to upgrade share, got error: Lock held
522 543 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
523 544 |
524 545 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
525 546
526 547 $ rm ../nss-share/.hg/wlock
527 548 $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true
528 549 repository upgraded to use share-safe mode
529 550 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
530 551 |
531 552 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
532 553
533 554
534 555 Test that unshare works
535 556
536 557 $ hg unshare -R ../nss-share
537 558 $ hg log -GT "{node}: {desc}\n" -R ../nss-share
538 559 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
539 560 |
540 561 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
541 562