test-lock: use synchronisation file instead of sleep...
marmoute
r52390:9da3fcc5 stable
@@ -1,2942 +1,2947 @@
1 1 # configitems.toml - centralized declaration of configuration options
2 2 #
3 3 # This file contains declarations of the core Mercurial configuration options.
4 4 #
5 5 # # Structure
6 6 #
7 7 # items: array of config items
8 8 # templates: mapping of template name to template declaration
9 9 # template-applications: array of template applications
10 10 #
11 11 # # Elements
12 12 #
13 13 # ## Item
14 14 #
15 15 # Declares a core Mercurial option.
16 16 #
17 17 # - section: string (required)
18 18 # - name: string (required)
19 19 # - default-type: string, changes how `default` is read
20 20 # - default: any
21 21 # - generic: boolean
22 22 # - priority: integer, only if `generic` is true
23 23 # - alias: list of 2-tuples of strings
24 24 # - experimental: boolean
25 25 # - documentation: string
26 26 # - in_core_extension: string
27 27 #
28 28 # ## Template
29 29 #
30 30 # Declares a group of options to be re-used for multiple sections.
31 31 #
32 32 # - all the same fields as `Item`, except `section` and `name`
33 33 # - `suffix` (string, required)
34 34 #
35 35 # ## Template applications
36 36 #
37 37 # Uses a `Template` to instantiate its options in a given section.
38 38 #
39 39 # - template: string (required, must match a `Template` name)
40 40 # - section: string (required)
41 41
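# # Example
#
# A minimal illustrative sketch of the three shapes above (hypothetical
# names, not real declarations; the exact TOML layout for templates is
# inferred from the field lists above):
#
# [[items]]
# section = "example"
# name = "some-option"
# default = true
# documentation = "An illustrative boolean option."
#
# [[templates.example-template]]
# suffix = "some-suffix"
# default = ""
#
# [[template-applications]]
# template = "example-template"
# section = "example"
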
42 42 [[items]]
43 43 section = "alias"
44 44 name = ".*"
45 45 default-type = "dynamic"
46 46 generic = true
47 47
48 48 [[items]]
49 49 section = "auth"
50 50 name = "cookiefile"
51 51
52 52 # bookmarks.pushing: internal hack for discovery
53 53 [[items]]
54 54 section = "bookmarks"
55 55 name = "pushing"
56 56 default-type = "list_type"
57 57
58 58 # bundle.mainreporoot: internal hack for bundlerepo
59 59 [[items]]
60 60 section = "bundle"
61 61 name = "mainreporoot"
62 62 default = ""
63 63
64 64 [[items]]
65 65 section = "censor"
66 66 name = "policy"
67 67 default = "abort"
68 68 experimental = true
69 69
70 70 [[items]]
71 71 section = "chgserver"
72 72 name = "idletimeout"
73 73 default = 3600
74 74
75 75 [[items]]
76 76 section = "chgserver"
77 77 name = "skiphash"
78 78 default = false
79 79
80 80 [[items]]
81 81 section = "cmdserver"
82 82 name = "log"
83 83
84 84 [[items]]
85 85 section = "cmdserver"
86 86 name = "max-log-files"
87 87 default = 7
88 88
89 89 [[items]]
90 90 section = "cmdserver"
91 91 name = "max-log-size"
92 92 default = "1 MB"
93 93
94 94 [[items]]
95 95 section = "cmdserver"
96 96 name = "max-repo-cache"
97 97 default = 0
98 98 experimental = true
99 99
100 100 [[items]]
101 101 section = "cmdserver"
102 102 name = "message-encodings"
103 103 default-type = "list_type"
104 104
105 105 [[items]]
106 106 section = "cmdserver"
107 107 name = "shutdown-on-interrupt"
108 108 default = true
109 109
110 110 [[items]]
111 111 section = "cmdserver"
112 112 name = "track-log"
113 113 default-type = "lambda"
114 114 default = [ "chgserver", "cmdserver", "repocache",]
115 115
116 116 [[items]]
117 117 section = "color"
118 118 name = ".*"
119 119 generic = true
120 120
121 121 [[items]]
122 122 section = "color"
123 123 name = "mode"
124 124 default = "auto"
125 125
126 126 [[items]]
127 127 section = "color"
128 128 name = "pagermode"
129 129 default-type = "dynamic"
130 130
131 131 [[items]]
132 132 section = "command-templates"
133 133 name = "graphnode"
134 134 alias = [["ui", "graphnodetemplate"]]
135 135
136 136 [[items]]
137 137 section = "command-templates"
138 138 name = "log"
139 139 alias = [["ui", "logtemplate"]]
140 140
141 141 [[items]]
142 142 section = "command-templates"
143 143 name = "mergemarker"
144 144 default = '{node|short} {ifeq(tags, "tip", "", ifeq(tags, "", "", "{tags} "))}{if(bookmarks, "{bookmarks} ")}{ifeq(branch, "default", "", "{branch} ")}- {author|user}: {desc|firstline}'
145 145 alias = [["ui", "mergemarkertemplate"]]
146 146
147 147 [[items]]
148 148 section = "command-templates"
149 149 name = "oneline-summary"
150 150
151 151 [[items]]
152 152 section = "command-templates"
153 153 name = "oneline-summary.*"
154 154 default-type = "dynamic"
155 155 generic = true
156 156
157 157 [[items]]
158 158 section = "command-templates"
159 159 name = "pre-merge-tool-output"
160 160 alias = [["ui", "pre-merge-tool-output-template"]]
161 161
162 162 [[items]]
163 163 section = "commands"
164 164 name = "commit.post-status"
165 165 default = false
166 166
167 167 [[items]]
168 168 section = "commands"
169 169 name = "grep.all-files"
170 170 default = false
171 171 experimental = true
172 172
173 173 [[items]]
174 174 section = "commands"
175 175 name = "merge.require-rev"
176 176 default = false
177 177
178 178 [[items]]
179 179 section = "commands"
180 180 name = "push.require-revs"
181 181 default = false
182 182
183 183 # Rebase-related configuration moved to core because other extensions are doing
184 184 # strange things. For example, shelve imports the extension to reuse some bits
185 185 # without formally loading it.
186 186 [[items]]
187 187 section = "commands"
188 188 name = "rebase.requiredest"
189 189 default = false
190 190
191 191 [[items]]
192 192 section = "commands"
193 193 name = "resolve.confirm"
194 194 default = false
195 195
196 196 [[items]]
197 197 section = "commands"
198 198 name = "resolve.explicit-re-merge"
199 199 default = false
200 200
201 201 [[items]]
202 202 section = "commands"
203 203 name = "resolve.mark-check"
204 204 default = "none"
205 205
206 206 [[items]]
207 207 section = "commands"
208 208 name = "show.aliasprefix"
209 209 default-type = "list_type"
210 210
211 211 [[items]]
212 212 section = "commands"
213 213 name = "status.relative"
214 214 default = false
215 215
216 216 [[items]]
217 217 section = "commands"
218 218 name = "status.skipstates"
219 219 default = []
220 220 experimental = true
221 221
222 222 [[items]]
223 223 section = "commands"
224 224 name = "status.terse"
225 225 default = ""
226 226
227 227 [[items]]
228 228 section = "commands"
229 229 name = "status.verbose"
230 230 default = false
231 231
232 232 [[items]]
233 233 section = "commands"
234 234 name = "update.check"
235 235
236 236 [[items]]
237 237 section = "commands"
238 238 name = "update.requiredest"
239 239 default = false
240 240
241 241 [[items]]
242 242 section = "committemplate"
243 243 name = ".*"
244 244 generic = true
245 245
246 246 [[items]]
247 247 section = "convert"
248 248 name = "bzr.saverev"
249 249 default = true
250 250
251 251 [[items]]
252 252 section = "convert"
253 253 name = "cvsps.cache"
254 254 default = true
255 255
256 256 [[items]]
257 257 section = "convert"
258 258 name = "cvsps.fuzz"
259 259 default = 60
260 260
261 261 [[items]]
262 262 section = "convert"
263 263 name = "cvsps.logencoding"
264 264
265 265 [[items]]
266 266 section = "convert"
267 267 name = "cvsps.mergefrom"
268 268
269 269 [[items]]
270 270 section = "convert"
271 271 name = "cvsps.mergeto"
272 272
273 273 [[items]]
274 274 section = "convert"
275 275 name = "git.committeractions"
276 276 default-type = "lambda"
277 277 default = [ "messagedifferent",]
278 278
279 279 [[items]]
280 280 section = "convert"
281 281 name = "git.extrakeys"
282 282 default-type = "list_type"
283 283
284 284 [[items]]
285 285 section = "convert"
286 286 name = "git.findcopiesharder"
287 287 default = false
288 288
289 289 [[items]]
290 290 section = "convert"
291 291 name = "git.remoteprefix"
292 292 default = "remote"
293 293
294 294 [[items]]
295 295 section = "convert"
296 296 name = "git.renamelimit"
297 297 default = 400
298 298
299 299 [[items]]
300 300 section = "convert"
301 301 name = "git.saverev"
302 302 default = true
303 303
304 304 [[items]]
305 305 section = "convert"
306 306 name = "git.similarity"
307 307 default = 50
308 308
309 309 [[items]]
310 310 section = "convert"
311 311 name = "git.skipsubmodules"
312 312 default = false
313 313
314 314 [[items]]
315 315 section = "convert"
316 316 name = "hg.clonebranches"
317 317 default = false
318 318
319 319 [[items]]
320 320 section = "convert"
321 321 name = "hg.ignoreerrors"
322 322 default = false
323 323
324 324 [[items]]
325 325 section = "convert"
326 326 name = "hg.preserve-hash"
327 327 default = false
328 328
329 329 [[items]]
330 330 section = "convert"
331 331 name = "hg.revs"
332 332
333 333 [[items]]
334 334 section = "convert"
335 335 name = "hg.saverev"
336 336 default = false
337 337
338 338 [[items]]
339 339 section = "convert"
340 340 name = "hg.sourcename"
341 341
342 342 [[items]]
343 343 section = "convert"
344 344 name = "hg.startrev"
345 345
346 346 [[items]]
347 347 section = "convert"
348 348 name = "hg.tagsbranch"
349 349 default = "default"
350 350
351 351 [[items]]
352 352 section = "convert"
353 353 name = "hg.usebranchnames"
354 354 default = true
355 355
356 356 [[items]]
357 357 section = "convert"
358 358 name = "ignoreancestorcheck"
359 359 default = false
360 360 experimental = true
361 361
362 362 [[items]]
363 363 section = "convert"
364 364 name = "localtimezone"
365 365 default = false
366 366
367 367 [[items]]
368 368 section = "convert"
369 369 name = "p4.encoding"
370 370 default-type = "dynamic"
371 371
372 372 [[items]]
373 373 section = "convert"
374 374 name = "p4.startrev"
375 375 default = 0
376 376
377 377 [[items]]
378 378 section = "convert"
379 379 name = "skiptags"
380 380 default = false
381 381
382 382 [[items]]
383 383 section = "convert"
384 384 name = "svn.branches"
385 385
386 386 [[items]]
387 387 section = "convert"
388 388 name = "svn.dangerous-set-commit-dates"
389 389 default = false
390 390
391 391 [[items]]
392 392 section = "convert"
393 393 name = "svn.debugsvnlog"
394 394 default = true
395 395
396 396 [[items]]
397 397 section = "convert"
398 398 name = "svn.startrev"
399 399 default = 0
400 400
401 401 [[items]]
402 402 section = "convert"
403 403 name = "svn.tags"
404 404
405 405 [[items]]
406 406 section = "convert"
407 407 name = "svn.trunk"
408 408
409 409 [[items]]
410 410 section = "debug"
411 411 name = "bundling-stats"
412 412 default = false
413 413 documentation = "Display extra information about the bundling process."
414 414
415 415 [[items]]
416 416 section = "debug"
417 417 name = "dirstate.delaywrite"
418 418 default = 0
419 419
420 420 [[items]]
421 421 section = "debug"
422 422 name = "revlog.debug-delta"
423 423 default = false
424 424
425 425 [[items]]
426 426 section = "debug"
427 427 name = "revlog.verifyposition.changelog"
428 428 default = ""
429 429
430 430 [[items]]
431 431 section = "debug"
432 432 name = "unbundling-stats"
433 433 default = false
434 434 documentation = "Display extra information about the unbundling process."
435 435
436 436 [[items]]
437 437 section = "defaults"
438 438 name = ".*"
439 439 generic = true
440 440
441 441 [[items]]
442 442 section = "devel"
443 443 name = "all-warnings"
444 444 default = false
445 445
446 446 [[items]]
447 447 section = "devel"
448 448 name = "bundle.delta"
449 449 default = ""
450 450
451 451 [[items]]
452 452 section = "devel"
453 453 name = "bundle2.debug"
454 454 default = false
455 455
456 456 [[items]]
457 457 section = "devel"
458 458 name = "cache-vfs"
459 459
460 460 [[items]]
461 461 section = "devel"
462 462 name = "check-locks"
463 463 default = false
464 464
465 465 [[items]]
466 466 section = "devel"
467 467 name = "check-relroot"
468 468 default = false
469 469
470 470 [[items]]
471 471 section = "devel"
472 472 name = "copy-tracing.multi-thread"
473 473 default = true
474 474
475 475 # Track copy information for all files, not just "added" ones (very slow)
476 476 [[items]]
477 477 section = "devel"
478 478 name = "copy-tracing.trace-all-files"
479 479 default = false
480 480
481 481 [[items]]
482 482 section = "devel"
483 483 name = "debug.abort-update"
484 484 default = false
485 485 documentation = """If true, then any merge with the working copy, \
486 486 e.g. [hg update], will be aborted after figuring out what needs to be done, \
487 487 but before spawning the parallel worker."""
488 488
489 489 [[items]]
490 490 section = "devel"
491 491 name = "debug.copies"
492 492 default = false
493 493
494 494 [[items]]
495 495 section = "devel"
496 496 name = "debug.extensions"
497 497 default = false
498 498
499 499 [[items]]
500 500 section = "devel"
501 501 name = "debug.peer-request"
502 502 default = false
503 503
504 504 [[items]]
505 505 section = "devel"
506 506 name = "debug.repo-filters"
507 507 default = false
508 508
509 509 [[items]]
510 510 section = "devel"
511 511 name = "default-date"
512 512
513 513 [[items]]
514 514 section = "devel"
515 515 name = "deprec-warn"
516 516 default = false
517 517
518 518 # possible values:
519 519 # - auto (the default)
520 520 # - force-append
521 521 # - force-new
522 522 [[items]]
523 523 section = "devel"
524 524 name = "dirstate.v2.data_update_mode"
525 525 default = "auto"
526 526
527 527 [[items]]
528 528 section = "devel"
529 529 name = "disableloaddefaultcerts"
530 530 default = false
531 531
532 532 [[items]]
533 533 section = "devel"
534 534 name = "discovery.exchange-heads"
535 535 default = true
536 536 documentation = """If false, the discovery will not start with remote \
537 537 head fetching and local head querying."""
538 538
539 539 [[items]]
540 540 section = "devel"
541 541 name = "discovery.grow-sample"
542 542 default = true
543 543 documentation = """If false, the sample size used in set discovery \
544 544 will not be increased through the process."""
545 545
546 546 [[items]]
547 547 section = "devel"
548 548 name = "discovery.grow-sample.dynamic"
549 549 default = true
550 555 documentation = """If true (the default), the sample size is adapted to the shape \
551 551 of the undecided set. It is set to the max of:
552 552 `<target-size>, len(roots(undecided)), len(heads(undecided))`"""
553 553
554 554 [[items]]
555 555 section = "devel"
556 556 name = "discovery.grow-sample.rate"
557 557 default = 1.05
558 558 documentation = "Controls the rate at which the sample grows."
559 559
560 560 [[items]]
561 561 section = "devel"
562 562 name = "discovery.randomize"
563 563 default = true
564 564 documentation = """If false, random samplings during discovery are deterministic. \
565 565 It is meant for integration tests."""
566 566
567 567 [[items]]
568 568 section = "devel"
569 569 name = "discovery.sample-size"
570 570 default = 200
571 571 documentation = "Controls the initial size of the discovery sample."
572 572
573 573 [[items]]
574 574 section = "devel"
575 575 name = "discovery.sample-size.initial"
576 576 default = 100
577 582 documentation = "Controls the size of the discovery sample used for the initial round."
578 578
579 579 [[items]]
580 580 section = "devel"
581 581 name = "legacy.exchange"
582 582 default-type = "list_type"
583 583
584 584 [[items]]
585 585 section = "devel"
586 name = "lock-wait-sync-file"
587 default = ""
588
589 [[items]]
590 section = "devel"
586 591 name = "persistent-nodemap"
587 592 default = false
588 593 documentation = """When true, revlogs use a special reference version of the \
589 594 nodemap, that is not performant but is "known" to behave properly."""
590 595
591 596 [[items]]
592 597 section = "devel"
593 598 name = "server-insecure-exact-protocol"
594 599 default = ""
595 600
596 601 [[items]]
597 602 section = "devel"
598 603 name = "servercafile"
599 604 default = ""
600 605
601 606 [[items]]
602 607 section = "devel"
603 608 name = "serverexactprotocol"
604 609 default = ""
605 610
606 611 [[items]]
607 612 section = "devel"
608 613 name = "serverrequirecert"
609 614 default = false
610 615
611 616 [[items]]
612 617 section = "devel"
613 618 name = "strip-obsmarkers"
614 619 default = true
615 620
616 621 [[items]]
617 622 section = "devel"
618 623 name = "sync.status.pre-dirstate-write-file"
619 624 documentation = """
620 625 Makes the status algorithm wait for the existence of this file \
621 626 (or until a timeout of `devel.sync.status.pre-dirstate-write-file-timeout` \
622 627 seconds) before taking the lock and writing the dirstate. \
623 628 Status signals that it's ready to wait by creating a file \
624 629 with the same name + `.waiting`. \
625 630 Useful when testing race conditions."""
626 631
627 632 [[items]]
628 633 section = "devel"
629 634 name = "sync.status.pre-dirstate-write-file-timeout"
630 635 default = 2
631 636
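# A hedged sketch of how a race test might drive the handshake described by
# `devel.sync.status.pre-dirstate-write-file` above (the file paths and the
# racing command are hypothetical; the `.waiting` suffix comes from the
# documentation string of that option):
#
#   $ hg status --config devel.sync.status.pre-dirstate-write-file=$SYNC &
#   $ while [ ! -e "$SYNC.waiting" ]; do sleep 0.01; done  # status is ready
#   $ hg commit -m 'racing change'  # mutate the repo while status waits
#   $ touch "$SYNC"                 # release status to write the dirstate
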
632 637 [[items]]
633 638 section = "devel"
634 639 name = "sync.dirstate.post-docket-read-file"
635 640
636 641 [[items]]
637 642 section = "devel"
638 643 name = "sync.dirstate.post-docket-read-file-timeout"
639 644 default = 2
640 645
641 646 [[items]]
642 647 section = "devel"
643 648 name = "sync.dirstate.pre-read-file"
644 649
645 650 [[items]]
646 651 section = "devel"
647 652 name = "sync.dirstate.pre-read-file-timeout"
648 653 default = 2
649 654
650 655 [[items]]
651 656 section = "devel"
652 657 name = "user.obsmarker"
653 658
654 659 [[items]]
655 660 section = "devel"
656 661 name = "warn-config"
657 662
658 663 [[items]]
659 664 section = "devel"
660 665 name = "warn-config-default"
661 666
662 667 [[items]]
663 668 section = "devel"
664 669 name = "warn-config-unknown"
665 670
666 671 [[items]]
667 672 section = "devel"
668 673 name = "warn-empty-changegroup"
669 674 default = false
670 675
671 676 [[items]]
672 677 section = "diff"
673 678 name = "merge"
674 679 default = false
675 680 experimental = true
676 681
677 682 [[items]]
678 683 section = "email"
679 684 name = "bcc"
680 685
681 686 [[items]]
682 687 section = "email"
683 688 name = "cc"
684 689
685 690 [[items]]
686 691 section = "email"
687 692 name = "charsets"
688 693 default-type = "list_type"
689 694
690 695 [[items]]
691 696 section = "email"
692 697 name = "from"
693 698
694 699 [[items]]
695 700 section = "email"
696 701 name = "method"
697 702 default = "smtp"
698 703
699 704 [[items]]
700 705 section = "email"
701 706 name = "reply-to"
702 707
703 708 [[items]]
704 709 section = "email"
705 710 name = "to"
706 711
707 712 [[items]]
708 713 section = "experimental"
709 714 name = "archivemetatemplate"
710 715 default-type = "dynamic"
711 716
712 717 [[items]]
713 718 section = "experimental"
714 719 name = "auto-publish"
715 720 default = "publish"
716 721
717 722 [[items]]
718 723 section = "experimental"
719 724 name = "bundle-phases"
720 725 default = false
721 726
722 727 [[items]]
723 728 section = "experimental"
724 729 name = "bundle2-advertise"
725 730 default = true
726 731
727 732 [[items]]
728 733 section = "experimental"
729 734 name = "bundle2-output-capture"
730 735 default = false
731 736
732 737 [[items]]
733 738 section = "experimental"
734 739 name = "bundle2.pushback"
735 740 default = false
736 741
737 742 [[items]]
738 743 section = "experimental"
739 744 name = "bundle2lazylocking"
740 745 default = false
741 746
742 747 [[items]]
743 748 section = "experimental"
744 749 name = "bundlecomplevel"
745 750
746 751 [[items]]
747 752 section = "experimental"
748 753 name = "bundlecomplevel.bzip2"
749 754
750 755 [[items]]
751 756 section = "experimental"
752 757 name = "bundlecomplevel.gzip"
753 758
754 759 [[items]]
755 760 section = "experimental"
756 761 name = "bundlecomplevel.none"
757 762
758 763 [[items]]
759 764 section = "experimental"
760 765 name = "bundlecomplevel.zstd"
761 766
762 767 [[items]]
763 768 section = "experimental"
764 769 name = "bundlecompthreads"
765 770
766 771 [[items]]
767 772 section = "experimental"
768 773 name = "bundlecompthreads.bzip2"
769 774
770 775 [[items]]
771 776 section = "experimental"
772 777 name = "bundlecompthreads.gzip"
773 778
774 779 [[items]]
775 780 section = "experimental"
776 781 name = "bundlecompthreads.none"
777 782
778 783 [[items]]
779 784 section = "experimental"
780 785 name = "bundlecompthreads.zstd"
781 786
782 787 [[items]]
783 788 section = "experimental"
784 789 name = "changegroup3"
785 790 default = true
786 791
787 792 [[items]]
788 793 section = "experimental"
789 794 name = "changegroup4"
790 795 default = false
791 796
792 797 # might remove rank configuration once the computation has no impact
793 798 [[items]]
794 799 section = "experimental"
795 800 name = "changelog-v2.compute-rank"
796 801 default = true
797 802
798 803 [[items]]
799 804 section = "experimental"
800 805 name = "cleanup-as-archived"
801 806 default = false
802 807
803 808 [[items]]
804 809 section = "experimental"
805 810 name = "clientcompressionengines"
806 811 default-type = "list_type"
807 812
808 813 [[items]]
809 814 section = "experimental"
810 815 name = "copies.read-from"
811 816 default = "filelog-only"
812 817
813 818 [[items]]
814 819 section = "experimental"
815 820 name = "copies.write-to"
816 821 default = "filelog-only"
817 822
818 823 [[items]]
819 824 section = "experimental"
820 825 name = "copytrace"
821 826 default = "on"
822 827
823 828 [[items]]
824 829 section = "experimental"
825 830 name = "copytrace.movecandidateslimit"
826 831 default = 100
827 832
828 833 [[items]]
829 834 section = "experimental"
830 835 name = "copytrace.sourcecommitlimit"
831 836 default = 100
832 837
833 838 [[items]]
834 839 section = "experimental"
835 840 name = "crecordtest"
836 841
837 842 [[items]]
838 843 section = "experimental"
839 844 name = "directaccess"
840 845 default = false
841 846
842 847 [[items]]
843 848 section = "experimental"
844 849 name = "directaccess.revnums"
845 850 default = false
846 851
847 852 [[items]]
848 853 section = "experimental"
849 854 name = "editortmpinhg"
850 855 default = false
851 856
852 857 [[items]]
853 858 section = "experimental"
854 859 name = "evolution"
855 860 default-type = "list_type"
856 861
857 862 [[items]]
858 863 section = "experimental"
859 864 name = "evolution.allowdivergence"
860 865 default = false
861 866 alias = [["experimental", "allowdivergence"]]
862 867
863 868 [[items]]
864 869 section = "experimental"
865 870 name = "evolution.allowunstable"
866 871
867 872 [[items]]
868 873 section = "experimental"
869 874 name = "evolution.bundle-obsmarker"
870 875 default = false
871 876
872 877 [[items]]
873 878 section = "experimental"
874 879 name = "evolution.bundle-obsmarker:mandatory"
875 880 default = true
876 881
877 882 [[items]]
878 883 section = "experimental"
879 884 name = "evolution.createmarkers"
880 885
881 886 [[items]]
882 887 section = "experimental"
883 888 name = "evolution.effect-flags"
884 889 default = true
885 890 alias = [["experimental", "effect-flags"]]
886 891
887 892 [[items]]
888 893 section = "experimental"
889 894 name = "evolution.exchange"
890 895
891 896 [[items]]
892 897 section = "experimental"
893 898 name = "evolution.report-instabilities"
894 899 default = true
895 900
896 901 [[items]]
897 902 section = "experimental"
898 903 name = "evolution.track-operation"
899 904 default = true
900 905
901 906 [[items]]
902 907 section = "experimental"
903 908 name = "exportableenviron"
904 909 default-type = "list_type"
905 910
906 911 [[items]]
907 912 section = "experimental"
908 913 name = "extendedheader.index"
909 914
910 915 [[items]]
911 916 section = "experimental"
912 917 name = "extendedheader.similarity"
913 918 default = false
914 919
915 920 [[items]]
916 921 section = "experimental"
917 922 name = "extra-filter-revs"
918 923 documentation = """Repo-level config to prevent a revset from being visible.
919 924 The target use case is to use `share` to expose different subsets of the same \
920 925 repository, especially server side. See also `server.view`."""
921 926
922 927 [[items]]
923 928 section = "experimental"
924 929 name = "graphshorten"
925 930 default = false
926 931
927 932 [[items]]
928 933 section = "experimental"
929 934 name = "graphstyle.grandparent"
930 935 default-type = "dynamic"
931 936
932 937 [[items]]
933 938 section = "experimental"
934 939 name = "graphstyle.missing"
935 940 default-type = "dynamic"
936 941
937 942 [[items]]
938 943 section = "experimental"
939 944 name = "graphstyle.parent"
940 945 default-type = "dynamic"
941 946
942 947 [[items]]
943 948 section = "experimental"
944 949 name = "hook-track-tags"
945 950 default = false
946 951
947 952 [[items]]
948 953 section = "experimental"
949 954 name = "httppostargs"
950 955 default = false
951 956
952 957 [[items]]
953 958 section = "experimental"
954 959 name = "log.topo"
955 960 default = false
956 961
957 962 [[items]]
958 963 section = "experimental"
959 964 name = "maxdeltachainspan"
960 965 default = -1
961 966
962 967 [[items]]
963 968 section = "experimental"
964 969 name = "merge-track-salvaged"
965 970 default = false
966 971 documentation = """Tracks files which were undeleted (merge might delete them \
967 972 but we explicitly kept/undeleted them) and creates new filenodes for them."""
968 973
969 974 [[items]]
970 975 section = "experimental"
971 976 name = "merge.checkpathconflicts"
972 977 default = false
973 978
974 979 [[items]]
975 980 section = "experimental"
976 981 name = "mmapindexthreshold"
977 982
978 983 [[items]]
979 984 section = "experimental"
980 985 name = "narrow"
981 986 default = false
982 987
983 988 [[items]]
984 989 section = "experimental"
985 990 name = "nointerrupt"
986 991 default = false
987 992
988 993 [[items]]
989 994 section = "experimental"
990 995 name = "nointerrupt-interactiveonly"
991 996 default = true
992 997
993 998 [[items]]
994 999 section = "experimental"
995 1000 name = "nonnormalparanoidcheck"
996 1001 default = false
997 1002
998 1003 [[items]]
999 1004 section = "experimental"
1000 1005 name = "obsmarkers-exchange-debug"
1001 1006 default = false
1002 1007
1003 1008 [[items]]
1004 1009 section = "experimental"
1005 1010 name = "rebaseskipobsolete"
1006 1011 default = true
1007 1012
1008 1013 [[items]]
1009 1014 section = "experimental"
1010 1015 name = "remotenames"
1011 1016 default = false
1012 1017
1013 1018 [[items]]
1014 1019 section = "experimental"
1015 1020 name = "removeemptydirs"
1016 1021 default = true
1017 1022
1018 1023 [[items]]
1019 1024 section = "experimental"
1020 1025 name = "revert.interactive.select-to-keep"
1021 1026 default = false
1022 1027
1023 1028 [[items]]
1024 1029 section = "experimental"
1025 1030 name = "revisions.disambiguatewithin"
1026 1031
1027 1032 [[items]]
1028 1033 section = "experimental"
1029 1034 name = "revisions.prefixhexnode"
1030 1035 default = false
1031 1036
1032 1037 # "out of experimental" todo list.
1033 1038 #
1034 1039 # * include management of a persistent nodemap in the main docket
1035 1040 # * enforce a "no-truncate" policy for mmap safety
1036 1041 # - for censoring operation
1037 1042 # - for stripping operation
1038 1043 # - for rollback operation
1039 1044 # * proper streaming (race free) of the docket file
1040 1045 # * track garbage data to eventually allow rewriting -existing- sidedata.
1041 1046 # * Exchange-wise, we will also need to do something more efficient than
1042 1047 # keeping references to the affected revlogs, especially memory-wise when
1043 1048 # rewriting sidedata.
1044 1049 # * introduce a proper solution to reduce the number of filelog related files.
1045 1050 # * use caching for reading sidedata (similar to what we do for data).
1046 1051 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1047 1052 # * Improvements to consider
1048 1053 # - avoid compression header in chunk using the default compression?
1049 1054 # - forbid "inline" compression mode entirely?
1050 1055 # - split the data offset and flag field (the 2 bytes saved are mostly trouble)
1051 1056 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1052 1057 # - keep track of chain base or size (probably not that useful anymore)
1053 1058 [[items]]
1054 1059 section = "experimental"
1055 1060 name = "revlogv2"
1056 1061
1057 1062 [[items]]
1058 1063 section = "experimental"
1059 1064 name = "rust.index"
1060 1065 default = false
1061 1066
1062 1067 [[items]]
1063 1068 section = "experimental"
1064 1069 name = "server.allow-hidden-access"
1065 1070 default-type = "list_type"
1066 1071
1067 1072 [[items]]
1068 1073 section = "experimental"
1069 1074 name = "server.filesdata.recommended-batch-size"
1070 1075 default = 50000
1071 1076
1072 1077 [[items]]
1073 1078 section = "experimental"
1074 1079 name = "server.manifestdata.recommended-batch-size"
1075 1080 default = 100000
1076 1081
1077 1082 [[items]]
1078 1083 section = "experimental"
1079 1084 name = "server.stream-narrow-clones"
1080 1085 default = false
1081 1086
1082 1087 [[items]]
1083 1088 section = "experimental"
1084 1089 name = "single-head-per-branch"
1085 1090 default = false
1086 1091
1087 1092 [[items]]
1088 1093 section = "experimental"
1089 1094 name = "single-head-per-branch:account-closed-heads"
1090 1095 default = false
1091 1096
1092 1097 [[items]]
1093 1098 section = "experimental"
1094 1099 name = "single-head-per-branch:public-changes-only"
1095 1100 default = false
1096 1101
1097 1102 [[items]]
1098 1103 section = "experimental"
1099 1104 name = "sparse-read"
1100 1105 default = false
1101 1106
1102 1107 [[items]]
1103 1108 section = "experimental"
1104 1109 name = "sparse-read.density-threshold"
1105 1110 default = 0.5
1106 1111
1107 1112 [[items]]
1108 1113 section = "experimental"
1109 1114 name = "sparse-read.min-gap-size"
1110 1115 default = "65K"
1111 1116
1112 1117 [[items]]
1113 1118 section = "experimental"
1114 1119 name = "stream-v3"
1115 1120 default = false
1116 1121
1117 1122 [[items]]
1118 1123 section = "experimental"
1119 1124 name = "treemanifest"
1120 1125 default = false
1121 1126
1122 1127 [[items]]
1123 1128 section = "experimental"
1124 1129 name = "update.atomic-file"
1125 1130 default = false
1126 1131
1127 1132 [[items]]
1128 1133 section = "experimental"
1129 1134 name = "web.full-garbage-collection-rate"
1130 1135 default = 1 # still forcing a full collection on each request
1131 1136
1132 1137 [[items]]
1133 1138 section = "experimental"
1134 1139 name = "worker.repository-upgrade"
1135 1140 default = false
1136 1141
1137 1142 [[items]]
1138 1143 section = "experimental"
1139 1144 name = "worker.wdir-get-thread-safe"
1140 1145 default = false
1141 1146
1142 1147 [[items]]
1143 1148 section = "experimental"
1144 1149 name = "xdiff"
1145 1150 default = false
1146 1151
1147 1152 [[items]]
1148 1153 section = "extdata"
1149 1154 name = ".*"
1150 1155 generic = true
1151 1156
1152 1157 [[items]]
1153 1158 section = "extensions"
1154 1159 name = "[^:]*"
1155 1160 generic = true
1156 1161
1157 1162 [[items]]
1158 1163 section = "extensions"
1159 1164 name = "[^:]*:required"
1160 1165 default = false
1161 1166 generic = true
1162 1167
1163 1168
1164 1169 # The format section is dedicated to controlling the repository's on-disk format
1165 1170 # and constraints.
1166 1171 #
1167 1172 # A format change affects which data is expected to be stored in the repository
1168 1173 # and how. It impacts other clients whatever their versions are; a format change
1169 1174 # often comes with an associated entry in the requirements.
1170 1175 #
1171 1176 # The options are usually of the form `use-xxx-yyy` (with `xxx-yyy` the feature name).
1172 1177 #
1173 1178 # To configure details of how the repository is accessed, without affecting the
1174 1179 # repository format, see the `storage` section.
1175 1180
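# For example, a user opting into a different on-disk format for newly
# created repositories might set, in an hgrc (a sketch based on the
# `use-dirstate-v2` item declared below, which only affects new repos):
#
#   [format]
#   use-dirstate-v2 = true
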
1176 1181 [[items]]
1177 1182 section = "format"
1178 1183 name = "bookmarks-in-store"
1179 1184 default = false
1180 1185
1181 1186 [[items]]
1182 1187 section = "format"
1183 1188 name = "chunkcachesize"
1184 1189 experimental = true
1185 1190
1186 1191 [[items]]
1187 1192 section = "format"
1188 1193 name = "dotencode"
1189 1194 default = true
1190 1195
1191 1196 # The interaction between the archived phase and obsolescence markers needs to
1192 1197 # be sorted out before wider usage of this is to be considered.
1193 1198 #
1194 1199 # At the time this message is written, behavior when archiving obsolete
1195 1200 # changesets differs significantly from stripping. As part of stripping, we also
1196 1201 # remove the obsolescence markers associated with the stripped changesets,
1197 1202 # revealing the predecessor changesets when applicable. When archiving, we
1198 1203 # don't touch the obsolescence markers, keeping everything hidden. This can
1199 1204 # result in quite confusing situations for people who combine exchanging drafts
1200 1205 # with the archived phase, as some markers needed by others may be skipped
1201 1206 # during exchange.
1202 1207 [[items]]
1203 1208 section = "format"
1204 1209 name = "exp-archived-phase"
1205 1210 default = false
1206 1211 experimental = true
1207 1212
1208 1213 # Experimental TODOs:
1209 1214 #
1210 1215 # * Same as for revlogv2 (but for the reduction of the number of files)
1211 1216 # * Actually computing the rank of changesets
1212 1217 # * Improvements to investigate
1213 1218 # - storing .hgtags fnode
1214 1219 # - storing branch related identifier
1215 1220 [[items]]
1216 1221 section = "format"
1217 1222 name = "exp-use-changelog-v2"
1218 1223 experimental = true
1219 1224
1220 1225 [[items]]
1221 1226 section = "format"
1222 1227 name = "exp-use-copies-side-data-changeset"
1223 1228 default = false
1224 1229 experimental = true
1225 1230
1226 1231 [[items]]
1227 1232 section = "format"
1228 1233 name = "generaldelta"
1229 1234 default = false
1230 1235 experimental = true
1231 1236
1232 1237 [[items]]
1233 1238 section = "format"
1234 1239 name = "manifestcachesize"
1235 1240 experimental = true
1236 1241
1237 1242 [[items]]
1238 1243 section = "format"
1239 1244 name = "maxchainlen"
1240 1245 default-type = "dynamic"
1241 1246 experimental = true
1242 1247
1243 1248 [[items]]
1244 1249 section = "format"
1245 1250 name = "obsstore-version"
1246 1251
1247 1252 [[items]]
1248 1253 section = "format"
1249 1254 name = "revlog-compression"
1250 1255 default-type = "lambda"
1251 1256 alias = [["experimental", "format.compression"]]
1252 1257 default = [ "zstd", "zlib",]
1253 1258
1254 1259 [[items]]
1255 1260 section = "format"
1256 1261 name = "sparse-revlog"
1257 1262 default = true
1258 1263
1259 1264 [[items]]
1260 1265 section = "format"
1261 1266 name = "use-dirstate-tracked-hint"
1262 1267 default = false
1263 1268 experimental = true
1264 1269
1265 1270 [[items]]
1266 1271 section = "format"
1267 1272 name = "use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories"
1268 1273 default = false
1269 1274 experimental = true
1270 1275
1271 1276 [[items]]
1272 1277 section = "format"
1273 1278 name = "use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet"
1274 1279 default = false
1275 1280 experimental = true
1276 1281
1277 1282 [[items]]
1278 1283 section = "format"
1279 1284 name = "use-dirstate-tracked-hint.version"
1280 1285 default = 1
1281 1286 experimental = true
1282 1287
1283 1288 [[items]]
1284 1289 section = "format"
1285 1290 name = "use-dirstate-v2"
1286 1291 default = false
1287 1292 alias = [["format", "exp-rc-dirstate-v2"]]
1288 1293 experimental = true
1289 1294 documentation = """Enables dirstate-v2 format *when creating a new repository*.
1290 1295 Which format to use for existing repos is controlled by `.hg/requires`."""
1291 1296
1292 1297 [[items]]
1293 1298 section = "format"
1294 1299 name = "use-dirstate-v2.automatic-upgrade-of-mismatching-repositories"
1295 1300 default = false
1296 1301 experimental = true
1297 1302
1298 1303 [[items]]
1299 1304 section = "format"
1300 1305 name = "use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet"
1301 1306 default = false
1302 1307 experimental = true
1303 1308
1304 1309 # Having this on by default means we are confident about the scaling of phases.
1305 1310 # This is not guaranteed to be the case at the time this message is written.
1306 1311 [[items]]
1307 1312 section = "format"
1308 1313 name = "use-internal-phase"
1309 1314 default = false
1310 1315 experimental = true
1311 1316
1312 1317 [[items]]
1313 1318 section = "format"
1314 1319 name = "use-persistent-nodemap"
1315 1320 default-type = "dynamic"
1316 1321
1317 1322 [[items]]
1318 1323 section = "format"
1319 1324 name = "use-share-safe"
1320 1325 default = true
1321 1326
1322 1327 [[items]]
1323 1328 section = "format"
1324 1329 name = "use-share-safe.automatic-upgrade-of-mismatching-repositories"
1325 1330 default = false
1326 1331 experimental = true
1327 1332
1328 1333 [[items]]
1329 1334 section = "format"
1330 1335 name = "use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet"
1331 1336 default = false
1332 1337 experimental = true
1333 1338
1334 1339 [[items]]
1335 1340 section = "format"
1336 1341 name = "usefncache"
1337 1342 default = true
1338 1343
1339 1344 [[items]]
1340 1345 section = "format"
1341 1346 name = "usegeneraldelta"
1342 1347 default = true
1343 1348
1344 1349 [[items]]
1345 1350 section = "format"
1346 1351 name = "usestore"
1347 1352 default = true
1348 1353
1349 1354 [[items]]
1350 1355 section = "fsmonitor"
1351 1356 name = "warn_update_file_count"
1352 1357 default = 50000
1353 1358
1354 1359 [[items]]
1355 1360 section = "fsmonitor"
1356 1361 name = "warn_update_file_count_rust"
1357 1362 default = 400000
1358 1363
1359 1364 [[items]]
1360 1365 section = "fsmonitor"
1361 1366 name = "warn_when_unused"
1362 1367 default = true
1363 1368
1364 1369 [[items]]
1365 1370 section = "help"
1366 1371 name = 'hidden-command\..*'
1367 1372 default = false
1368 1373 generic = true
1369 1374
1370 1375 [[items]]
1371 1376 section = "help"
1372 1377 name = 'hidden-topic\..*'
1373 1378 default = false
1374 1379 generic = true
1375 1380
1376 1381 [[items]]
1377 1382 section = "hgweb-paths"
1378 1383 name = ".*"
1379 1384 default-type = "list_type"
1380 1385 generic = true
1381 1386
1382 1387 [[items]]
1383 1388 section = "hooks"
1384 1389 name = ".*:run-with-plain"
1385 1390 default = true
1386 1391 generic = true
1387 1392
1388 1393 [[items]]
1389 1394 section = "hooks"
1390 1395 name = "[^:]*"
1391 1396 default-type = "dynamic"
1392 1397 generic = true
1393 1398
1394 1399 [[items]]
1395 1400 section = "hostfingerprints"
1396 1401 name = ".*"
1397 1402 default-type = "list_type"
1398 1403 generic = true
1399 1404
1400 1405 [[items]]
1401 1406 section = "hostsecurity"
1402 1407 name = ".*:ciphers$"
1403 1408 default-type = "dynamic"
1404 1409 generic = true
1405 1410
1406 1411 [[items]]
1407 1412 section = "hostsecurity"
1408 1413 name = ".*:fingerprints$"
1409 1414 default-type = "list_type"
1410 1415 generic = true
1411 1416
1412 1417 [[items]]
1413 1418 section = "hostsecurity"
1414 1419 name = ".*:minimumprotocol$"
1415 1420 default-type = "dynamic"
1416 1421 generic = true
1417 1422
1418 1423 [[items]]
1419 1424 section = "hostsecurity"
1420 1425 name = ".*:verifycertsfile$"
1421 1426 generic = true
1422 1427
1423 1428 [[items]]
1424 1429 section = "hostsecurity"
1425 1430 name = "ciphers"
1426 1431
1427 1432 [[items]]
1428 1433 section = "hostsecurity"
1429 1434 name = "minimumprotocol"
1430 1435 default-type = "dynamic"
1431 1436
1432 1437 [[items]]
1433 1438 section = "http"
1434 1439 name = "timeout"
1435 1440
1436 1441 [[items]]
1437 1442 section = "http_proxy"
1438 1443 name = "always"
1439 1444 default = false
1440 1445
1441 1446 [[items]]
1442 1447 section = "http_proxy"
1443 1448 name = "host"
1444 1449
1445 1450 [[items]]
1446 1451 section = "http_proxy"
1447 1452 name = "no"
1448 1453 default-type = "list_type"
1449 1454
1450 1455 [[items]]
1451 1456 section = "http_proxy"
1452 1457 name = "passwd"
1453 1458
1454 1459 [[items]]
1455 1460 section = "http_proxy"
1456 1461 name = "user"
1457 1462
1458 1463 [[items]]
1459 1464 section = "logtoprocess"
1460 1465 name = "command"
1461 1466
1462 1467 [[items]]
1463 1468 section = "logtoprocess"
1464 1469 name = "commandexception"
1465 1470
1466 1471 [[items]]
1467 1472 section = "logtoprocess"
1468 1473 name = "commandfinish"
1469 1474
1470 1475 [[items]]
1471 1476 section = "logtoprocess"
1472 1477 name = "develwarn"
1473 1478
1474 1479 [[items]]
1475 1480 section = "logtoprocess"
1476 1481 name = "uiblocked"
1477 1482
1478 1483 [[items]]
1479 1484 section = "merge"
1480 1485 name = "checkignored"
1481 1486 default = "abort"
1482 1487
1483 1488 [[items]]
1484 1489 section = "merge"
1485 1490 name = "checkunknown"
1486 1491 default = "abort"
1487 1492
1488 1493 [[items]]
1489 1494 section = "merge"
1490 1495 name = "disable-partial-tools"
1491 1496 default = false
1492 1497 experimental = true
1493 1498
1494 1499 [[items]]
1495 1500 section = "merge"
1496 1501 name = "followcopies"
1497 1502 default = true
1498 1503
1499 1504 [[items]]
1500 1505 section = "merge"
1501 1506 name = "on-failure"
1502 1507 default = "continue"
1503 1508
1504 1509 [[items]]
1505 1510 section = "merge"
1506 1511 name = "preferancestor"
1507 1512 default-type = "lambda"
1508 1513 default = ["*"]
1509 1514 experimental = true
1510 1515
1511 1516 [[items]]
1512 1517 section = "merge"
1513 1518 name = "strict-capability-check"
1514 1519 default = false
1515 1520
1516 1521 [[items]]
1517 1522 section = "merge-tools"
1518 1523 name = ".*"
1519 1524 generic = true
1520 1525
1521 1526 [[items]]
1522 1527 section = "merge-tools"
1523 1528 name = '.*\.args$'
1524 1529 default = "$local $base $other"
1525 1530 generic = true
1526 1531 priority = -1
1527 1532
1528 1533 [[items]]
1529 1534 section = "merge-tools"
1530 1535 name = '.*\.binary$'
1531 1536 default = false
1532 1537 generic = true
1533 1538 priority = -1
1534 1539
1535 1540 [[items]]
1536 1541 section = "merge-tools"
1537 1542 name = '.*\.check$'
1538 1543 default-type = "list_type"
1539 1544 generic = true
1540 1545 priority = -1
1541 1546
1542 1547 [[items]]
1543 1548 section = "merge-tools"
1544 1549 name = '.*\.checkchanged$'
1545 1550 default = false
1546 1551 generic = true
1547 1552 priority = -1
1548 1553
1549 1554 [[items]]
1550 1555 section = "merge-tools"
1551 1556 name = '.*\.executable$'
1552 1557 default-type = "dynamic"
1553 1558 generic = true
1554 1559 priority = -1
1555 1560
1556 1561 [[items]]
1557 1562 section = "merge-tools"
1558 1563 name = '.*\.fixeol$'
1559 1564 default = false
1560 1565 generic = true
1561 1566 priority = -1
1562 1567
1563 1568 [[items]]
1564 1569 section = "merge-tools"
1565 1570 name = '.*\.gui$'
1566 1571 default = false
1567 1572 generic = true
1568 1573 priority = -1
1569 1574
1570 1575 [[items]]
1571 1576 section = "merge-tools"
1572 1577 name = '.*\.mergemarkers$'
1573 1578 default = "basic"
1574 1579 generic = true
1575 1580 priority = -1
1576 1581
1577 1582 [[items]]
1578 1583 section = "merge-tools"
1579 1584 name = '.*\.mergemarkertemplate$' # take from command-templates.mergemarker
1580 1585 default-type = "dynamic"
1581 1586 generic = true
1582 1587 priority = -1
1583 1588
1584 1589 [[items]]
1585 1590 section = "merge-tools"
1586 1591 name = '.*\.premerge$'
1587 1592 default-type = "dynamic"
1588 1593 generic = true
1589 1594 priority = -1
1590 1595
1591 1596 [[items]]
1592 1597 section = "merge-tools"
1593 1598 name = '.*\.priority$'
1594 1599 default = 0
1595 1600 generic = true
1596 1601 priority = -1
1597 1602
1598 1603 [[items]]
1599 1604 section = "merge-tools"
1600 1605 name = '.*\.regappend$'
1601 1606 default = ""
1602 1607 generic = true
1603 1608 priority = -1
1604 1609
1605 1610 [[items]]
1606 1611 section = "merge-tools"
1607 1612 name = '.*\.symlink$'
1608 1613 default = false
1609 1614 generic = true
1610 1615 priority = -1
1611 1616
1612 1617 [[items]]
1613 1618 section = "pager"
1614 1619 name = "attend-.*"
1615 1620 default-type = "dynamic"
1616 1621 generic = true
1617 1622
1618 1623 [[items]]
1619 1624 section = "pager"
1620 1625 name = "ignore"
1621 1626 default-type = "list_type"
1622 1627
1623 1628 [[items]]
1624 1629 section = "pager"
1625 1630 name = "pager"
1626 1631 default-type = "dynamic"
1627 1632
1628 1633 [[items]]
1629 1634 section = "partial-merge-tools"
1630 1635 name = ".*"
1631 1636 generic = true
1632 1637 experimental = true
1633 1638
1634 1639 [[items]]
1635 1640 section = "partial-merge-tools"
1636 1641 name = '.*\.args'
1637 1642 default = "$local $base $other"
1638 1643 generic = true
1639 1644 priority = -1
1640 1645 experimental = true
1641 1646
1642 1647 [[items]]
1643 1648 section = "partial-merge-tools"
1644 1649 name = '.*\.disable'
1645 1650 default = false
1646 1651 generic = true
1647 1652 priority = -1
1648 1653 experimental = true
1649 1654
1650 1655 [[items]]
1651 1656 section = "partial-merge-tools"
1652 1657 name = '.*\.executable$'
1653 1658 default-type = "dynamic"
1654 1659 generic = true
1655 1660 priority = -1
1656 1661 experimental = true
1657 1662
1658 1663 [[items]]
1659 1664 section = "partial-merge-tools"
1660 1665 name = '.*\.order'
1661 1666 default = 0
1662 1667 generic = true
1663 1668 priority = -1
1664 1669 experimental = true
1665 1670
1666 1671 [[items]]
1667 1672 section = "partial-merge-tools"
1668 1673 name = '.*\.patterns'
1669 1674 default-type = "dynamic"
1670 1675 generic = true
1671 1676 priority = -1
1672 1677 experimental = true
1673 1678
1674 1679 [[items]]
1675 1680 section = "patch"
1676 1681 name = "eol"
1677 1682 default = "strict"
1678 1683
1679 1684 [[items]]
1680 1685 section = "patch"
1681 1686 name = "fuzz"
1682 1687 default = 2
1683 1688
1684 1689 [[items]]
1685 1690 section = "paths"
1686 1691 name = "[^:]*"
1687 1692 generic = true
1688 1693
1689 1694 [[items]]
1690 1695 section = "paths"
1691 1696 name = ".*:bookmarks.mode"
1692 1697 default = "default"
1693 1698 generic = true
1694 1699
1695 1700 [[items]]
1696 1701 section = "paths"
1697 1702 name = ".*:multi-urls"
1698 1703 default = false
1699 1704 generic = true
1700 1705
1701 1706 [[items]]
1702 1707 section = "paths"
1703 1708 name = ".*:pulled-delta-reuse-policy"
1704 1709 generic = true
1705 1710
1706 1711 [[items]]
1707 1712 section = "paths"
1708 1713 name = ".*:pushrev"
1709 1714 generic = true
1710 1715
1711 1716 [[items]]
1712 1717 section = "paths"
1713 1718 name = ".*:pushurl"
1714 1719 generic = true
1715 1720
1716 1721 [[items]]
1717 1722 section = "paths"
1718 1723 name = "default"
1719 1724
1720 1725 [[items]]
1721 1726 section = "paths"
1722 1727 name = "default-push"
1723 1728
1724 1729 [[items]]
1725 1730 section = "phases"
1726 1731 name = "checksubrepos"
1727 1732 default = "follow"
1728 1733
1729 1734 [[items]]
1730 1735 section = "phases"
1731 1736 name = "new-commit"
1732 1737 default = "draft"
1733 1738
1734 1739 [[items]]
1735 1740 section = "phases"
1736 1741 name = "publish"
1737 1742 default = true
1738 1743
1739 1744 [[items]]
1740 1745 section = "profiling"
1741 1746 name = "enabled"
1742 1747 default = false
1743 1748
1744 1749 [[items]]
1745 1750 section = "profiling"
1746 1751 name = "format"
1747 1752 default = "text"
1748 1753
1749 1754 [[items]]
1750 1755 section = "profiling"
1751 1756 name = "freq"
1752 1757 default = 1000
1753 1758
1754 1759 [[items]]
1755 1760 section = "profiling"
1756 1761 name = "limit"
1757 1762 default = 30
1758 1763
1759 1764 [[items]]
1760 1765 section = "profiling"
1761 1766 name = "nested"
1762 1767 default = 0
1763 1768
1764 1769 [[items]]
1765 1770 section = "profiling"
1766 1771 name = "output"
1767 1772
1768 1773 [[items]]
1769 1774 section = "profiling"
1770 1775 name = "showmax"
1771 1776 default = 0.999
1772 1777
1773 1778 [[items]]
1774 1779 section = "profiling"
1775 1780 name = "showmin"
1776 1781 default-type = "dynamic"
1777 1782
1778 1783 [[items]]
1779 1784 section = "profiling"
1780 1785 name = "showtime"
1781 1786 default = true
1782 1787
1783 1788 [[items]]
1784 1789 section = "profiling"
1785 1790 name = "sort"
1786 1791 default = "inlinetime"
1787 1792
1788 1793 [[items]]
1789 1794 section = "profiling"
1790 1795 name = "statformat"
1791 1796 default = "hotpath"
1792 1797
1793 1798 [[items]]
1794 1799 section = "profiling"
1795 1800 name = "time-track"
1796 1801 default-type = "dynamic"
1797 1802
1798 1803 [[items]]
1799 1804 section = "profiling"
1800 1805 name = "type"
1801 1806 default = "stat"
1802 1807
1803 1808 [[items]]
1804 1809 section = "progress"
1805 1810 name = "assume-tty"
1806 1811 default = false
1807 1812
1808 1813 [[items]]
1809 1814 section = "progress"
1810 1815 name = "changedelay"
1811 1816 default = 1
1812 1817
1813 1818 [[items]]
1814 1819 section = "progress"
1815 1820 name = "clear-complete"
1816 1821 default = true
1817 1822
1818 1823 [[items]]
1819 1824 section = "progress"
1820 1825 name = "debug"
1821 1826 default = false
1822 1827
1823 1828 [[items]]
1824 1829 section = "progress"
1825 1830 name = "delay"
1826 1831 default = 3
1827 1832
1828 1833 [[items]]
1829 1834 section = "progress"
1830 1835 name = "disable"
1831 1836 default = false
1832 1837
1833 1838 [[items]]
1834 1839 section = "progress"
1835 1840 name = "estimateinterval"
1836 1841 default = 60.0
1837 1842
1838 1843 [[items]]
1839 1844 section = "progress"
1840 1845 name = "format"
1841 1846 default-type = "lambda"
1842 1847 default = [ "topic", "bar", "number", "estimate",]
1843 1848
1844 1849 [[items]]
1845 1850 section = "progress"
1846 1851 name = "refresh"
1847 1852 default = 0.1
1848 1853
1849 1854 [[items]]
1850 1855 section = "progress"
1851 1856 name = "width"
1852 1857 default-type = "dynamic"
1853 1858
1854 1859 [[items]]
1855 1860 section = "pull"
1856 1861 name = "confirm"
1857 1862 default = false
1858 1863
1859 1864 [[items]]
1860 1865 section = "push"
1861 1866 name = "pushvars.server"
1862 1867 default = false
1863 1868
1864 1869 [[items]]
1865 1870 section = "rebase"
1866 1871 name = "experimental.inmemory"
1867 1872 default = false
1868 1873
1869 1874 [[items]]
1870 1875 section = "rebase"
1871 1876 name = "singletransaction"
1872 1877 default = false
1873 1878
1874 1879 [[items]]
1875 1880 section = "rebase"
1876 1881 name = "store-source"
1877 1882 default = true
1878 1883 experimental = true
1879 1884 documentation = """Controls creation of a `rebase_source` extra field during rebase.
1880 1885 When false, no such field is created. This is useful e.g. for incrementally \
1881 1886 converting changesets and then rebasing them onto an existing repo.
1882 1887 WARNING: this is an advanced setting reserved for people who know \
1883 1888 exactly what they are doing. Misuse of this setting can easily \
1884 1889 result in obsmarker cycles and a vivid headache."""
1885 1890
1886 1891 [[items]]
1887 1892 section = "rewrite"
1888 1893 name = "backup-bundle"
1889 1894 default = true
1890 1895 alias = [["ui", "history-editing-backup"]]
1891 1896
1892 1897 [[items]]
1893 1898 section = "rewrite"
1894 1899 name = "empty-successor"
1895 1900 default = "skip"
1896 1901 experimental = true
1897 1902
1898 1903 [[items]]
1899 1904 section = "rewrite"
1900 1905 name = "update-timestamp"
1901 1906 default = false
1902 1907
1903 1908 [[items]]
1904 1909 section = "rhg"
1905 1910 name = "cat"
1906 1911 default = true
1907 1912 experimental = true
1908 1913 documentation = """rhg cat has some quirks that need to be ironed out. \
1909 1914 In particular, the `-r` argument accepts a partial hash, but does not \
1910 1915 correctly resolve `abcdef` as a potential bookmark, tag or branch name."""
1911 1916
1912 1917 [[items]]
1913 1918 section = "rhg"
1914 1919 name = "fallback-exectutable"
1915 1920 experimental = true
1916 1921
1917 1922 [[items]]
1918 1923 section = "rhg"
1919 1924 name = "fallback-immediately"
1920 1925 default = false
1921 1926 experimental = true
1922 1927
1923 1928 [[items]]
1924 1929 section = "rhg"
1925 1930 name = "ignored-extensions"
1926 1931 default-type = "list_type"
1927 1932 experimental = true
1928 1933
1929 1934 [[items]]
1930 1935 section = "rhg"
1931 1936 name = "on-unsupported"
1932 1937 default = "abort"
1933 1938 experimental = true
1934 1939
1935 1940 [[items]]
1936 1941 section = "server"
1937 1942 name = "bookmarks-pushkey-compat"
1938 1943 default = true
1939 1944
1940 1945 [[items]]
1941 1946 section = "server"
1942 1947 name = "bundle1"
1943 1948 default = true
1944 1949
1945 1950 [[items]]
1946 1951 section = "server"
1947 1952 name = "bundle1.pull"
1948 1953
1949 1954 [[items]]
1950 1955 section = "server"
1951 1956 name = "bundle1.push"
1952 1957
1953 1958 [[items]]
1954 1959 section = "server"
1955 1960 name = "bundle1gd"
1956 1961
1957 1962 [[items]]
1958 1963 section = "server"
1959 1964 name = "bundle1gd.pull"
1960 1965
1961 1966 [[items]]
1962 1967 section = "server"
1963 1968 name = "bundle1gd.push"
1964 1969
1965 1970 [[items]]
1966 1971 section = "server"
1967 1972 name = "bundle2.stream"
1968 1973 default = true
1969 1974 alias = [["experimental", "bundle2.stream"]]
1970 1975
1971 1976 [[items]]
1972 1977 section = "server"
1973 1978 name = "compressionengines"
1974 1979 default-type = "list_type"
1975 1980
1976 1981 [[items]]
1977 1982 section = "server"
1978 1983 name = "concurrent-push-mode"
1979 1984 default = "check-related"
1980 1985
1981 1986 [[items]]
1982 1987 section = "server"
1983 1988 name = "disablefullbundle"
1984 1989 default = false
1985 1990
1986 1991 [[items]]
1987 1992 section = "server"
1988 1993 name = "maxhttpheaderlen"
1989 1994 default = 1024
1990 1995
1991 1996 [[items]]
1992 1997 section = "server"
1993 1998 name = "preferuncompressed"
1994 1999 default = false
1995 2000
1996 2001 [[items]]
1997 2002 section = "server"
1998 2003 name = "pullbundle"
1999 2004 default = true
2000 2005
2001 2006 [[items]]
2002 2007 section = "server"
2003 2008 name = "streamunbundle"
2004 2009 default = false
2005 2010
2006 2011 [[items]]
2007 2012 section = "server"
2008 2013 name = "uncompressed"
2009 2014 default = true
2010 2015
2011 2016 [[items]]
2012 2017 section = "server"
2013 2018 name = "uncompressedallowsecret"
2014 2019 default = false
2015 2020
2016 2021 [[items]]
2017 2022 section = "server"
2018 2023 name = "validate"
2019 2024 default = false
2020 2025
2021 2026 [[items]]
2022 2027 section = "server"
2023 2028 name = "view"
2024 2029 default = "served"
2025 2030
2026 2031 [[items]]
2027 2032 section = "server"
2028 2033 name = "zliblevel"
2029 2034 default = -1
2030 2035
2031 2036 [[items]]
2032 2037 section = "server"
2033 2038 name = "zstdlevel"
2034 2039 default = 3
2035 2040
2036 2041 [[items]]
2037 2042 section = "share"
2038 2043 name = "pool"
2039 2044
2040 2045 [[items]]
2041 2046 section = "share"
2042 2047 name = "poolnaming"
2043 2048 default = "identity"
2044 2049
2045 2050 [[items]]
2046 2051 section = "share"
2047 2052 name = "safe-mismatch.source-not-safe"
2048 2053 default = "abort"
2049 2054
2050 2055 [[items]]
2051 2056 section = "share"
2052 2057 name = "safe-mismatch.source-not-safe.warn"
2053 2058 default = true
2054 2059
2055 2060 [[items]]
2056 2061 section = "share"
2057 2062 name = "safe-mismatch.source-not-safe:verbose-upgrade"
2058 2063 default = true
2059 2064
2060 2065 [[items]]
2061 2066 section = "share"
2062 2067 name = "safe-mismatch.source-safe"
2063 2068 default = "abort"
2064 2069
2065 2070 [[items]]
2066 2071 section = "share"
2067 2072 name = "safe-mismatch.source-safe.warn"
2068 2073 default = true
2069 2074
2070 2075 [[items]]
2071 2076 section = "share"
2072 2077 name = "safe-mismatch.source-safe:verbose-upgrade"
2073 2078 default = true
2074 2079
2075 2080 [[items]]
2076 2081 section = "shelve"
2077 2082 name = "maxbackups"
2078 2083 default = 10
2079 2084
2080 2085 [[items]]
2081 2086 section = "shelve"
2082 2087 name = "store"
2083 2088 default = "internal"
2084 2089 experimental = true
2085 2090
2086 2091 [[items]]
2087 2092 section = "smtp"
2088 2093 name = "host"
2089 2094
2090 2095 [[items]]
2091 2096 section = "smtp"
2092 2097 name = "local_hostname"
2093 2098
2094 2099 [[items]]
2095 2100 section = "smtp"
2096 2101 name = "password"
2097 2102
2098 2103 [[items]]
2099 2104 section = "smtp"
2100 2105 name = "port"
2101 2106 default-type = "dynamic"
2102 2107
2103 2108 [[items]]
2104 2109 section = "smtp"
2105 2110 name = "tls"
2106 2111 default = "none"
2107 2112
2108 2113 [[items]]
2109 2114 section = "smtp"
2110 2115 name = "username"
2111 2116
2112 2117 [[items]]
2113 2118 section = "sparse"
2114 2119 name = "missingwarning"
2115 2120 default = true
2116 2121 experimental = true
2117 2122
2118 2123
2119 2124 # The "storage" section houses config options that change how the repository
2120 2125 # data is accessed by the current process but do not affect the on-disk
2121 2126 # format. They can also adjust how the storage is computed, but without
2122 2127 # affecting compatibility with other clients.
2123 2128 #
2124 2129 # For deeper format changes, see the `format` section.
2125 2130
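# For example, a process-local access tweak that leaves the on-disk format
# untouched (a sketch using the `storage.revlog.persistent-nodemap.mmap`
# item declared below):
#
#   [storage]
#   revlog.persistent-nodemap.mmap = false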
2126 2131
2127 2132 [[items]]
2128 2133 section = "storage"
2129 2134 name = "dirstate-v2.slow-path"
2130 2135 default = "abort"
2131 2136 experimental = true # experimental as long as format.use-dirstate-v2 is.
2132 2137
2133 2138 [[items]]
2134 2139 section = "storage"
2135 2140 name = "revbranchcache.mmap"
2136 2141 default = true
2137 2142
2138 2143 [[items]]
2139 2144 section = "storage"
2140 2145 name = "new-repo-backend"
2141 2146 default = "revlogv1"
2142 2147 experimental = true
2143 2148
2144 2149 [[items]]
2145 2150 section = "storage"
2146 2151 name = "revlog.delta-parent-search.candidate-group-chunk-size"
2147 2152 default = 20
2148 2153
2149 2154 [[items]]
2150 2155 section = "storage"
2151 2156 name = "revlog.issue6528.fix-incoming"
2152 2157 default = true
2153 2158
2154 2159 [[items]]
2155 2160 section = "storage"
2156 2161 name = "revlog.optimize-delta-parent-choice"
2157 2162 default = true
2158 2163 alias = [["format", "aggressivemergedeltas"]]
2159 2164
2160 2165 [[items]]
2161 2166 section = "storage"
2162 2167 name = "revlog.persistent-nodemap.mmap"
2163 2168 default = true
2164 2169
2165 2170 [[items]]
2166 2171 section = "storage"
2167 2172 name = "revlog.persistent-nodemap.slow-path"
2168 2173 default = "abort"
2169 2174
2170 2175 [[items]]
2171 2176 section = "storage"
2172 2177 name = "revlog.reuse-external-delta"
2173 2178 default = true
2174 2179
2175 2180 [[items]]
2176 2181 section = "storage"
2177 2182 name = "revlog.reuse-external-delta-parent"
2178 2183 documentation = """This option is true unless `format.generaldelta` is set."""
2179 2184
2180 2185 [[items]]
2181 2186 section = "storage"
2182 2187 name = "revlog.zlib.level"
2183 2188
2184 2189 [[items]]
2185 2190 section = "storage"
2186 2191 name = "revlog.zstd.level"
2187 2192
2188 2193 [[items]]
2189 2194 section = "subrepos"
2190 2195 name = "allowed"
2191 2196 default-type = "dynamic" # to make backporting simpler
2192 2197
2193 2198 [[items]]
2194 2199 section = "subrepos"
2195 2200 name = "git:allowed"
2196 2201 default-type = "dynamic"
2197 2202
2198 2203 [[items]]
2199 2204 section = "subrepos"
2200 2205 name = "hg:allowed"
2201 2206 default-type = "dynamic"
2202 2207
2203 2208 [[items]]
2204 2209 section = "subrepos"
2205 2210 name = "svn:allowed"
2206 2211 default-type = "dynamic"
2207 2212
2208 2213 [[items]]
2209 2214 section = "templateconfig"
2210 2215 name = ".*"
2211 2216 default-type = "dynamic"
2212 2217 generic = true
2213 2218
2214 2219 [[items]]
2215 2220 section = "templates"
2216 2221 name = ".*"
2217 2222 generic = true
2218 2223
2219 2224 [[items]]
2220 2225 section = "trusted"
2221 2226 name = "groups"
2222 2227 default-type = "list_type"
2223 2228
2224 2229 [[items]]
2225 2230 section = "trusted"
2226 2231 name = "users"
2227 2232 default-type = "list_type"
2228 2233
2229 2234 [[items]]
2230 2235 section = "ui"
2231 2236 name = "_usedassubrepo"
2232 2237 default = false
2233 2238
2234 2239 [[items]]
2235 2240 section = "ui"
2236 2241 name = "allowemptycommit"
2237 2242 default = false
2238 2243
2239 2244 [[items]]
2240 2245 section = "ui"
2241 2246 name = "archivemeta"
2242 2247 default = true
2243 2248
2244 2249 [[items]]
2245 2250 section = "ui"
2246 2251 name = "askusername"
2247 2252 default = false
2248 2253
2249 2254 [[items]]
2250 2255 section = "ui"
2251 2256 name = "available-memory"
2252 2257
2253 2258 [[items]]
2254 2259 section = "ui"
2255 2260 name = "clonebundlefallback"
2256 2261 default = false
2257 2262
2258 2263 [[items]]
2259 2264 section = "ui"
2260 2265 name = "clonebundleprefers"
2261 2266 default-type = "list_type"
2262 2267
2263 2268 [[items]]
2264 2269 section = "ui"
2265 2270 name = "clonebundles"
2266 2271 default = true
2267 2272
2268 2273 [[items]]
2269 2274 section = "ui"
2270 2275 name = "color"
2271 2276 default = "auto"
2272 2277
2273 2278 [[items]]
2274 2279 section = "ui"
2275 2280 name = "commitsubrepos"
2276 2281 default = false
2277 2282
2278 2283 [[items]]
2279 2284 section = "ui"
2280 2285 name = "debug"
2281 2286 default = false
2282 2287
2283 2288 [[items]]
2284 2289 section = "ui"
2285 2290 name = "debugger"
2286 2291
2287 2292 [[items]]
2288 2293 section = "ui"
2289 2294 name = "detailed-exit-code"
2290 2295 default = false
2291 2296 experimental = true
2292 2297
2293 2298 [[items]]
2294 2299 section = "ui"
2295 2300 name = "editor"
2296 2301 default-type = "dynamic"
2297 2302
2298 2303 [[items]]
2299 2304 section = "ui"
2300 2305 name = "fallbackencoding"
2301 2306
2302 2307 [[items]]
2303 2308 section = "ui"
2304 2309 name = "forcecwd"
2305 2310
2306 2311 [[items]]
2307 2312 section = "ui"
2308 2313 name = "forcemerge"
2309 2314
2310 2315 [[items]]
2311 2316 section = "ui"
2312 2317 name = "formatdebug"
2313 2318 default = false
2314 2319
2315 2320 [[items]]
2316 2321 section = "ui"
2317 2322 name = "formatjson"
2318 2323 default = false
2319 2324
2320 2325 [[items]]
2321 2326 section = "ui"
2322 2327 name = "formatted"
2323 2328
2324 2329 [[items]]
2325 2330 section = "ui"
2326 2331 name = "interactive"
2327 2332
2328 2333 [[items]]
2329 2334 section = "ui"
2330 2335 name = "interface"
2331 2336
2332 2337 [[items]]
2333 2338 section = "ui"
2334 2339 name = "interface.chunkselector"
2335 2340
2336 2341 [[items]]
2337 2342 section = "ui"
2338 2343 name = "large-file-limit"
2339 2344 default = 10485760
2340 2345
2341 2346 [[items]]
2342 2347 section = "ui"
2343 2348 name = "logblockedtimes"
2344 2349 default = false
2345 2350
2346 2351 [[items]]
2347 2352 section = "ui"
2348 2353 name = "merge"
2349 2354
2350 2355 [[items]]
2351 2356 section = "ui"
2352 2357 name = "mergemarkers"
2353 2358 default = "basic"
2354 2359
2355 2360 [[items]]
2356 2361 section = "ui"
2357 2362 name = "message-output"
2358 2363 default = "stdio"
2359 2364
2360 2365 [[items]]
2361 2366 section = "ui"
2362 2367 name = "nontty"
2363 2368 default = false
2364 2369
2365 2370 [[items]]
2366 2371 section = "ui"
2367 2372 name = "origbackuppath"
2368 2373
2369 2374 [[items]]
2370 2375 section = "ui"
2371 2376 name = "paginate"
2372 2377 default = true
2373 2378
2374 2379 [[items]]
2375 2380 section = "ui"
2376 2381 name = "patch"
2377 2382
2378 2383 [[items]]
2379 2384 section = "ui"
2380 2385 name = "portablefilenames"
2381 2386 default = "warn"
2382 2387
2383 2388 [[items]]
2384 2389 section = "ui"
2385 2390 name = "promptecho"
2386 2391 default = false
2387 2392
2388 2393 [[items]]
2389 2394 section = "ui"
2390 2395 name = "quiet"
2391 2396 default = false
2392 2397
2393 2398 [[items]]
2394 2399 section = "ui"
2395 2400 name = "quietbookmarkmove"
2396 2401 default = false
2397 2402
2398 2403 [[items]]
2399 2404 section = "ui"
2400 2405 name = "relative-paths"
2401 2406 default = "legacy"
2402 2407
2403 2408 [[items]]
2404 2409 section = "ui"
2405 2410 name = "remotecmd"
2406 2411 default = "hg"
2407 2412
2408 2413 [[items]]
2409 2414 section = "ui"
2410 2415 name = "report_untrusted"
2411 2416 default = true
2412 2417
2413 2418 [[items]]
2414 2419 section = "ui"
2415 2420 name = "rollback"
2416 2421 default = true
2417 2422
2418 2423 [[items]]
2419 2424 section = "ui"
2420 2425 name = "signal-safe-lock"
2421 2426 default = true
2422 2427
2423 2428 [[items]]
2424 2429 section = "ui"
2425 2430 name = "slash"
2426 2431 default = false
2427 2432
2428 2433 [[items]]
2429 2434 section = "ui"
2430 2435 name = "ssh"
2431 2436 default = "ssh"
2432 2437
2433 2438 [[items]]
2434 2439 section = "ui"
2435 2440 name = "ssherrorhint"
2436 2441
2437 2442 [[items]]
2438 2443 section = "ui"
2439 2444 name = "statuscopies"
2440 2445 default = false
2441 2446
2442 2447 [[items]]
2443 2448 section = "ui"
2444 2449 name = "strict"
2445 2450 default = false
2446 2451
2447 2452 [[items]]
2448 2453 section = "ui"
2449 2454 name = "style"
2450 2455 default = ""
2451 2456
2452 2457 [[items]]
2453 2458 section = "ui"
2454 2459 name = "supportcontact"
2455 2460
2456 2461 [[items]]
2457 2462 section = "ui"
2458 2463 name = "textwidth"
2459 2464 default = 78
2460 2465
2461 2466 [[items]]
2462 2467 section = "ui"
2463 2468 name = "timeout"
2464 2469 default = "600"
2465 2470
2466 2471 [[items]]
2467 2472 section = "ui"
2468 2473 name = "timeout.warn"
2469 2474 default = 0
2470 2475
2471 2476 [[items]]
2472 2477 section = "ui"
2473 2478 name = "timestamp-output"
2474 2479 default = false
2475 2480
2476 2481 [[items]]
2477 2482 section = "ui"
2478 2483 name = "traceback"
2479 2484 default = false
2480 2485
2481 2486 [[items]]
2482 2487 section = "ui"
2483 2488 name = "tweakdefaults"
2484 2489 default = false
2485 2490
2486 2491 [[items]]
2487 2492 section = "ui"
2488 2493 name = "username"
2489 2494 alias = [["ui", "user"]]
2490 2495
2491 2496 [[items]]
2492 2497 section = "ui"
2493 2498 name = "verbose"
2494 2499 default = false
2495 2500
2496 2501 [[items]]
2497 2502 section = "usage"
2498 2503 name = "repository-role"
2499 2504 default = "default"
2500 2505 documentation = """What this repository is used for.
2501 2506
2502 2507 This is used to adjust behavior and performance to best fit the repository purpose.
2503 2508
2504 2509 Currently recognised values are:
2505 2510 - default: an all-purpose repository
2506 2511 """
2507 2512
2508 2513 [[items]]
2509 2514 section = "usage"
2510 2515 name = "resources"
2511 2516 default = "default"
2512 2517 documentation = """How aggressive Mercurial can be with resource usage:
2513 2518
2514 2519 Currently recognised values are:
2515 2520 - default: the default value, currently equivalent to medium,
2516 2521 - high: allows for higher cpu, memory and disk-space usage to improve the performance of some operations.
2517 2522 - medium: aims at a moderate resource usage,
2518 2523 - low: reduces resource usage when possible, decreasing overall performance.
2519 2524
2520 2525 For finer configuration, see also `usage.resources.cpu`,
2521 2526 `usage.resources.disk` and `usage.resources.memory`.
2522 2527 """
2523 2528
2524 2529 [[items]]
2525 2530 section = "usage"
2526 2531 name = "resources.cpu"
2527 2532 default = "default"
2528 2533 documentation = """How aggressive Mercurial can be in terms of cpu usage:
2529 2534
2530 2535 Currently recognised values are:
2531 2536 - default: the default value, inherits the value from `usage.resources`,
2532 2537 - high: allows for more aggressive cpu usage, improving storage quality and
2533 2538 the performance of some operations at the expense of machine load
2534 2539 - medium: aims at a moderate cpu usage,
2535 2540 - low: reduces cpu usage when possible, potentially at the expense of
2536 2541 slower operations, increased storage and exchange payload.
2537 2542
2538 2543 """
2539 2544
2540 2545 [[items]]
2541 2546 section = "usage"
2542 2547 name = "resources.disk"
2543 2548 default = "default"
2544 2549 documentation = """How aggressive Mercurial can be in terms of disk usage:
2545 2550
2546 2551 Currently recognised values are:
2547 2552 - default: the default value, inherits the value from `usage.resources`,
2548 2553 - high: allows for more disk space usage where it can improve the performance,
2549 2554 - medium: aims at a moderate disk usage,
2550 2555 - low: reduces disk usage when possible, decreasing performance on some occasions.
2551 2556 """
2552 2557
2553 2558 [[items]]
2554 2559 section = "usage"
2555 2560 name = "resources.memory"
2556 2561 default = "default"
2557 2562 documentation = """How aggressive Mercurial can be in terms of memory usage:
2558 2563
2559 2564 Currently recognised values are:
2560 2565 - default: the default value, inherits the value from `usage.resources`,
2561 2566 - high: allows for more aggressive memory usage to improve overall performance,
2562 2567 - medium: aims at a moderate memory usage,
2563 2568 - low: reduces memory usage when possible at the cost of overall performance.
2564 2569 """
2565 2570
2566 2571 [[items]]
2567 2572 section = "verify"
2568 2573 name = "skipflags"
2569 2574 default = 0
2570 2575
2571 2576 [[items]]
2572 2577 section = "web"
2573 2578 name = "accesslog"
2574 2579 default = "-"
2575 2580
2576 2581 [[items]]
2577 2582 section = "web"
2578 2583 name = "address"
2579 2584 default = ""
2580 2585
2581 2586 [[items]]
2582 2587 section = "web"
2583 2588 name = "allow-archive"
2584 2589 default-type = "list_type"
2585 2590 alias = [["web", "allow_archive"]]
2586 2591
2587 2592 [[items]]
2588 2593 section = "web"
2589 2594 name = "allow-pull"
2590 2595 default = true
2591 2596 alias = [["web", "allowpull"]]
2592 2597
2593 2598 [[items]]
2594 2599 section = "web"
2595 2600 name = "allow-push"
2596 2601 default-type = "list_type"
2597 2602 alias = [["web", "allow_push"]]
2598 2603
2599 2604 [[items]]
2600 2605 section = "web"
2601 2606 name = "allow_read"
2602 2607 default-type = "list_type"
2603 2608
2604 2609 [[items]]
2605 2610 section = "web"
2606 2611 name = "allowbz2"
2607 2612 default = false
2608 2613
2609 2614 [[items]]
2610 2615 section = "web"
2611 2616 name = "allowgz"
2612 2617 default = false
2613 2618
2614 2619 [[items]]
2615 2620 section = "web"
2616 2621 name = "allowzip"
2617 2622 default = false
2618 2623
2619 2624 [[items]]
2620 2625 section = "web"
2621 2626 name = "archivesubrepos"
2622 2627 default = false
2623 2628
2624 2629 [[items]]
2625 2630 section = "web"
2626 2631 name = "baseurl"
2627 2632
2628 2633 [[items]]
2629 2634 section = "web"
2630 2635 name = "cacerts"
2631 2636
2632 2637 [[items]]
2633 2638 section = "web"
2634 2639 name = "cache"
2635 2640 default = true
2636 2641
2637 2642 [[items]]
2638 2643 section = "web"
2639 2644 name = "certificate"
2640 2645
2641 2646 [[items]]
2642 2647 section = "web"
2643 2648 name = "collapse"
2644 2649 default = false
2645 2650
2646 2651 [[items]]
2647 2652 section = "web"
2648 2653 name = "comparisoncontext"
2649 2654 default = 5
2650 2655
2651 2656 [[items]]
2652 2657 section = "web"
2653 2658 name = "contact"
2654 2659
2655 2660 [[items]]
2656 2661 section = "web"
2657 2662 name = "csp"
2658 2663
2659 2664 [[items]]
2660 2665 section = "web"
2661 2666 name = "deny_push"
2662 2667 default-type = "list_type"
2663 2668
2664 2669 [[items]]
2665 2670 section = "web"
2666 2671 name = "deny_read"
2667 2672 default-type = "list_type"
2668 2673
2669 2674 [[items]]
2670 2675 section = "web"
2671 2676 name = "descend"
2672 2677 default = true
2673 2678
2674 2679 [[items]]
2675 2680 section = "web"
2676 2681 name = "description"
2677 2682 default = ""
2678 2683
2679 2684 [[items]]
2680 2685 section = "web"
2681 2686 name = "encoding"
2682 2687 default-type = "lazy_module"
2683 2688 default = "encoding.encoding"
2684 2689
2685 2690 [[items]]
2686 2691 section = "web"
2687 2692 name = "errorlog"
2688 2693 default = "-"
2689 2694
2690 2695 [[items]]
2691 2696 section = "web"
2692 2697 name = "guessmime"
2693 2698 default = false
2694 2699
2695 2700 [[items]]
2696 2701 section = "web"
2697 2702 name = "hidden"
2698 2703 default = false
2699 2704
2700 2705 [[items]]
2701 2706 section = "web"
2702 2707 name = "ipv6"
2703 2708 default = false
2704 2709
2705 2710 [[items]]
2706 2711 section = "web"
2707 2712 name = "labels"
2708 2713 default-type = "list_type"
2709 2714
2710 2715 [[items]]
2711 2716 section = "web"
2712 2717 name = "logoimg"
2713 2718 default = "hglogo.png"
2714 2719
2715 2720 [[items]]
2716 2721 section = "web"
2717 2722 name = "logourl"
2718 2723 default = "https://mercurial-scm.org/"
2719 2724
2720 2725 [[items]]
2721 2726 section = "web"
2722 2727 name = "maxchanges"
2723 2728 default = 10
2724 2729
2725 2730 [[items]]
2726 2731 section = "web"
2727 2732 name = "maxfiles"
2728 2733 default = 10
2729 2734
2730 2735 [[items]]
2731 2736 section = "web"
2732 2737 name = "maxshortchanges"
2733 2738 default = 60
2734 2739
2735 2740 [[items]]
2736 2741 section = "web"
2737 2742 name = "motd"
2738 2743 default = ""
2739 2744
2740 2745 [[items]]
2741 2746 section = "web"
2742 2747 name = "name"
2743 2748 default-type = "dynamic"
2744 2749
2745 2750 [[items]]
2746 2751 section = "web"
2747 2752 name = "port"
2748 2753 default = 8000
2749 2754
2750 2755 [[items]]
2751 2756 section = "web"
2752 2757 name = "prefix"
2753 2758 default = ""
2754 2759
2755 2760 [[items]]
2756 2761 section = "web"
2757 2762 name = "push_ssl"
2758 2763 default = true
2759 2764
2760 2765 [[items]]
2761 2766 section = "web"
2762 2767 name = "refreshinterval"
2763 2768 default = 20
2764 2769
2765 2770 [[items]]
2766 2771 section = "web"
2767 2772 name = "server-header"
2768 2773
2769 2774 [[items]]
2770 2775 section = "web"
2771 2776 name = "static"
2772 2777
2773 2778 [[items]]
2774 2779 section = "web"
2775 2780 name = "staticurl"
2776 2781
2777 2782 [[items]]
2778 2783 section = "web"
2779 2784 name = "stripes"
2780 2785 default = 1
2781 2786
2782 2787 [[items]]
2783 2788 section = "web"
2784 2789 name = "style"
2785 2790 default = "paper"
2786 2791
2787 2792 [[items]]
2788 2793 section = "web"
2789 2794 name = "templates"
2790 2795
2791 2796 [[items]]
2792 2797 section = "web"
2793 2798 name = "view"
2794 2799 default = "served"
2795 2800 experimental = true
2796 2801
2797 2802 [[items]]
2798 2803 section = "worker"
2799 2804 name = "backgroundclose"
2800 2805 default-type = "dynamic"
2801 2806
2802 2807 [[items]]
2803 2808 section = "worker"
2804 2809 name = "backgroundclosemaxqueue"
2805 2810 # Windows defaults to a limit of 512 open files. A buffer of 128
2806 2811 # should give us enough headway (512 - 128 = 384).
2807 2812 default = 384
2808 2813
2809 2814 [[items]]
2810 2815 section = "worker"
2811 2816 name = "backgroundcloseminfilecount"
2812 2817 default = 2048
2813 2818
2814 2819 [[items]]
2815 2820 section = "worker"
2816 2821 name = "backgroundclosethreadcount"
2817 2822 default = 4
2818 2823
2819 2824 [[items]]
2820 2825 section = "worker"
2821 2826 name = "enabled"
2822 2827 default = true
2823 2828
2824 2829 [[items]]
2825 2830 section = "worker"
2826 2831 name = "numcpus"
2827 2832
2828 2833 # Templates and template applications
2829 2834
2830 2835 [[template-applications]]
2831 2836 template = "diff-options"
2832 2837 section = "annotate"
2833 2838
2834 2839 [[template-applications]]
2835 2840 template = "diff-options"
2836 2841 section = "commands"
2837 2842 prefix = "commit.interactive"
2838 2843
2839 2844 [[template-applications]]
2840 2845 template = "diff-options"
2841 2846 section = "commands"
2842 2847 prefix = "revert.interactive"
2843 2848
2844 2849 [[template-applications]]
2845 2850 template = "diff-options"
2846 2851 section = "diff"
2847 2852
2848 2853 [templates]
2849 2854 [[templates.diff-options]]
2850 2855 suffix = "nodates"
2851 2856 default = false
2852 2857
2853 2858 [[templates.diff-options]]
2854 2859 suffix = "showfunc"
2855 2860 default = false
2856 2861
2857 2862 [[templates.diff-options]]
2858 2863 suffix = "unified"
2859 2864
2860 2865 [[templates.diff-options]]
2861 2866 suffix = "git"
2862 2867 default = false
2863 2868
2864 2869 [[templates.diff-options]]
2865 2870 suffix = "ignorews"
2866 2871 default = false
2867 2872
2868 2873 [[templates.diff-options]]
2869 2874 suffix = "ignorewsamount"
2870 2875 default = false
2871 2876
2872 2877 [[templates.diff-options]]
2873 2878 suffix = "ignoreblanklines"
2874 2879 default = false
2875 2880
2876 2881 [[templates.diff-options]]
2877 2882 suffix = "ignorewseol"
2878 2883 default = false
2879 2884
2880 2885 [[templates.diff-options]]
2881 2886 suffix = "nobinary"
2882 2887 default = false
2883 2888
2884 2889 [[templates.diff-options]]
2885 2890 suffix = "noprefix"
2886 2891 default = false
2887 2892
2888 2893 [[templates.diff-options]]
2889 2894 suffix = "word-diff"
2890 2895 default = false
2891 2896
2892 2897 # In-core extensions
2893 2898
2894 2899 [[items]]
2895 2900 section = "blackbox"
2896 2901 name = "debug.to-stderr"
2897 2902 default = false
2898 2903 in_core_extension = "blackbox"
2899 2904
2900 2905 [[items]]
2901 2906 section = "blackbox"
2902 2907 name = "dirty"
2903 2908 default = false
2904 2909 in_core_extension = "blackbox"
2905 2910
2906 2911 [[items]]
2907 2912 section = "blackbox"
2908 2913 name = "maxsize"
2909 2914 default = "1 MB"
2910 2915 in_core_extension = "blackbox"
2911 2916
2912 2917 [[items]]
2913 2918 section = "blackbox"
2914 2919 name = "logsource"
2915 2920 default = false
2916 2921 in_core_extension = "blackbox"
2917 2922
2918 2923 [[items]]
2919 2924 section = "blackbox"
2920 2925 name = "maxfiles"
2921 2926 default = 7
2922 2927 in_core_extension = "blackbox"
2923 2928
2924 2929 [[items]]
2925 2930 section = "blackbox"
2926 2931 name = "track"
2927 2932 default-type = "lambda"
2928 2933 default = ["*"]
2929 2934 in_core_extension = "blackbox"
2930 2935
2931 2936 [[items]]
2932 2937 section = "blackbox"
2933 2938 name = "ignore"
2934 2939 default-type = "lambda"
2935 2940 default = ["chgserver", "cmdserver", "extension"]
2936 2941 in_core_extension = "blackbox"
2937 2942
2938 2943 [[items]]
2939 2944 section = "blackbox"
2940 2945 name = "date-format"
2941 2946 default = ""
2942 2947 in_core_extension = "blackbox"
@@ -1,4034 +1,4038 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import functools
11 11 import os
12 12 import random
13 13 import re
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from concurrent import futures
19 19 from typing import (
20 20 Optional,
21 21 )
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 bin,
26 26 hex,
27 27 nullrev,
28 28 sha1nodeconstants,
29 29 short,
30 30 )
31 31 from . import (
32 32 bookmarks,
33 33 branchmap,
34 34 bundle2,
35 35 bundlecaches,
36 36 changegroup,
37 37 color,
38 38 commit,
39 39 context,
40 40 dirstate,
41 41 discovery,
42 42 encoding,
43 43 error,
44 44 exchange,
45 45 extensions,
46 46 filelog,
47 47 hook,
48 48 lock as lockmod,
49 49 match as matchmod,
50 50 mergestate as mergestatemod,
51 51 mergeutil,
52 52 namespaces,
53 53 narrowspec,
54 54 obsolete,
55 55 pathutil,
56 56 phases,
57 57 policy,
58 58 pushkey,
59 59 pycompat,
60 60 rcutil,
61 61 repoview,
62 62 requirements as requirementsmod,
63 63 revlog,
64 64 revset,
65 65 revsetlang,
66 66 scmutil,
67 67 sparse,
68 68 store as storemod,
69 69 subrepoutil,
70 70 tags as tagsmod,
71 71 transaction,
72 72 txnutil,
73 73 util,
74 74 vfs as vfsmod,
75 75 wireprototypes,
76 76 )
77 77
78 78 from .interfaces import (
79 79 repository,
80 80 util as interfaceutil,
81 81 )
82 82
83 83 from .utils import (
84 84 hashutil,
85 85 procutil,
86 86 stringutil,
87 87 urlutil,
88 88 )
89 89
90 90 from .revlogutils import (
91 91 concurrency_checker as revlogchecker,
92 92 constants as revlogconst,
93 93 sidedata as sidedatamod,
94 94 )
95 95
96 96 release = lockmod.release
97 97 urlerr = util.urlerr
98 98 urlreq = util.urlreq
99 99
100 100 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
101 101 b"^((dirstate|narrowspec.dirstate).*|branch$)"
102 102 )
103 103
104 104 # set of (path, vfs-location) tuples. vfs-location is:
105 105 # - 'plain' for vfs relative paths
106 106 # - '' for svfs relative paths
107 107 _cachedfiles = set()
108 108
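# For illustration: changelogcache below registers store-relative entries such
# as (b'00changelog.i', b''), while .hg/-level caches use the 'plain' location
# (e.g. a hypothetical (b'bookmarks', b'plain') entry).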
109 109
110 110 class _basefilecache(scmutil.filecache):
111 111 """All filecache usage on repo are done for logic that should be unfiltered"""
112 112
113 113 def __get__(self, repo, type=None):
114 114 if repo is None:
115 115 return self
116 116 # proxy to unfiltered __dict__ since filtered repo has no entry
117 117 unfi = repo.unfiltered()
118 118 try:
119 119 return unfi.__dict__[self.sname]
120 120 except KeyError:
121 121 pass
122 122 return super(_basefilecache, self).__get__(unfi, type)
123 123
124 124 def set(self, repo, value):
125 125 return super(_basefilecache, self).set(repo.unfiltered(), value)
126 126
127 127
128 128 class repofilecache(_basefilecache):
129 129 """filecache for files in .hg but outside of .hg/store"""
130 130
131 131 def __init__(self, *paths):
132 132 super(repofilecache, self).__init__(*paths)
133 133 for path in paths:
134 134 _cachedfiles.add((path, b'plain'))
135 135
136 136 def join(self, obj, fname):
137 137 return obj.vfs.join(fname)
138 138
139 139
140 140 class storecache(_basefilecache):
141 141 """filecache for files in the store"""
142 142
143 143 def __init__(self, *paths):
144 144 super(storecache, self).__init__(*paths)
145 145 for path in paths:
146 146 _cachedfiles.add((path, b''))
147 147
148 148 def join(self, obj, fname):
149 149 return obj.sjoin(fname)
150 150
151 151
152 152 class changelogcache(storecache):
153 153 """filecache for the changelog"""
154 154
155 155 def __init__(self):
156 156 super(changelogcache, self).__init__()
157 157 _cachedfiles.add((b'00changelog.i', b''))
158 158 _cachedfiles.add((b'00changelog.n', b''))
159 159
160 160 def tracked_paths(self, obj):
161 161 paths = [self.join(obj, b'00changelog.i')]
162 162 if obj.store.opener.options.get(b'persistent-nodemap', False):
163 163 paths.append(self.join(obj, b'00changelog.n'))
164 164 return paths
165 165
166 166
167 167 class manifestlogcache(storecache):
168 168 """filecache for the manifestlog"""
169 169
170 170 def __init__(self):
171 171 super(manifestlogcache, self).__init__()
172 172 _cachedfiles.add((b'00manifest.i', b''))
173 173 _cachedfiles.add((b'00manifest.n', b''))
174 174
175 175 def tracked_paths(self, obj):
176 176 paths = [self.join(obj, b'00manifest.i')]
177 177 if obj.store.opener.options.get(b'persistent-nodemap', False):
178 178 paths.append(self.join(obj, b'00manifest.n'))
179 179 return paths
180 180
181 181
182 182 class mixedrepostorecache(_basefilecache):
183 183 """filecache for a mix files in .hg/store and outside"""
184 184
185 185 def __init__(self, *pathsandlocations):
186 186 # scmutil.filecache only uses the path for passing back into our
187 187 # join(), so we can safely pass a list of paths and locations
188 188 super(mixedrepostorecache, self).__init__(*pathsandlocations)
189 189 _cachedfiles.update(pathsandlocations)
190 190
191 191 def join(self, obj, fnameandlocation):
192 192 fname, location = fnameandlocation
193 193 if location == b'plain':
194 194 return obj.vfs.join(fname)
195 195 else:
196 196 if location != b'':
197 197 raise error.ProgrammingError(
198 198 b'unexpected location: %s' % location
199 199 )
200 200 return obj.sjoin(fname)
201 201
202 202
203 203 def isfilecached(repo, name):
204 204 """check if a repo has already cached "name" filecache-ed property
205 205
206 206 This returns (cachedobj-or-None, iscached) tuple.
207 207 """
208 208 cacheentry = repo.unfiltered()._filecache.get(name, None)
209 209 if not cacheentry:
210 210 return None, False
211 211 return cacheentry.obj, True
212 212
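# Hedged usage sketch for isfilecached(); 'changelog' is assumed to name a
# filecache-ed property on the repo:
#
#     cached, present = isfilecached(repo, 'changelog')
#     if present:
#         pass  # reuse ``cached`` without triggering a reload from disk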
213 213
214 214 class unfilteredpropertycache(util.propertycache):
215 215 """propertycache that apply to unfiltered repo only"""
216 216
217 217 def __get__(self, repo, type=None):
218 218 unfi = repo.unfiltered()
219 219 if unfi is repo:
220 220 return super(unfilteredpropertycache, self).__get__(unfi)
221 221 return getattr(unfi, self.name)
222 222
223 223
224 224 class filteredpropertycache(util.propertycache):
225 225 """propertycache that must take filtering in account"""
226 226
227 227 def cachevalue(self, obj, value):
228 228 object.__setattr__(obj, self.name, value)
229 229
230 230
231 231 def hasunfilteredcache(repo, name):
232 232 """check if a repo has an unfilteredpropertycache value for <name>"""
233 233 return name in vars(repo.unfiltered())
234 234
235 235
236 236 def unfilteredmethod(orig):
237 237 """decorate method that always need to be run on unfiltered version"""
238 238
239 239 @functools.wraps(orig)
240 240 def wrapper(repo, *args, **kwargs):
241 241 return orig(repo.unfiltered(), *args, **kwargs)
242 242
243 243 return wrapper
244 244
245 245
246 246 moderncaps = {
247 247 b'lookup',
248 248 b'branchmap',
249 249 b'pushkey',
250 250 b'known',
251 251 b'getbundle',
252 252 b'unbundle',
253 253 }
254 254 legacycaps = moderncaps.union({b'changegroupsubset'})
255 255
256 256
257 257 @interfaceutil.implementer(repository.ipeercommandexecutor)
258 258 class localcommandexecutor:
259 259 def __init__(self, peer):
260 260 self._peer = peer
261 261 self._sent = False
262 262 self._closed = False
263 263
264 264 def __enter__(self):
265 265 return self
266 266
267 267 def __exit__(self, exctype, excvalue, exctb):
268 268 self.close()
269 269
270 270 def callcommand(self, command, args):
271 271 if self._sent:
272 272 raise error.ProgrammingError(
273 273 b'callcommand() cannot be used after sendcommands()'
274 274 )
275 275
276 276 if self._closed:
277 277 raise error.ProgrammingError(
278 278 b'callcommand() cannot be used after close()'
279 279 )
280 280
281 281 # We don't need to support anything fancy. Just call the named
282 282 # method on the peer and return a resolved future.
283 283 fn = getattr(self._peer, pycompat.sysstr(command))
284 284
285 285 f = futures.Future()
286 286
287 287 try:
288 288 result = fn(**pycompat.strkwargs(args))
289 289 except Exception:
290 290 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
291 291 else:
292 292 f.set_result(result)
293 293
294 294 return f
295 295
296 296 def sendcommands(self):
297 297 self._sent = True
298 298
299 299 def close(self):
300 300 self._closed = True
301 301
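# Hedged usage sketch of the executor protocol implemented above, mirroring
# how callers typically drive a peer (``peer`` is assumed to be a localpeer):
#
#     with peer.commandexecutor() as e:
#         node = e.callcommand(b'lookup', {b'key': b'tip'}).result()
#
# For this local executor the returned future is already resolved when
# callcommand() returns, so result() never blocks.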
302 302
303 303 @interfaceutil.implementer(repository.ipeercommands)
304 304 class localpeer(repository.peer):
305 305 '''peer for a local repo; reflects only the most recent API'''
306 306
307 307 def __init__(self, repo, caps=None, path=None, remotehidden=False):
308 308 super(localpeer, self).__init__(
309 309 repo.ui, path=path, remotehidden=remotehidden
310 310 )
311 311
312 312 if caps is None:
313 313 caps = moderncaps.copy()
314 314 if remotehidden:
315 315 self._repo = repo.filtered(b'served.hidden')
316 316 else:
317 317 self._repo = repo.filtered(b'served')
318 318 if repo._wanted_sidedata:
319 319 formatted = bundle2.format_remote_wanted_sidedata(repo)
320 320 caps.add(b'exp-wanted-sidedata=' + formatted)
321 321
322 322 self._caps = repo._restrictcapabilities(caps)
323 323
324 324 # Begin of _basepeer interface.
325 325
326 326 def url(self):
327 327 return self._repo.url()
328 328
329 329 def local(self):
330 330 return self._repo
331 331
332 332 def canpush(self):
333 333 return True
334 334
335 335 def close(self):
336 336 self._repo.close()
337 337
338 338 # End of _basepeer interface.
339 339
340 340 # Begin of _basewirecommands interface.
341 341
342 342 def branchmap(self):
343 343 return self._repo.branchmap()
344 344
345 345 def capabilities(self):
346 346 return self._caps
347 347
348 348 def get_cached_bundle_inline(self, path):
349 349 # not needed with local peer
350 350 raise NotImplementedError
351 351
352 352 def clonebundles(self):
353 353 return bundlecaches.get_manifest(self._repo)
354 354
355 355 def debugwireargs(self, one, two, three=None, four=None, five=None):
356 356 """Used to test argument passing over the wire"""
357 357 return b"%s %s %s %s %s" % (
358 358 one,
359 359 two,
360 360 pycompat.bytestr(three),
361 361 pycompat.bytestr(four),
362 362 pycompat.bytestr(five),
363 363 )
364 364
365 365 def getbundle(
366 366 self,
367 367 source,
368 368 heads=None,
369 369 common=None,
370 370 bundlecaps=None,
371 371 remote_sidedata=None,
372 372 **kwargs,
373 373 ):
374 374 chunks = exchange.getbundlechunks(
375 375 self._repo,
376 376 source,
377 377 heads=heads,
378 378 common=common,
379 379 bundlecaps=bundlecaps,
380 380 remote_sidedata=remote_sidedata,
381 381 **kwargs,
382 382 )[1]
383 383 cb = util.chunkbuffer(chunks)
384 384
385 385 if exchange.bundle2requested(bundlecaps):
386 386 # When requesting a bundle2, getbundle returns a stream to make the
387 387 # wire level function happier. We need to build a proper object
388 388 # from it in local peer.
389 389 return bundle2.getunbundler(self.ui, cb)
390 390 else:
391 391 return changegroup.getunbundler(b'01', cb, None)
392 392
393 393 def heads(self):
394 394 return self._repo.heads()
395 395
396 396 def known(self, nodes):
397 397 return self._repo.known(nodes)
398 398
399 399 def listkeys(self, namespace):
400 400 return self._repo.listkeys(namespace)
401 401
402 402 def lookup(self, key):
403 403 return self._repo.lookup(key)
404 404
405 405 def pushkey(self, namespace, key, old, new):
406 406 return self._repo.pushkey(namespace, key, old, new)
407 407
408 408 def stream_out(self):
409 409 raise error.Abort(_(b'cannot perform stream clone against local peer'))
410 410
411 411 def unbundle(self, bundle, heads, url):
412 412 """apply a bundle on a repo
413 413
414 414 This function handles the repo locking itself."""
415 415 try:
416 416 try:
417 417 bundle = exchange.readbundle(self.ui, bundle, None)
418 418 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
419 419 if hasattr(ret, 'getchunks'):
420 420 # This is a bundle20 object, turn it into an unbundler.
421 421 # This little dance should be dropped eventually when the
422 422 # API is finally improved.
423 423 stream = util.chunkbuffer(ret.getchunks())
424 424 ret = bundle2.getunbundler(self.ui, stream)
425 425 return ret
426 426 except Exception as exc:
427 427 # If the exception contains output salvaged from a bundle2
428 428 # reply, we need to make sure it is printed before continuing
429 429 # to fail. So we build a bundle2 with such output and consume
430 430 # it directly.
431 431 #
432 432 # This is not very elegant but allows a "simple" solution for
433 433 # issue4594
434 434 output = getattr(exc, '_bundle2salvagedoutput', ())
435 435 if output:
436 436 bundler = bundle2.bundle20(self._repo.ui)
437 437 for out in output:
438 438 bundler.addpart(out)
439 439 stream = util.chunkbuffer(bundler.getchunks())
440 440 b = bundle2.getunbundler(self.ui, stream)
441 441 bundle2.processbundle(self._repo, b)
442 442 raise
443 443 except error.PushRaced as exc:
444 444 raise error.ResponseError(
445 445 _(b'push failed:'), stringutil.forcebytestr(exc)
446 446 )
447 447
448 448 # End of _basewirecommands interface.
449 449
450 450 # Begin of peer interface.
451 451
452 452 def commandexecutor(self):
453 453 return localcommandexecutor(self)
454 454
455 455 # End of peer interface.
456 456
457 457
458 458 @interfaceutil.implementer(repository.ipeerlegacycommands)
459 459 class locallegacypeer(localpeer):
460 460 """peer extension which implements legacy methods too; used for tests with
461 461 restricted capabilities"""
462 462
463 463 def __init__(self, repo, path=None, remotehidden=False):
464 464 super(locallegacypeer, self).__init__(
465 465 repo, caps=legacycaps, path=path, remotehidden=remotehidden
466 466 )
467 467
468 468 # Begin of baselegacywirecommands interface.
469 469
470 470 def between(self, pairs):
471 471 return self._repo.between(pairs)
472 472
473 473 def branches(self, nodes):
474 474 return self._repo.branches(nodes)
475 475
476 476 def changegroup(self, nodes, source):
477 477 outgoing = discovery.outgoing(
478 478 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
479 479 )
480 480 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
481 481
482 482 def changegroupsubset(self, bases, heads, source):
483 483 outgoing = discovery.outgoing(
484 484 self._repo, missingroots=bases, ancestorsof=heads
485 485 )
486 486 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
487 487
488 488 # End of baselegacywirecommands interface.
489 489
490 490
491 491 # Functions receiving (ui, features) that extensions can register to impact
492 492 # the ability to load repositories with custom requirements. Only
493 493 # functions defined in loaded extensions are called.
494 494 #
495 495 # The function receives a set of requirement strings that the repository
496 496 # is capable of opening. Functions will typically add elements to the
497 497 # set to reflect that the extension knows how to handle those requirements.
498 498 featuresetupfuncs = set()
499 499
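# Hedged sketch of the registration pattern (``exp-myfeature`` is a made-up
# requirement string; an extension would register from its uisetup()):
#
#     def featuresetup(ui, features):
#         features.add(b'exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)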
500 500
501 501 def _getsharedvfs(hgvfs, requirements):
502 502 """returns the vfs object pointing to root of shared source
503 503 repo for a shared repository
504 504
505 505 hgvfs is vfs pointing at .hg/ of current repo (shared one)
506 506 requirements is a set of requirements of current repo (shared one)
507 507 """
508 508 # The ``shared`` or ``relshared`` requirements indicate the
509 509 # store lives in the path contained in the ``.hg/sharedpath`` file.
510 510 # This is an absolute path for ``shared`` and relative to
511 511 # ``.hg/`` for ``relshared``.
512 512 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
513 513 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
514 514 sharedpath = util.normpath(hgvfs.join(sharedpath))
515 515
516 516 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
517 517
518 518 if not sharedvfs.exists():
519 519 raise error.RepoError(
520 520 _(b'.hg/sharedpath points to nonexistent directory %s')
521 521 % sharedvfs.base
522 522 )
523 523 return sharedvfs
524 524
525 525
526 526 def _readrequires(vfs, allowmissing):
527 527 """reads the require file present at root of this vfs
528 528 and return a set of requirements
529 529
530 530 If allowmissing is True, we suppress FileNotFoundError if raised"""
531 531 # requires file contains a newline-delimited list of
532 532 # features/capabilities the opener (us) must have in order to use
533 533 # the repository. This file was introduced in Mercurial 0.9.2,
534 534 # which means very old repositories may not have one. We assume
535 535 # a missing file translates to no requirements.
536 536 read = vfs.tryread if allowmissing else vfs.read
537 537 return set(read(b'requires').splitlines())
538 538
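# For reference, a .hg/requires file is plain text with one requirement per
# line; a typical (illustrative) content might be:
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     store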
539 539
540 540 def makelocalrepository(baseui, path: bytes, intents=None):
541 541 """Create a local repository object.
542 542
543 543 Given arguments needed to construct a local repository, this function
544 544 performs various early repository loading functionality (such as
545 545 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
546 546 the repository can be opened, derives a type suitable for representing
547 547 that repository, and returns an instance of it.
548 548
549 549 The returned object conforms to the ``repository.completelocalrepository``
550 550 interface.
551 551
552 552 The repository type is derived by calling a series of factory functions
553 553 for each aspect/interface of the final repository. These are defined by
554 554 ``REPO_INTERFACES``.
555 555
556 556 Each factory function is called to produce a type implementing a specific
557 557 interface. The cumulative list of returned types will be combined into a
558 558 new type and that type will be instantiated to represent the local
559 559 repository.
560 560
561 561 The factory functions each receive various state that may be consulted
562 562 as part of deriving a type.
563 563
564 564 Extensions should wrap these factory functions to customize repository type
565 565 creation. Note that an extension's wrapped function may be called even if
566 566 that extension is not loaded for the repo being constructed. Extensions
567 567 should check if their ``__name__`` appears in the
568 568 ``extensionmodulenames`` set passed to the factory function and no-op if
569 569 not.
570 570 """
571 571 ui = baseui.copy()
572 572 # Prevent copying repo configuration.
573 573 ui.copy = baseui.copy
574 574
575 575 # Working directory VFS rooted at repository root.
576 576 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
577 577
578 578 # Main VFS for .hg/ directory.
579 579 hgpath = wdirvfs.join(b'.hg')
580 580 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
581 581 # Whether this repository is a shared one or not
582 582 shared = False
583 583 # If this repository is shared, the vfs pointing to the shared repo
584 584 sharedvfs = None
585 585
586 586 # The .hg/ path should exist and should be a directory. All other
587 587 # cases are errors.
588 588 if not hgvfs.isdir():
589 589 try:
590 590 hgvfs.stat()
591 591 except FileNotFoundError:
592 592 pass
593 593 except ValueError as e:
594 594 # Can be raised on Python 3.8 when path is invalid.
595 595 raise error.Abort(
596 596 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
597 597 )
598 598
599 599 raise error.RepoError(_(b'repository %s not found') % path)
600 600
601 601 requirements = _readrequires(hgvfs, True)
602 602 shared = (
603 603 requirementsmod.SHARED_REQUIREMENT in requirements
604 604 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
605 605 )
606 606 storevfs = None
607 607 if shared:
608 608 # This is a shared repo
609 609 sharedvfs = _getsharedvfs(hgvfs, requirements)
610 610 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
611 611 else:
612 612 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
613 613
614 614 # if .hg/requires contains the sharesafe requirement, it means
615 615 # there exists a `.hg/store/requires` too and we should read it
616 616 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
617 617 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
618 618 # is not present; refer to checkrequirementscompat() for that
619 619 #
620 620 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
621 621 # repository was shared the old way. We check the share source .hg/requires
622 622 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
623 623 # to be reshared
624 624 hint = _(b"see `hg help config.format.use-share-safe` for more information")
625 625 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
626 626 if (
627 627 shared
628 628 and requirementsmod.SHARESAFE_REQUIREMENT
629 629 not in _readrequires(sharedvfs, True)
630 630 ):
631 631 mismatch_warn = ui.configbool(
632 632 b'share', b'safe-mismatch.source-not-safe.warn'
633 633 )
634 634 mismatch_config = ui.config(
635 635 b'share', b'safe-mismatch.source-not-safe'
636 636 )
637 637 mismatch_verbose_upgrade = ui.configbool(
638 638 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
639 639 )
640 640 if mismatch_config in (
641 641 b'downgrade-allow',
642 642 b'allow',
643 643 b'downgrade-abort',
644 644 ):
645 645 # prevent cyclic import localrepo -> upgrade -> localrepo
646 646 from . import upgrade
647 647
648 648 upgrade.downgrade_share_to_non_safe(
649 649 ui,
650 650 hgvfs,
651 651 sharedvfs,
652 652 requirements,
653 653 mismatch_config,
654 654 mismatch_warn,
655 655 mismatch_verbose_upgrade,
656 656 )
657 657 elif mismatch_config == b'abort':
658 658 raise error.Abort(
659 659 _(b"share source does not support share-safe requirement"),
660 660 hint=hint,
661 661 )
662 662 else:
663 663 raise error.Abort(
664 664 _(
665 665 b"share-safe mismatch with source.\nUnrecognized"
666 666 b" value '%s' of `share.safe-mismatch.source-not-safe`"
667 667 b" set."
668 668 )
669 669 % mismatch_config,
670 670 hint=hint,
671 671 )
672 672 else:
673 673 requirements |= _readrequires(storevfs, False)
674 674 elif shared:
675 675 sourcerequires = _readrequires(sharedvfs, False)
676 676 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
677 677 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
678 678 mismatch_warn = ui.configbool(
679 679 b'share', b'safe-mismatch.source-safe.warn'
680 680 )
681 681 mismatch_verbose_upgrade = ui.configbool(
682 682 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
683 683 )
684 684 if mismatch_config in (
685 685 b'upgrade-allow',
686 686 b'allow',
687 687 b'upgrade-abort',
688 688 ):
689 689 # prevent cyclic import localrepo -> upgrade -> localrepo
690 690 from . import upgrade
691 691
692 692 upgrade.upgrade_share_to_safe(
693 693 ui,
694 694 hgvfs,
695 695 storevfs,
696 696 requirements,
697 697 mismatch_config,
698 698 mismatch_warn,
699 699 mismatch_verbose_upgrade,
700 700 )
701 701 elif mismatch_config == b'abort':
702 702 raise error.Abort(
703 703 _(
704 704 b'version mismatch: source uses share-safe'
705 705 b' functionality while the current share does not'
706 706 ),
707 707 hint=hint,
708 708 )
709 709 else:
710 710 raise error.Abort(
711 711 _(
712 712 b"share-safe mismatch with source.\nUnrecognized"
713 713 b" value '%s' of `share.safe-mismatch.source-safe` set."
714 714 )
715 715 % mismatch_config,
716 716 hint=hint,
717 717 )
718 718
719 719 # The .hg/hgrc file may load extensions or contain config options
720 720 # that influence repository construction. Attempt to load it and
721 721 # process any new extensions that it may have pulled in.
722 722 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
723 723 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
724 724 extensions.loadall(ui)
725 725 extensions.populateui(ui)
726 726
727 727 # Set of module names of extensions loaded for this repository.
728 728 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
729 729
730 730 supportedrequirements = gathersupportedrequirements(ui)
731 731
732 732 # We first validate the requirements are known.
733 733 ensurerequirementsrecognized(requirements, supportedrequirements)
734 734
735 735 # Then we validate that the known set is reasonable to use together.
736 736 ensurerequirementscompatible(ui, requirements)
737 737
738 738 # TODO there are unhandled edge cases related to opening repositories with
739 739 # shared storage. If storage is shared, we should also test for requirements
740 740 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
741 741 # that repo, as that repo may load extensions needed to open it. This is a
742 742 # bit complicated because we don't want the other hgrc to overwrite settings
743 743 # in this hgrc.
744 744 #
745 745 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
746 746 # file when sharing repos. But if a requirement is added after the share is
747 747 # performed, thereby introducing a new requirement for the opener, we may
748 748 # not see that and could encounter a run-time error interacting with
749 749 # that shared store since it has an unknown-to-us requirement.
750 750
751 751 # At this point, we know we should be capable of opening the repository.
752 752 # Now get on with doing that.
753 753
754 754 features = set()
755 755
756 756 # The "store" part of the repository holds versioned data. How it is
757 757 # accessed is determined by various requirements. If `shared` or
758 758 # `relshared` requirements are present, this indicates current repository
759 759 # is a share and store exists in path mentioned in `.hg/sharedpath`
760 760 if shared:
761 761 storebasepath = sharedvfs.base
762 762 cachepath = sharedvfs.join(b'cache')
763 763 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
764 764 else:
765 765 storebasepath = hgvfs.base
766 766 cachepath = hgvfs.join(b'cache')
767 767 wcachepath = hgvfs.join(b'wcache')
768 768
769 769 # The store has changed over time and the exact layout is dictated by
770 770 # requirements. The store interface abstracts differences across all
771 771 # of them.
772 772 store = makestore(
773 773 requirements,
774 774 storebasepath,
775 775 lambda base: vfsmod.vfs(base, cacheaudited=True),
776 776 )
777 777 hgvfs.createmode = store.createmode
778 778
779 779 storevfs = store.vfs
780 780 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
781 781
782 782 if (
783 783 requirementsmod.REVLOGV2_REQUIREMENT in requirements
784 784 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
785 785 ):
786 786 features.add(repository.REPO_FEATURE_SIDE_DATA)
787 787 # the revlogv2 docket introduced a race condition that we need to fix
788 788 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
789 789
790 790 # The cache vfs is used to manage cache files.
791 791 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
792 792 cachevfs.createmode = store.createmode
793 793 # The cache vfs is used to manage cache files related to the working copy
794 794 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
795 795 wcachevfs.createmode = store.createmode
796 796
797 797 # Now resolve the type for the repository object. We do this by repeatedly
798 798 # calling a factory function to produce types for specific aspects of the
799 799 # repo's operation. The aggregate returned types are used as base classes
800 800 # for a dynamically-derived type, which will represent our new repository.
801 801
802 802 bases = []
803 803 extrastate = {}
804 804
805 805 for iface, fn in REPO_INTERFACES:
806 806 # We pass all potentially useful state to give extensions tons of
807 807 # flexibility.
808 808 typ = fn()(
809 809 ui=ui,
810 810 intents=intents,
811 811 requirements=requirements,
812 812 features=features,
813 813 wdirvfs=wdirvfs,
814 814 hgvfs=hgvfs,
815 815 store=store,
816 816 storevfs=storevfs,
817 817 storeoptions=storevfs.options,
818 818 cachevfs=cachevfs,
819 819 wcachevfs=wcachevfs,
820 820 extensionmodulenames=extensionmodulenames,
821 821 extrastate=extrastate,
822 822 baseclasses=bases,
823 823 )
824 824
825 825 if not isinstance(typ, type):
826 826 raise error.ProgrammingError(
827 827 b'unable to construct type for %s' % iface
828 828 )
829 829
830 830 bases.append(typ)
831 831
832 832 # type() allows you to use characters in type names that wouldn't be
833 833 # recognized as Python symbols in source code. We abuse that to add
834 834 # rich information about our constructed repo.
835 835 name = pycompat.sysstr(
836 836 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
837 837 )
838 838
839 839 cls = type(name, tuple(bases), {})
840 840
841 841 return cls(
842 842 baseui=baseui,
843 843 ui=ui,
844 844 origroot=path,
845 845 wdirvfs=wdirvfs,
846 846 hgvfs=hgvfs,
847 847 requirements=requirements,
848 848 supportedrequirements=supportedrequirements,
849 849 sharedpath=storebasepath,
850 850 store=store,
851 851 cachevfs=cachevfs,
852 852 wcachevfs=wcachevfs,
853 853 features=features,
854 854 intents=intents,
855 855 )
856 856
857 857
858 858 def loadhgrc(
859 859 ui,
860 860 wdirvfs: vfsmod.vfs,
861 861 hgvfs: vfsmod.vfs,
862 862 requirements,
863 863 sharedvfs: Optional[vfsmod.vfs] = None,
864 864 ):
865 865 """Load hgrc files/content into a ui instance.
866 866
867 867 This is called during repository opening to load any additional
868 868 config files or settings relevant to the current repository.
869 869
870 870 Returns a bool indicating whether any additional configs were loaded.
871 871
872 872 Extensions should monkeypatch this function to modify how per-repo
873 873 configs are loaded. For example, an extension may wish to pull in
874 874 configs from alternate files or sources.
875 875
876 876 sharedvfs is a vfs object pointing to the source repo if the current one is a
877 877 shared one
878 878 """
879 879 if not rcutil.use_repo_hgrc():
880 880 return False
881 881
882 882 ret = False
883 883 # first load config from the shared source if we have to
884 884 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
885 885 try:
886 886 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
887 887 ret = True
888 888 except IOError:
889 889 pass
890 890
891 891 try:
892 892 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
893 893 ret = True
894 894 except IOError:
895 895 pass
896 896
897 897 try:
898 898 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
899 899 ret = True
900 900 except IOError:
901 901 pass
902 902
903 903 return ret
904 904
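# Read order implemented above (later files can override earlier ones):
#
#   1. <share source>/.hg/hgrc (share-safe shared repos only)
#   2. .hg/hgrc
#   3. .hg/hgrc-not-shared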
905 905
906 906 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
907 907 """Perform additional actions after .hg/hgrc is loaded.
908 908
909 909 This function is called during repository loading immediately after
910 910 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
911 911
912 912 The function can be used to validate configs, automatically add
913 913 options (including extensions) based on requirements, etc.
914 914 """
915 915
916 916 # Map of requirements to list of extensions to load automatically when
917 917 # requirement is present.
918 918 autoextensions = {
919 919 b'git': [b'git'],
920 920 b'largefiles': [b'largefiles'],
921 921 b'lfs': [b'lfs'],
922 922 }
923 923
924 924 for requirement, names in sorted(autoextensions.items()):
925 925 if requirement not in requirements:
926 926 continue
927 927
928 928 for name in names:
929 929 if not ui.hasconfig(b'extensions', name):
930 930 ui.setconfig(b'extensions', name, b'', source=b'autoload')
931 931
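# Net-effect sketch: opening a repo whose .hg/requires contains ``lfs`` acts
# as if the user had configured the following (unless already set):
#
#     [extensions]
#     lfs =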
932 932
933 933 def gathersupportedrequirements(ui):
934 934 """Determine the complete set of recognized requirements."""
935 935 # Start with all requirements supported by this file.
936 936 supported = set(localrepository._basesupported)
937 937
938 938 # Execute ``featuresetupfuncs`` entries if they belong to an extension
939 939 # relevant to this ui instance.
940 940 modules = {m.__name__ for n, m in extensions.extensions(ui)}
941 941
942 942 for fn in featuresetupfuncs:
943 943 if fn.__module__ in modules:
944 944 fn(ui, supported)
945 945
946 946 # Add derived requirements from registered compression engines.
947 947 for name in util.compengines:
948 948 engine = util.compengines[name]
949 949 if engine.available() and engine.revlogheader():
950 950 supported.add(b'exp-compression-%s' % name)
951 951 if engine.name() == b'zstd':
952 952 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
953 953
954 954 return supported
955 955
956 956
957 957 def ensurerequirementsrecognized(requirements, supported):
958 958 """Validate that a set of local requirements is recognized.
959 959
960 960 Receives a set of requirements. Raises an ``error.RepoError`` if there
961 961 exists any requirement in that set that currently loaded code doesn't
962 962 recognize.
963 963
964 964 Returns nothing; validation failures are reported via the raised error.
965 965 """
966 966 missing = set()
967 967
968 968 for requirement in requirements:
969 969 if requirement in supported:
970 970 continue
971 971
972 972 if not requirement or not requirement[0:1].isalnum():
973 973 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
974 974
975 975 missing.add(requirement)
976 976
977 977 if missing:
978 978 raise error.RequirementError(
979 979 _(b'repository requires features unknown to this Mercurial: %s')
980 980 % b' '.join(sorted(missing)),
981 981 hint=_(
982 982 b'see https://mercurial-scm.org/wiki/MissingRequirement '
983 983 b'for more information'
984 984 ),
985 985 )
986 986
987 987
988 988 def ensurerequirementscompatible(ui, requirements):
989 989 """Validates that a set of recognized requirements is mutually compatible.
990 990
991 991 Some requirements may not be compatible with others or require
992 992 config options that aren't enabled. This function is called during
993 993 repository opening to ensure that the set of requirements needed
994 994 to open a repository is sane and compatible with config options.
995 995
996 996 Extensions can monkeypatch this function to perform additional
997 997 checking.
998 998
999 999 ``error.RepoError`` should be raised on failure.
1000 1000 """
1001 1001 if (
1002 1002 requirementsmod.SPARSE_REQUIREMENT in requirements
1003 1003 and not sparse.enabled
1004 1004 ):
1005 1005 raise error.RepoError(
1006 1006 _(
1007 1007 b'repository is using sparse feature but '
1008 1008 b'sparse is not enabled; enable the '
1009 1009 b'"sparse" extensions to access'
1010 1010 )
1011 1011 )
1012 1012
1013 1013
1014 1014 def makestore(requirements, path, vfstype):
1015 1015 """Construct a storage object for a repository."""
1016 1016 if requirementsmod.STORE_REQUIREMENT in requirements:
1017 1017 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1018 1018 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1019 1019 return storemod.fncachestore(path, vfstype, dotencode)
1020 1020
1021 1021 return storemod.encodedstore(path, vfstype)
1022 1022
1023 1023 return storemod.basicstore(path, vfstype)
1024 1024
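# Dispatch summary for makestore(), derived from the checks above:
#
#   {'store', 'fncache', 'dotencode'} -> fncachestore (dotencode=True)
#   {'store', 'fncache'}              -> fncachestore (dotencode=False)
#   {'store'}                         -> encodedstore
#   (no 'store')                      -> basicstore (very old repos)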
1025 1025
1026 1026 def resolvestorevfsoptions(ui, requirements, features):
1027 1027 """Resolve the options to pass to the store vfs opener.
1028 1028
1029 1029 The returned dict is used to influence behavior of the storage layer.
1030 1030 """
1031 1031 options = {}
1032 1032
1033 1033 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1034 1034 options[b'treemanifest'] = True
1035 1035
1036 1036 # experimental config: format.manifestcachesize
1037 1037 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1038 1038 if manifestcachesize is not None:
1039 1039 options[b'manifestcachesize'] = manifestcachesize
1040 1040
1041 1041 # In the absence of another requirement superseding a revlog-related
1042 1042 # requirement, we have to assume the repo is using revlog version 0.
1043 1043 # This revlog format is super old and we don't bother trying to parse
1044 1044 # opener options for it because those options wouldn't do anything
1045 1045 # meaningful on such old repos.
1046 1046 if (
1047 1047 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1048 1048 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1049 1049 ):
1050 1050 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1051 1051 else: # explicitly mark repo as using revlogv0
1052 1052 options[b'revlogv0'] = True
1053 1053
1054 1054 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1055 1055 options[b'copies-storage'] = b'changeset-sidedata'
1056 1056 else:
1057 1057 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1058 1058 copiesextramode = (b'changeset-only', b'compatibility')
1059 1059 if writecopiesto in copiesextramode:
1060 1060 options[b'copies-storage'] = b'extra'
1061 1061
1062 1062 return options
1063 1063
1064 1064
1065 1065 def resolverevlogstorevfsoptions(ui, requirements, features):
1066 1066 """Resolve opener options specific to revlogs."""
1067 1067
1068 1068 options = {}
1069 1069 options[b'flagprocessors'] = {}
1070 1070
1071 1071 feature_config = options[b'feature-config'] = revlog.FeatureConfig()
1072 1072 data_config = options[b'data-config'] = revlog.DataConfig()
1073 1073 delta_config = options[b'delta-config'] = revlog.DeltaConfig()
1074 1074
1075 1075 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1076 1076 options[b'revlogv1'] = True
1077 1077 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1078 1078 options[b'revlogv2'] = True
1079 1079 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1080 1080 options[b'changelogv2'] = True
1081 1081 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1082 1082 options[b'changelogv2.compute-rank'] = cmp_rank
1083 1083
1084 1084 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1085 1085 options[b'generaldelta'] = True
1086 1086
1087 1087 # experimental config: format.chunkcachesize
1088 1088 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1089 1089 if chunkcachesize is not None:
1090 1090 data_config.chunk_cache_size = chunkcachesize
1091 1091
1092 1092 memory_profile = scmutil.get_resource_profile(ui, b'memory')
1093 1093 if memory_profile >= scmutil.RESOURCE_MEDIUM:
1094 1094 data_config.uncompressed_cache_count = 10_000
1095 1095 data_config.uncompressed_cache_factor = 4
1096 1096 if memory_profile >= scmutil.RESOURCE_HIGH:
1097 1097 data_config.uncompressed_cache_factor = 10
1098 1098
1099 1099 delta_config.delta_both_parents = ui.configbool(
1100 1100 b'storage', b'revlog.optimize-delta-parent-choice'
1101 1101 )
1102 1102 delta_config.candidate_group_chunk_size = ui.configint(
1103 1103 b'storage',
1104 1104 b'revlog.delta-parent-search.candidate-group-chunk-size',
1105 1105 )
1106 1106 delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')
1107 1107
1108 1108 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1109 1109 options[b'issue6528.fix-incoming'] = issue6528
1110 1110
1111 1111 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1112 1112 lazydeltabase = False
1113 1113 if lazydelta:
1114 1114 lazydeltabase = ui.configbool(
1115 1115 b'storage', b'revlog.reuse-external-delta-parent'
1116 1116 )
1117 1117 if lazydeltabase is None:
1118 1118 lazydeltabase = not scmutil.gddeltaconfig(ui)
1119 1119 delta_config.lazy_delta = lazydelta
1120 1120 delta_config.lazy_delta_base = lazydeltabase
1121 1121
1122 1122 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1123 1123 if 0 <= chainspan:
1124 1124 delta_config.max_deltachain_span = chainspan
1125 1125
1126 1126 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1127 1127 if mmapindexthreshold is not None:
1128 1128 data_config.mmap_index_threshold = mmapindexthreshold
1129 1129
1130 1130 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1131 1131 srdensitythres = float(
1132 1132 ui.config(b'experimental', b'sparse-read.density-threshold')
1133 1133 )
1134 1134 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1135 1135 data_config.with_sparse_read = withsparseread
1136 1136 data_config.sr_density_threshold = srdensitythres
1137 1137 data_config.sr_min_gap_size = srmingapsize
1138 1138
1139 1139 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1140 1140 delta_config.sparse_revlog = sparserevlog
1141 1141 if sparserevlog:
1142 1142 options[b'generaldelta'] = True
1143 1143 data_config.with_sparse_read = True
1144 1144
1145 1145 maxchainlen = None
1146 1146 if sparserevlog:
1147 1147 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1148 1148 # experimental config: format.maxchainlen
1149 1149 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1150 1150 if maxchainlen is not None:
1151 1151 delta_config.max_chain_len = maxchainlen
1152 1152
1153 1153 for r in requirements:
1154 1154     # we allow multiple compression engine requirements to co-exist because
1155 1155     # strictly speaking, revlogs seem to support mixed compression styles.
1156 1156     #
1157 1157     # The compression used for new entries will be "the last one".
1158 1158 prefix = r.startswith
1159 1159 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1160 1160 feature_config.compression_engine = r.split(b'-', 2)[2]
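            # e.g. b'revlog-compression-zstd' yields b'zstd'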
1161 1161
1162 1162 zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
1163 1163 if zlib_level is not None:
1164 1164 if not (0 <= zlib_level <= 9):
1165 1165 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1166 1166 raise error.Abort(msg % zlib_level)
1167 1167 feature_config.compression_engine_options[b'zlib.level'] = zlib_level
1168 1168 zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
1169 1169 if zstd_level is not None:
1170 1170 if not (0 <= zstd_level <= 22):
1171 1171 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1172 1172 raise error.Abort(msg % zstd_level)
1173 1173 feature_config.compression_engine_options[b'zstd.level'] = zstd_level
1174 1174
1175 1175 if requirementsmod.NARROW_REQUIREMENT in requirements:
1176 1176 feature_config.enable_ellipsis = True
1177 1177
1178 1178 if ui.configbool(b'experimental', b'rust.index'):
1179 1179 options[b'rust.index'] = True
1180 1180 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1181 1181 slow_path = ui.config(
1182 1182 b'storage', b'revlog.persistent-nodemap.slow-path'
1183 1183 )
1184 1184 if slow_path not in (b'allow', b'warn', b'abort'):
1185 1185 default = ui.config_default(
1186 1186 b'storage', b'revlog.persistent-nodemap.slow-path'
1187 1187 )
1188 1188 msg = _(
1189 1189 b'unknown value for config '
1190 1190 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1191 1191 )
1192 1192 ui.warn(msg % slow_path)
1193 1193 if not ui.quiet:
1194 1194 ui.warn(_(b'falling back to default value: %s\n') % default)
1195 1195 slow_path = default
1196 1196
1197 1197 msg = _(
1198 1198 b"accessing `persistent-nodemap` repository without associated "
1199 1199 b"fast implementation."
1200 1200 )
1201 1201 hint = _(
1202 1202 b"check `hg help config.format.use-persistent-nodemap` "
1203 1203 b"for details"
1204 1204 )
1205 1205 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1206 1206 if slow_path == b'warn':
1207 1207 msg = b"warning: " + msg + b'\n'
1208 1208 ui.warn(msg)
1209 1209 if not ui.quiet:
1210 1210 hint = b'(' + hint + b')\n'
1211 1211 ui.warn(hint)
1212 1212 if slow_path == b'abort':
1213 1213 raise error.Abort(msg, hint=hint)
1214 1214 options[b'persistent-nodemap'] = True
1215 1215 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1216 1216 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1217 1217 if slow_path not in (b'allow', b'warn', b'abort'):
1218 1218 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1219 1219 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1220 1220 ui.warn(msg % slow_path)
1221 1221 if not ui.quiet:
1222 1222 ui.warn(_(b'falling back to default value: %s\n') % default)
1223 1223 slow_path = default
1224 1224
1225 1225 msg = _(
1226 1226 b"accessing `dirstate-v2` repository without associated "
1227 1227 b"fast implementation."
1228 1228 )
1229 1229 hint = _(
1230 1230 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1231 1231 )
1232 1232 if not dirstate.HAS_FAST_DIRSTATE_V2:
1233 1233 if slow_path == b'warn':
1234 1234 msg = b"warning: " + msg + b'\n'
1235 1235 ui.warn(msg)
1236 1236 if not ui.quiet:
1237 1237 hint = b'(' + hint + b')\n'
1238 1238 ui.warn(hint)
1239 1239 if slow_path == b'abort':
1240 1240 raise error.Abort(msg, hint=hint)
1241 1241 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1242 1242 options[b'persistent-nodemap.mmap'] = True
1243 1243 if ui.configbool(b'devel', b'persistent-nodemap'):
1244 1244 options[b'devel-force-nodemap'] = True
1245 1245
1246 1246 return options
1247 1247
1248 1248
1249 1249 def makemain(**kwargs):
1250 1250 """Produce a type conforming to ``ilocalrepositorymain``."""
1251 1251 return localrepository
1252 1252
1253 1253
1254 1254 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1255 1255 class revlogfilestorage:
1256 1256 """File storage when using revlogs."""
1257 1257
1258 1258 def file(self, path):
1259 1259 if path.startswith(b'/'):
1260 1260 path = path[1:]
1261 1261
1262 1262 try_split = (
1263 1263 self.currenttransaction() is not None
1264 1264 or txnutil.mayhavepending(self.root)
1265 1265 )
1266 1266
1267 1267 return filelog.filelog(self.svfs, path, try_split=try_split)
1268 1268
1269 1269
1270 1270 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1271 1271 class revlognarrowfilestorage:
1272 1272 """File storage when using revlogs and narrow files."""
1273 1273
1274 1274 def file(self, path):
1275 1275 if path.startswith(b'/'):
1276 1276 path = path[1:]
1277 1277
1278 1278 try_split = (
1279 1279 self.currenttransaction() is not None
1280 1280 or txnutil.mayhavepending(self.root)
1281 1281 )
1282 1282 return filelog.narrowfilelog(
1283 1283 self.svfs, path, self._storenarrowmatch, try_split=try_split
1284 1284 )
1285 1285
1286 1286
1287 1287 def makefilestorage(requirements, features, **kwargs):
1288 1288 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1289 1289 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1290 1290 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1291 1291
1292 1292 if requirementsmod.NARROW_REQUIREMENT in requirements:
1293 1293 return revlognarrowfilestorage
1294 1294 else:
1295 1295 return revlogfilestorage
1296 1296
1297 1297
1298 1298 # List of repository interfaces and factory functions for them. Each
1299 1299 # will be called in order during ``makelocalrepository()`` to iteratively
1300 1300 # derive the final type for a local repository instance. We capture the
1301 1301 # function as a lambda so we don't hold a reference and the module-level
1302 1302 # functions can be wrapped.
1303 1303 REPO_INTERFACES = [
1304 1304 (repository.ilocalrepositorymain, lambda: makemain),
1305 1305 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1306 1306 ]
1307 1307
1308 1308
1309 1309 @interfaceutil.implementer(repository.ilocalrepositorymain)
1310 1310 class localrepository:
1311 1311 """Main class for representing local repositories.
1312 1312
1313 1313 All local repositories are instances of this class.
1314 1314
1315 1315 Constructed on its own, instances of this class are not usable as
1316 1316 repository objects. To obtain a usable repository object, call
1317 1317 ``hg.repository()``, ``localrepo.instance()``, or
1318 1318 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1319 1319 ``instance()`` adds support for creating new repositories.
1320 1320 ``hg.repository()`` adds more extension integration, including calling
1321 1321 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1322 1322 used.
1323 1323 """
1324 1324
1325 1325 _basesupported = {
1326 1326 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1327 1327 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1328 1328 requirementsmod.CHANGELOGV2_REQUIREMENT,
1329 1329 requirementsmod.COPIESSDC_REQUIREMENT,
1330 1330 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1331 1331 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1332 1332 requirementsmod.DOTENCODE_REQUIREMENT,
1333 1333 requirementsmod.FNCACHE_REQUIREMENT,
1334 1334 requirementsmod.GENERALDELTA_REQUIREMENT,
1335 1335 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1336 1336 requirementsmod.NODEMAP_REQUIREMENT,
1337 1337 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1338 1338 requirementsmod.REVLOGV1_REQUIREMENT,
1339 1339 requirementsmod.REVLOGV2_REQUIREMENT,
1340 1340 requirementsmod.SHARED_REQUIREMENT,
1341 1341 requirementsmod.SHARESAFE_REQUIREMENT,
1342 1342 requirementsmod.SPARSE_REQUIREMENT,
1343 1343 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1344 1344 requirementsmod.STORE_REQUIREMENT,
1345 1345 requirementsmod.TREEMANIFEST_REQUIREMENT,
1346 1346 }
1347 1347
1348 1348     # list of prefixes for files which can be written without 'wlock'
1349 1349 # Extensions should extend this list when needed
1350 1350 _wlockfreeprefix = {
1351 1351         # We might consider requiring 'wlock' for the next
1352 1352         # two, but pretty much all the existing code assumes
1353 1353         # wlock is not needed, so we keep them excluded for
1354 1354 # now.
1355 1355 b'hgrc',
1356 1356 b'requires',
1357 1357         # XXX cache is a complicated business; someone
1358 1358 # should investigate this in depth at some point
1359 1359 b'cache/',
1360 1360 # XXX bisect was still a bit too messy at the time
1361 1361 # this changeset was introduced. Someone should fix
1362 1362         # the remaining bit and drop this line
1363 1363 b'bisect.state',
1364 1364 }
1365 1365
1366 1366 def __init__(
1367 1367 self,
1368 1368 baseui,
1369 1369 ui,
1370 1370 origroot: bytes,
1371 1371 wdirvfs: vfsmod.vfs,
1372 1372 hgvfs: vfsmod.vfs,
1373 1373 requirements,
1374 1374 supportedrequirements,
1375 1375 sharedpath: bytes,
1376 1376 store,
1377 1377 cachevfs: vfsmod.vfs,
1378 1378 wcachevfs: vfsmod.vfs,
1379 1379 features,
1380 1380 intents=None,
1381 1381 ):
1382 1382 """Create a new local repository instance.
1383 1383
1384 1384 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1385 1385 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1386 1386 object.
1387 1387
1388 1388 Arguments:
1389 1389
1390 1390 baseui
1391 1391 ``ui.ui`` instance that ``ui`` argument was based off of.
1392 1392
1393 1393 ui
1394 1394 ``ui.ui`` instance for use by the repository.
1395 1395
1396 1396 origroot
1397 1397 ``bytes`` path to working directory root of this repository.
1398 1398
1399 1399 wdirvfs
1400 1400 ``vfs.vfs`` rooted at the working directory.
1401 1401
1402 1402 hgvfs
1403 1403 ``vfs.vfs`` rooted at .hg/
1404 1404
1405 1405 requirements
1406 1406 ``set`` of bytestrings representing repository opening requirements.
1407 1407
1408 1408 supportedrequirements
1409 1409 ``set`` of bytestrings representing repository requirements that we
1410 1410        know how to open. May be a superset of ``requirements``.
1411 1411
1412 1412 sharedpath
1413 1413 ``bytes`` Defining path to storage base directory. Points to a
1414 1414 ``.hg/`` directory somewhere.
1415 1415
1416 1416 store
1417 1417 ``store.basicstore`` (or derived) instance providing access to
1418 1418 versioned storage.
1419 1419
1420 1420 cachevfs
1421 1421 ``vfs.vfs`` used for cache files.
1422 1422
1423 1423 wcachevfs
1424 1424 ``vfs.vfs`` used for cache files related to the working copy.
1425 1425
1426 1426 features
1427 1427 ``set`` of bytestrings defining features/capabilities of this
1428 1428 instance.
1429 1429
1430 1430 intents
1431 1431 ``set`` of system strings indicating what this repo will be used
1432 1432 for.
1433 1433 """
1434 1434 self.baseui = baseui
1435 1435 self.ui = ui
1436 1436 self.origroot = origroot
1437 1437 # vfs rooted at working directory.
1438 1438 self.wvfs = wdirvfs
1439 1439 self.root = wdirvfs.base
1440 1440 # vfs rooted at .hg/. Used to access most non-store paths.
1441 1441 self.vfs = hgvfs
1442 1442 self.path = hgvfs.base
1443 1443 self.requirements = requirements
1444 1444 self.nodeconstants = sha1nodeconstants
1445 1445 self.nullid = self.nodeconstants.nullid
1446 1446 self.supported = supportedrequirements
1447 1447 self.sharedpath = sharedpath
1448 1448 self.store = store
1449 1449 self.cachevfs = cachevfs
1450 1450 self.wcachevfs = wcachevfs
1451 1451 self.features = features
1452 1452
1453 1453 self.filtername = None
1454 1454
1455 1455 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1456 1456 b'devel', b'check-locks'
1457 1457 ):
1458 1458 self.vfs.audit = self._getvfsward(self.vfs.audit)
1459 1459         # A list of callbacks to shape the phases if no data were found.
1460 1460         # Callbacks are in the form: func(repo, roots) --> processed roots.
1461 1461         # This list is to be filled by extensions during repo setup.
1462 1462 self._phasedefaults = []
1463 1463
1464 1464 color.setup(self.ui)
1465 1465
1466 1466 self.spath = self.store.path
1467 1467 self.svfs = self.store.vfs
1468 1468 self.sjoin = self.store.join
1469 1469 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1470 1470 b'devel', b'check-locks'
1471 1471 ):
1472 1472 if hasattr(self.svfs, 'vfs'): # this is filtervfs
1473 1473 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1474 1474 else: # standard vfs
1475 1475 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1476 1476
1477 1477 self._dirstatevalidatewarned = False
1478 1478
1479 1479 self._branchcaches = branchmap.BranchMapCache()
1480 1480 self._revbranchcache = None
1481 1481 self._filterpats = {}
1482 1482 self._datafilters = {}
1483 1483 self._transref = self._lockref = self._wlockref = None
1484 1484
1485 1485         # A cache for various files under .hg/ that tracks file changes
1486 1486         # (used by the filecache decorator)
1487 1487 #
1488 1488 # Maps a property name to its util.filecacheentry
1489 1489 self._filecache = {}
1490 1490
1491 1491         # holds sets of revisions to be filtered
1492 1492 # should be cleared when something might have changed the filter value:
1493 1493 # - new changesets,
1494 1494 # - phase change,
1495 1495 # - new obsolescence marker,
1496 1496 # - working directory parent change,
1497 1497 # - bookmark changes
1498 1498 self.filteredrevcache = {}
1499 1499
1500 1500 self._dirstate = None
1501 1501 # post-dirstate-status hooks
1502 1502 self._postdsstatus = []
1503 1503
1504 1504 self._pending_narrow_pats = None
1505 1505 self._pending_narrow_pats_dirstate = None
1506 1506
1507 1507 # generic mapping between names and nodes
1508 1508 self.names = namespaces.namespaces()
1509 1509
1510 1510 # Key to signature value.
1511 1511 self._sparsesignaturecache = {}
1512 1512 # Signature to cached matcher instance.
1513 1513 self._sparsematchercache = {}
1514 1514
1515 1515 self._extrafilterid = repoview.extrafilter(ui)
1516 1516
1517 1517 self.filecopiesmode = None
1518 1518 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1519 1519 self.filecopiesmode = b'changeset-sidedata'
1520 1520
1521 1521 self._wanted_sidedata = set()
1522 1522 self._sidedata_computers = {}
1523 1523 sidedatamod.set_sidedata_spec_for_repo(self)
1524 1524
1525 1525 def _getvfsward(self, origfunc):
1526 1526 """build a ward for self.vfs"""
1527 1527 rref = weakref.ref(self)
1528 1528
1529 1529 def checkvfs(path, mode=None):
1530 1530 ret = origfunc(path, mode=mode)
1531 1531 repo = rref()
1532 1532 if (
1533 1533 repo is None
1534 1534 or not hasattr(repo, '_wlockref')
1535 1535 or not hasattr(repo, '_lockref')
1536 1536 ):
1537 1537 return
1538 1538 if mode in (None, b'r', b'rb'):
1539 1539 return
1540 1540 if path.startswith(repo.path):
1541 1541 # truncate name relative to the repository (.hg)
1542 1542 path = path[len(repo.path) + 1 :]
1543 1543 if path.startswith(b'cache/'):
1544 1544 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1545 1545 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1546 1546 # path prefixes covered by 'lock'
1547 1547 vfs_path_prefixes = (
1548 1548 b'journal.',
1549 1549 b'undo.',
1550 1550 b'strip-backup/',
1551 1551 b'cache/',
1552 1552 )
1553 1553 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1554 1554 if repo._currentlock(repo._lockref) is None:
1555 1555 repo.ui.develwarn(
1556 1556 b'write with no lock: "%s"' % path,
1557 1557 stacklevel=3,
1558 1558 config=b'check-locks',
1559 1559 )
1560 1560 elif repo._currentlock(repo._wlockref) is None:
1561 1561 # rest of vfs files are covered by 'wlock'
1562 1562 #
1563 1563 # exclude special files
1564 1564 for prefix in self._wlockfreeprefix:
1565 1565 if path.startswith(prefix):
1566 1566 return
1567 1567 repo.ui.develwarn(
1568 1568 b'write with no wlock: "%s"' % path,
1569 1569 stacklevel=3,
1570 1570 config=b'check-locks',
1571 1571 )
1572 1572 return ret
1573 1573
1574 1574 return checkvfs
1575 1575
1576 1576 def _getsvfsward(self, origfunc):
1577 1577 """build a ward for self.svfs"""
1578 1578 rref = weakref.ref(self)
1579 1579
1580 1580 def checksvfs(path, mode=None):
1581 1581 ret = origfunc(path, mode=mode)
1582 1582 repo = rref()
1583 1583 if repo is None or not hasattr(repo, '_lockref'):
1584 1584 return
1585 1585 if mode in (None, b'r', b'rb'):
1586 1586 return
1587 1587 if path.startswith(repo.sharedpath):
1588 1588 # truncate name relative to the repository (.hg)
1589 1589 path = path[len(repo.sharedpath) + 1 :]
1590 1590 if repo._currentlock(repo._lockref) is None:
1591 1591 repo.ui.develwarn(
1592 1592 b'write with no lock: "%s"' % path, stacklevel=4
1593 1593 )
1594 1594 return ret
1595 1595
1596 1596 return checksvfs
1597 1597
1598 1598 @property
1599 1599 def vfs_map(self):
1600 1600 return {
1601 1601 b'': self.svfs,
1602 1602 b'plain': self.vfs,
1603 1603 b'store': self.svfs,
1604 1604 }
1605 1605
1606 1606 def close(self):
1607 1607 self._writecaches()
1608 1608
1609 1609 def _writecaches(self):
1610 1610 if self._revbranchcache:
1611 1611 self._revbranchcache.write()
1612 1612
1613 1613 def _restrictcapabilities(self, caps):
1614 1614 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1615 1615 caps = set(caps)
1616 1616 capsblob = bundle2.encodecaps(
1617 1617 bundle2.getrepocaps(self, role=b'client')
1618 1618 )
1619 1619 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1620 1620 if self.ui.configbool(b'experimental', b'narrow'):
1621 1621 caps.add(wireprototypes.NARROWCAP)
1622 1622 return caps
1623 1623
1624 1624     # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1625 1625 # self -> auditor -> self._checknested -> self
1626 1626
1627 1627 @property
1628 1628 def auditor(self):
1629 1629 # This is only used by context.workingctx.match in order to
1630 1630 # detect files in subrepos.
1631 1631 return pathutil.pathauditor(self.root, callback=self._checknested)
1632 1632
1633 1633 @property
1634 1634 def nofsauditor(self):
1635 1635 # This is only used by context.basectx.match in order to detect
1636 1636 # files in subrepos.
1637 1637 return pathutil.pathauditor(
1638 1638 self.root, callback=self._checknested, realfs=False, cached=True
1639 1639 )
1640 1640
1641 1641 def _checknested(self, path):
1642 1642 """Determine if path is a legal nested repository."""
1643 1643 if not path.startswith(self.root):
1644 1644 return False
1645 1645 subpath = path[len(self.root) + 1 :]
1646 1646 normsubpath = util.pconvert(subpath)
1647 1647
1648 1648 # XXX: Checking against the current working copy is wrong in
1649 1649 # the sense that it can reject things like
1650 1650 #
1651 1651 # $ hg cat -r 10 sub/x.txt
1652 1652 #
1653 1653 # if sub/ is no longer a subrepository in the working copy
1654 1654 # parent revision.
1655 1655 #
1656 1656 # However, it can of course also allow things that would have
1657 1657 # been rejected before, such as the above cat command if sub/
1658 1658 # is a subrepository now, but was a normal directory before.
1659 1659         # The old path auditor would have rejected it by mistake since it
1660 1660 # panics when it sees sub/.hg/.
1661 1661 #
1662 1662 # All in all, checking against the working copy seems sensible
1663 1663 # since we want to prevent access to nested repositories on
1664 1664 # the filesystem *now*.
1665 1665 ctx = self[None]
1666 1666 parts = util.splitpath(subpath)
1667 1667 while parts:
1668 1668 prefix = b'/'.join(parts)
1669 1669 if prefix in ctx.substate:
1670 1670 if prefix == normsubpath:
1671 1671 return True
1672 1672 else:
1673 1673 sub = ctx.sub(prefix)
1674 1674 return sub.checknested(subpath[len(prefix) + 1 :])
1675 1675 else:
1676 1676 parts.pop()
1677 1677 return False
1678 1678
1679 1679 def peer(self, path=None, remotehidden=False):
1680 1680 return localpeer(
1681 1681 self, path=path, remotehidden=remotehidden
1682 1682 ) # not cached to avoid reference cycle
1683 1683
1684 1684 def unfiltered(self):
1685 1685 """Return unfiltered version of the repository
1686 1686
1687 1687 Intended to be overwritten by filtered repo."""
1688 1688 return self
1689 1689
1690 1690 def filtered(self, name, visibilityexceptions=None):
1691 1691 """Return a filtered version of a repository
1692 1692
1693 1693 The `name` parameter is the identifier of the requested view. This
1694 1694 will return a repoview object set "exactly" to the specified view.
1695 1695
1696 1696 This function does not apply recursive filtering to a repository. For
1697 1697 example calling `repo.filtered("served")` will return a repoview using
1698 1698 the "served" view, regardless of the initial view used by `repo`.
1699 1699
1700 1700         In other words, there is always only one level of `repoview` "filtering".
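
        For example, ``repo.filtered(b'visible')`` hides hidden (e.g.
        obsolete) changesets, while ``repo.filtered(b'served')``
        additionally hides secret ones.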
1701 1701 """
1702 1702 if self._extrafilterid is not None and b'%' not in name:
1703 1703 name = name + b'%' + self._extrafilterid
1704 1704
1705 1705 cls = repoview.newtype(self.unfiltered().__class__)
1706 1706 return cls(self, name, visibilityexceptions)
1707 1707
1708 1708 @mixedrepostorecache(
1709 1709 (b'bookmarks', b'plain'),
1710 1710 (b'bookmarks.current', b'plain'),
1711 1711 (b'bookmarks', b''),
1712 1712 (b'00changelog.i', b''),
1713 1713 )
1714 1714 def _bookmarks(self):
1715 1715 # Since the multiple files involved in the transaction cannot be
1716 1716 # written atomically (with current repository format), there is a race
1717 1717 # condition here.
1718 1718 #
1719 1719 # 1) changelog content A is read
1720 1720 # 2) outside transaction update changelog to content B
1721 1721 # 3) outside transaction update bookmark file referring to content B
1722 1722 # 4) bookmarks file content is read and filtered against changelog-A
1723 1723 #
1724 1724 # When this happens, bookmarks against nodes missing from A are dropped.
1725 1725 #
1726 1726         # Having this happen during a read is not great, but it becomes worse
1727 1727         # when it happens during a write, because the bookmarks to the "unknown"
1728 1728         # nodes will be dropped for good. However, writes happen within locks.
1729 1729         # This locking makes it possible to have a race-free consistent read.
1730 1730         # For this purpose, data read from disk before locking are
1731 1731         # "invalidated" right after the locks are taken. These invalidations are
1732 1732         # "light": the `filecache` mechanism keeps the data in memory and will
1733 1733         # reuse them if the underlying files did not change. Not parsing the
1734 1734         # same data multiple times helps performance.
1735 1735 #
1736 1736         # Unfortunately, in the case described above, the files tracked by the
1737 1737         # bookmarks file cache might not have changed, but the in-memory
1738 1738         # content is still "wrong" because we used an older changelog content
1739 1739         # to process the on-disk data. So after locking, the changelog would be
1740 1740         # refreshed but `_bookmarks` would be preserved.
1741 1741         # Adding `00changelog.i` to the list of tracked files is not
1742 1742         # enough, because at the time we build the content for `_bookmarks` in
1743 1743         # (4), the changelog file has already diverged from the content used
1744 1744         # for loading `changelog` in (1).
1745 1745 #
1746 1746 # To prevent the issue, we force the changelog to be explicitly
1747 1747 # reloaded while computing `_bookmarks`. The data race can still happen
1748 1748 # without the lock (with a narrower window), but it would no longer go
1749 1749 # undetected during the lock time refresh.
1750 1750 #
1751 1751         # The new schedule is as follows:
1752 1752 #
1753 1753 # 1) filecache logic detect that `_bookmarks` needs to be computed
1754 1754 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1755 1755 # 3) We force `changelog` filecache to be tested
1756 1756 # 4) cachestat for `changelog` are captured (for changelog)
1757 1757 # 5) `_bookmarks` is computed and cached
1758 1758 #
1759 1759         # The step in (3) ensures we have a changelog at least as recent as the
1760 1760         # cache stat computed in (1). As a result, at locking time:
1761 1761         # * if the changelog did not change since (1) -> we can reuse the data
1762 1762 # * otherwise -> the bookmarks get refreshed.
1763 1763 self._refreshchangelog()
1764 1764 return bookmarks.bmstore(self)
1765 1765
1766 1766 def _refreshchangelog(self):
1767 1767         """make sure the in-memory changelog matches the on-disk one"""
1768 1768 if 'changelog' in vars(self) and self.currenttransaction() is None:
1769 1769 del self.changelog
1770 1770
1771 1771 @property
1772 1772 def _activebookmark(self):
1773 1773 return self._bookmarks.active
1774 1774
1775 1775 # _phasesets depend on changelog. what we need is to call
1776 1776 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1777 1777 # can't be easily expressed in filecache mechanism.
1778 1778 @storecache(b'phaseroots', b'00changelog.i')
1779 1779 def _phasecache(self):
1780 1780 return phases.phasecache(self, self._phasedefaults)
1781 1781
1782 1782 @storecache(b'obsstore')
1783 1783 def obsstore(self):
1784 1784 return obsolete.makestore(self.ui, self)
1785 1785
1786 1786 @changelogcache()
1787 1787 def changelog(repo):
1788 1788 # load dirstate before changelog to avoid race see issue6303
1789 1789 repo.dirstate.prefetch_parents()
1790 1790 return repo.store.changelog(
1791 1791 txnutil.mayhavepending(repo.root),
1792 1792 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1793 1793 )
1794 1794
1795 1795 @manifestlogcache()
1796 1796 def manifestlog(self):
1797 1797 return self.store.manifestlog(self, self._storenarrowmatch)
1798 1798
1799 1799 @unfilteredpropertycache
1800 1800 def dirstate(self):
1801 1801 if self._dirstate is None:
1802 1802 self._dirstate = self._makedirstate()
1803 1803 else:
1804 1804 self._dirstate.refresh()
1805 1805 return self._dirstate
1806 1806
1807 1807 def _makedirstate(self):
1808 1808 """Extension point for wrapping the dirstate per-repo."""
1809 1809 sparsematchfn = None
1810 1810 if sparse.use_sparse(self):
1811 1811 sparsematchfn = lambda: sparse.matcher(self)
1812 1812 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1813 1813 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1814 1814 use_dirstate_v2 = v2_req in self.requirements
1815 1815 use_tracked_hint = th in self.requirements
1816 1816
1817 1817 return dirstate.dirstate(
1818 1818 self.vfs,
1819 1819 self.ui,
1820 1820 self.root,
1821 1821 self._dirstatevalidate,
1822 1822 sparsematchfn,
1823 1823 self.nodeconstants,
1824 1824 use_dirstate_v2,
1825 1825 use_tracked_hint=use_tracked_hint,
1826 1826 )
1827 1827
1828 1828 def _dirstatevalidate(self, node):
1829 1829 okay = True
1830 1830 try:
1831 1831 self.changelog.rev(node)
1832 1832 except error.LookupError:
1833 1833             # If the parents are unknown, it might just be because the changelog
1834 1834 # in memory is lagging behind the dirstate in memory. So try to
1835 1835 # refresh the changelog first.
1836 1836 #
1837 1837             # We only do so if we don't hold the lock; if we do hold the lock,
1838 1838 # the invalidation at that time should have taken care of this and
1839 1839 # something is very fishy.
1840 1840 if self.currentlock() is None:
1841 1841 self.invalidate()
1842 1842 try:
1843 1843 self.changelog.rev(node)
1844 1844 except error.LookupError:
1845 1845 okay = False
1846 1846 else:
1847 1847 # XXX we should consider raising an error here.
1848 1848 okay = False
1849 1849 if okay:
1850 1850 return node
1851 1851 else:
1852 1852 if not self._dirstatevalidatewarned:
1853 1853 self._dirstatevalidatewarned = True
1854 1854 self.ui.warn(
1855 1855 _(b"warning: ignoring unknown working parent %s!\n")
1856 1856 % short(node)
1857 1857 )
1858 1858 return self.nullid
1859 1859
1860 1860 @storecache(narrowspec.FILENAME)
1861 1861 def narrowpats(self):
1862 1862 """matcher patterns for this repository's narrowspec
1863 1863
1864 1864 A tuple of (includes, excludes).
1865 1865 """
1866 1866 # the narrow management should probably move into its own object
1867 1867 val = self._pending_narrow_pats
1868 1868 if val is None:
1869 1869 val = narrowspec.load(self)
1870 1870 return val
1871 1871
1872 1872 @storecache(narrowspec.FILENAME)
1873 1873 def _storenarrowmatch(self):
1874 1874 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1875 1875 return matchmod.always()
1876 1876 include, exclude = self.narrowpats
1877 1877 return narrowspec.match(self.root, include=include, exclude=exclude)
1878 1878
1879 1879 @storecache(narrowspec.FILENAME)
1880 1880 def _narrowmatch(self):
1881 1881 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1882 1882 return matchmod.always()
1883 1883 narrowspec.checkworkingcopynarrowspec(self)
1884 1884 include, exclude = self.narrowpats
1885 1885 return narrowspec.match(self.root, include=include, exclude=exclude)
1886 1886
1887 1887 def narrowmatch(self, match=None, includeexact=False):
1888 1888         """matcher corresponding to the repo's narrowspec
1889 1889
1890 1890 If `match` is given, then that will be intersected with the narrow
1891 1891 matcher.
1892 1892
1893 1893 If `includeexact` is True, then any exact matches from `match` will
1894 1894 be included even if they're outside the narrowspec.
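
        An illustrative sketch, intersecting a made-up file matcher with
        the narrowspec::

            m = matchmod.match(repo.root, b'', [b'glob:src/**'])
            narrowed = repo.narrowmatch(m)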
1895 1895 """
1896 1896 if match:
1897 1897 if includeexact and not self._narrowmatch.always():
1898 1898 # do not exclude explicitly-specified paths so that they can
1899 1899 # be warned later on
1900 1900 em = matchmod.exact(match.files())
1901 1901 nm = matchmod.unionmatcher([self._narrowmatch, em])
1902 1902 return matchmod.intersectmatchers(match, nm)
1903 1903 return matchmod.intersectmatchers(match, self._narrowmatch)
1904 1904 return self._narrowmatch
1905 1905
1906 1906 def setnarrowpats(self, newincludes, newexcludes):
1907 1907 narrowspec.save(self, newincludes, newexcludes)
1908 1908 self.invalidate(clearfilecache=True)
1909 1909
1910 1910 @unfilteredpropertycache
1911 1911 def _quick_access_changeid_null(self):
1912 1912 return {
1913 1913 b'null': (nullrev, self.nodeconstants.nullid),
1914 1914 nullrev: (nullrev, self.nodeconstants.nullid),
1915 1915 self.nullid: (nullrev, self.nullid),
1916 1916 }
1917 1917
1918 1918 @unfilteredpropertycache
1919 1919 def _quick_access_changeid_wc(self):
1920 1920         # also fast-path access to the working copy parents;
1921 1921         # however, only do it for filters that ensure the wc is visible.
1922 1922 quick = self._quick_access_changeid_null.copy()
1923 1923 cl = self.unfiltered().changelog
1924 1924 for node in self.dirstate.parents():
1925 1925 if node == self.nullid:
1926 1926 continue
1927 1927 rev = cl.index.get_rev(node)
1928 1928 if rev is None:
1929 1929 # unknown working copy parent case:
1930 1930 #
1931 1931 # skip the fast path and let higher code deal with it
1932 1932 continue
1933 1933 pair = (rev, node)
1934 1934 quick[rev] = pair
1935 1935 quick[node] = pair
1936 1936 # also add the parents of the parents
1937 1937 for r in cl.parentrevs(rev):
1938 1938 if r == nullrev:
1939 1939 continue
1940 1940 n = cl.node(r)
1941 1941 pair = (r, n)
1942 1942 quick[r] = pair
1943 1943 quick[n] = pair
1944 1944 p1node = self.dirstate.p1()
1945 1945 if p1node != self.nullid:
1946 1946 quick[b'.'] = quick[p1node]
1947 1947 return quick
1948 1948
1949 1949 @unfilteredmethod
1950 1950 def _quick_access_changeid_invalidate(self):
1951 1951 if '_quick_access_changeid_wc' in vars(self):
1952 1952 del self.__dict__['_quick_access_changeid_wc']
1953 1953
1954 1954 @property
1955 1955 def _quick_access_changeid(self):
1956 1956         """a helper dictionary for __getitem__ calls
1957 1957 
1958 1958         This contains a list of symbols we can recognize right away without
1959 1959         further processing.
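
        For example, it always maps ``b'null'``, ``nullrev`` and the null
        node to ``(nullrev, nullid)``, and, for filters where the working
        copy is visible, the working copy parents as well (see the two
        properties above).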
1960 1960 """
1961 1961 if self.filtername in repoview.filter_has_wc:
1962 1962 return self._quick_access_changeid_wc
1963 1963 return self._quick_access_changeid_null
1964 1964
1965 1965 def __getitem__(self, changeid):
1966 1966 # dealing with special cases
1967 1967 if changeid is None:
1968 1968 return context.workingctx(self)
1969 1969 if isinstance(changeid, context.basectx):
1970 1970 return changeid
1971 1971
1972 1972 # dealing with multiple revisions
1973 1973 if isinstance(changeid, slice):
1974 1974 # wdirrev isn't contiguous so the slice shouldn't include it
1975 1975 return [
1976 1976 self[i]
1977 1977 for i in range(*changeid.indices(len(self)))
1978 1978 if i not in self.changelog.filteredrevs
1979 1979 ]
1980 1980
1981 1981 # dealing with some special values
1982 1982 quick_access = self._quick_access_changeid.get(changeid)
1983 1983 if quick_access is not None:
1984 1984 rev, node = quick_access
1985 1985 return context.changectx(self, rev, node, maybe_filtered=False)
1986 1986 if changeid == b'tip':
1987 1987 node = self.changelog.tip()
1988 1988 rev = self.changelog.rev(node)
1989 1989 return context.changectx(self, rev, node)
1990 1990
1991 1991 # dealing with arbitrary values
1992 1992 try:
1993 1993 if isinstance(changeid, int):
1994 1994 node = self.changelog.node(changeid)
1995 1995 rev = changeid
1996 1996 elif changeid == b'.':
1997 1997 # this is a hack to delay/avoid loading obsmarkers
1998 1998 # when we know that '.' won't be hidden
1999 1999 node = self.dirstate.p1()
2000 2000 rev = self.unfiltered().changelog.rev(node)
2001 2001 elif len(changeid) == self.nodeconstants.nodelen:
2002 2002 try:
2003 2003 node = changeid
2004 2004 rev = self.changelog.rev(changeid)
2005 2005 except error.FilteredLookupError:
2006 2006 changeid = hex(changeid) # for the error message
2007 2007 raise
2008 2008 except LookupError:
2009 2009             # check if it might have come from a damaged dirstate
2010 2010 #
2011 2011 # XXX we could avoid the unfiltered if we had a recognizable
2012 2012 # exception for filtered changeset access
2013 2013 if (
2014 2014 self.local()
2015 2015 and changeid in self.unfiltered().dirstate.parents()
2016 2016 ):
2017 2017 msg = _(b"working directory has unknown parent '%s'!")
2018 2018 raise error.Abort(msg % short(changeid))
2019 2019 changeid = hex(changeid) # for the error message
2020 2020 raise
2021 2021
2022 2022 elif len(changeid) == 2 * self.nodeconstants.nodelen:
2023 2023 node = bin(changeid)
2024 2024 rev = self.changelog.rev(node)
2025 2025 else:
2026 2026 raise error.ProgrammingError(
2027 2027 b"unsupported changeid '%s' of type %s"
2028 2028 % (changeid, pycompat.bytestr(type(changeid)))
2029 2029 )
2030 2030
2031 2031 return context.changectx(self, rev, node)
2032 2032
2033 2033 except (error.FilteredIndexError, error.FilteredLookupError):
2034 2034 raise error.FilteredRepoLookupError(
2035 2035 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
2036 2036 )
2037 2037 except (IndexError, LookupError):
2038 2038 raise error.RepoLookupError(
2039 2039 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
2040 2040 )
2041 2041 except error.WdirUnsupported:
2042 2042 return context.workingctx(self)
2043 2043
2044 2044 def __contains__(self, changeid):
2045 2045 """True if the given changeid exists"""
2046 2046 try:
2047 2047 self[changeid]
2048 2048 return True
2049 2049 except error.RepoLookupError:
2050 2050 return False
2051 2051
2052 2052 def __nonzero__(self):
2053 2053 return True
2054 2054
2055 2055 __bool__ = __nonzero__
2056 2056
2057 2057 def __len__(self):
2058 2058 # no need to pay the cost of repoview.changelog
2059 2059 unfi = self.unfiltered()
2060 2060 return len(unfi.changelog)
2061 2061
2062 2062 def __iter__(self):
2063 2063 return iter(self.changelog)
2064 2064
2065 2065 def revs(self, expr: bytes, *args):
2066 2066 """Find revisions matching a revset.
2067 2067
2068 2068 The revset is specified as a string ``expr`` that may contain
2069 2069 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2070 2070
2071 2071 Revset aliases from the configuration are not expanded. To expand
2072 2072 user aliases, consider calling ``scmutil.revrange()`` or
2073 2073 ``repo.anyrevs([expr], user=True)``.
2074 2074
2075 2075 Returns a smartset.abstractsmartset, which is a list-like interface
2076 2076 that contains integer revisions.
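
        For example (illustrative)::

            for r in repo.revs(b'%ld and head()', [0, 1, 2]):
                ...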
2077 2077 """
2078 2078 tree = revsetlang.spectree(expr, *args)
2079 2079 return revset.makematcher(tree)(self)
2080 2080
2081 2081 def set(self, expr: bytes, *args):
2082 2082 """Find revisions matching a revset and emit changectx instances.
2083 2083
2084 2084 This is a convenience wrapper around ``revs()`` that iterates the
2085 2085 result and is a generator of changectx instances.
2086 2086
2087 2087 Revset aliases from the configuration are not expanded. To expand
2088 2088 user aliases, consider calling ``scmutil.revrange()``.
2089 2089 """
2090 2090 for r in self.revs(expr, *args):
2091 2091 yield self[r]
2092 2092
2093 2093 def anyrevs(self, specs: bytes, user=False, localalias=None):
2094 2094 """Find revisions matching one of the given revsets.
2095 2095
2096 2096 Revset aliases from the configuration are not expanded by default. To
2097 2097 expand user aliases, specify ``user=True``. To provide some local
2098 2098 definitions overriding user aliases, set ``localalias`` to
2099 2099 ``{name: definitionstring}``.
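
        For example (illustrative)::

            repo.anyrevs(
                [b'mine()'],
                user=True,
                localalias={b'mine': b'author("alice")'},
            )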
2100 2100 """
2101 2101 if specs == [b'null']:
2102 2102 return revset.baseset([nullrev])
2103 2103 if specs == [b'.']:
2104 2104 quick_data = self._quick_access_changeid.get(b'.')
2105 2105 if quick_data is not None:
2106 2106 return revset.baseset([quick_data[0]])
2107 2107 if user:
2108 2108 m = revset.matchany(
2109 2109 self.ui,
2110 2110 specs,
2111 2111 lookup=revset.lookupfn(self),
2112 2112 localalias=localalias,
2113 2113 )
2114 2114 else:
2115 2115 m = revset.matchany(None, specs, localalias=localalias)
2116 2116 return m(self)
2117 2117
2118 2118 def url(self) -> bytes:
2119 2119 return b'file:' + self.root
2120 2120
2121 2121 def hook(self, name, throw=False, **args):
2122 2122 """Call a hook, passing this repo instance.
2123 2123
2124 2124         This is a convenience method to aid invoking hooks. Extensions likely
2125 2125 won't call this unless they have registered a custom hook or are
2126 2126 replacing code that is expected to call a hook.
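
        For example, this exact call is made when a transaction opens::

            repo.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)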
2127 2127 """
2128 2128 return hook.hook(self.ui, self, name, throw, **args)
2129 2129
2130 2130 @filteredpropertycache
2131 2131 def _tagscache(self):
2132 2132 """Returns a tagscache object that contains various tags related
2133 2133 caches."""
2134 2134
2135 2135 # This simplifies its cache management by having one decorated
2136 2136 # function (this one) and the rest simply fetch things from it.
2137 2137 class tagscache:
2138 2138 def __init__(self):
2139 2139 # These two define the set of tags for this repository. tags
2140 2140 # maps tag name to node; tagtypes maps tag name to 'global' or
2141 2141 # 'local'. (Global tags are defined by .hgtags across all
2142 2142 # heads, and local tags are defined in .hg/localtags.)
2143 2143 # They constitute the in-memory cache of tags.
2144 2144 self.tags = self.tagtypes = None
2145 2145
2146 2146 self.nodetagscache = self.tagslist = None
2147 2147
2148 2148 cache = tagscache()
2149 2149 cache.tags, cache.tagtypes = self._findtags()
2150 2150
2151 2151 return cache
2152 2152
2153 2153 def tags(self):
2154 2154 '''return a mapping of tag to node'''
2155 2155 t = {}
2156 2156 if self.changelog.filteredrevs:
2157 2157 tags, tt = self._findtags()
2158 2158 else:
2159 2159 tags = self._tagscache.tags
2160 2160 rev = self.changelog.rev
2161 2161 for k, v in tags.items():
2162 2162 try:
2163 2163 # ignore tags to unknown nodes
2164 2164 rev(v)
2165 2165 t[k] = v
2166 2166 except (error.LookupError, ValueError):
2167 2167 pass
2168 2168 return t
2169 2169
2170 2170 def _findtags(self):
2171 2171 """Do the hard work of finding tags. Return a pair of dicts
2172 2172 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2173 2173 maps tag name to a string like \'global\' or \'local\'.
2174 2174 Subclasses or extensions are free to add their own tags, but
2175 2175 should be aware that the returned dicts will be retained for the
2176 2176 duration of the localrepo object."""
2177 2177
2178 2178 # XXX what tagtype should subclasses/extensions use? Currently
2179 2179 # mq and bookmarks add tags, but do not set the tagtype at all.
2180 2180 # Should each extension invent its own tag type? Should there
2181 2181 # be one tagtype for all such "virtual" tags? Or is the status
2182 2182 # quo fine?
2183 2183
2184 2184 # map tag name to (node, hist)
2185 2185 alltags = tagsmod.findglobaltags(self.ui, self)
2186 2186 # map tag name to tag type
2187 2187 tagtypes = {tag: b'global' for tag in alltags}
2188 2188
2189 2189 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2190 2190
2191 2191 # Build the return dicts. Have to re-encode tag names because
2192 2192 # the tags module always uses UTF-8 (in order not to lose info
2193 2193 # writing to the cache), but the rest of Mercurial wants them in
2194 2194 # local encoding.
2195 2195 tags = {}
2196 2196 for name, (node, hist) in alltags.items():
2197 2197 if node != self.nullid:
2198 2198 tags[encoding.tolocal(name)] = node
2199 2199 tags[b'tip'] = self.changelog.tip()
2200 2200 tagtypes = {
2201 2201 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2202 2202 }
2203 2203 return (tags, tagtypes)
2204 2204
2205 2205 def tagtype(self, tagname):
2206 2206 """
2207 2207         return the type of the given tag. The result can be:
2208 2208
2209 2209 'local' : a local tag
2210 2210 'global' : a global tag
2211 2211 None : tag does not exist
2212 2212 """
2213 2213
2214 2214 return self._tagscache.tagtypes.get(tagname)
2215 2215
2216 2216 def tagslist(self):
2217 2217 '''return a list of tags ordered by revision'''
2218 2218 if not self._tagscache.tagslist:
2219 2219 l = []
2220 2220 for t, n in self.tags().items():
2221 2221 l.append((self.changelog.rev(n), t, n))
2222 2222 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2223 2223
2224 2224 return self._tagscache.tagslist
2225 2225
2226 2226 def nodetags(self, node):
2227 2227 '''return the tags associated with a node'''
2228 2228 if not self._tagscache.nodetagscache:
2229 2229 nodetagscache = {}
2230 2230 for t, n in self._tagscache.tags.items():
2231 2231 nodetagscache.setdefault(n, []).append(t)
2232 2232 for tags in nodetagscache.values():
2233 2233 tags.sort()
2234 2234 self._tagscache.nodetagscache = nodetagscache
2235 2235 return self._tagscache.nodetagscache.get(node, [])
2236 2236
2237 2237 def nodebookmarks(self, node):
2238 2238 """return the list of bookmarks pointing to the specified node"""
2239 2239 return self._bookmarks.names(node)
2240 2240
2241 2241 def branchmap(self):
2242 2242 """returns a dictionary {branch: [branchheads]} with branchheads
2243 2243 ordered by increasing revision number"""
2244 2244 return self._branchcaches[self]
2245 2245
2246 2246 @unfilteredmethod
2247 2247 def revbranchcache(self):
2248 2248 if not self._revbranchcache:
2249 2249 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2250 2250 return self._revbranchcache
2251 2251
2252 2252 def register_changeset(self, rev, changelogrevision):
2253 2253 self.revbranchcache().setdata(rev, changelogrevision)
2254 2254
2255 2255 def branchtip(self, branch, ignoremissing=False):
2256 2256 """return the tip node for a given branch
2257 2257
2258 2258 If ignoremissing is True, then this method will not raise an error.
2259 2259 This is helpful for callers that only expect None for a missing branch
2260 2260 (e.g. namespace).
2261 2261
2262 2262 """
2263 2263 try:
2264 2264 return self.branchmap().branchtip(branch)
2265 2265 except KeyError:
2266 2266 if not ignoremissing:
2267 2267 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2268 2268 else:
2269 2269 pass
2270 2270
2271 2271 def lookup(self, key):
2272 2272 node = scmutil.revsymbol(self, key).node()
2273 2273 if node is None:
2274 2274 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2275 2275 return node
2276 2276
2277 2277 def lookupbranch(self, key):
2278 2278 if self.branchmap().hasbranch(key):
2279 2279 return key
2280 2280
2281 2281 return scmutil.revsymbol(self, key).branch()
2282 2282
2283 2283 def known(self, nodes):
2284 2284 cl = self.changelog
2285 2285 get_rev = cl.index.get_rev
2286 2286 filtered = cl.filteredrevs
2287 2287 result = []
2288 2288 for n in nodes:
2289 2289 r = get_rev(n)
2290 2290 resp = not (r is None or r in filtered)
2291 2291 result.append(resp)
2292 2292 return result
2293 2293
2294 2294 def local(self):
2295 2295 return self
2296 2296
2297 2297 def publishing(self):
2298 2298 # it's safe (and desirable) to trust the publish flag unconditionally
2299 2299 # so that we don't finalize changes shared between users via ssh or nfs
2300 2300 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2301 2301
2302 2302 def cancopy(self):
2303 2303 # so statichttprepo's override of local() works
2304 2304 if not self.local():
2305 2305 return False
2306 2306 if not self.publishing():
2307 2307 return True
2308 2308 # if publishing we can't copy if there is filtered content
2309 2309 return not self.filtered(b'visible').changelog.filteredrevs
2310 2310
2311 2311 def shared(self):
2312 2312 '''the type of shared repository (None if not shared)'''
2313 2313 if self.sharedpath != self.path:
2314 2314 return b'store'
2315 2315 return None
2316 2316
2317 2317 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2318 2318 return self.vfs.reljoin(self.root, f, *insidef)
2319 2319
2320 2320 def setparents(self, p1, p2=None):
2321 2321 if p2 is None:
2322 2322 p2 = self.nullid
2323 2323 self[None].setparents(p1, p2)
2324 2324 self._quick_access_changeid_invalidate()
2325 2325
2326 2326 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2327 2327 """changeid must be a changeset revision, if specified.
2328 2328 fileid can be a file revision or node."""
2329 2329 return context.filectx(
2330 2330 self, path, changeid, fileid, changectx=changectx
2331 2331 )
2332 2332
2333 2333 def getcwd(self) -> bytes:
2334 2334 return self.dirstate.getcwd()
2335 2335
2336 2336 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2337 2337 return self.dirstate.pathto(f, cwd)
2338 2338
2339 2339 def _loadfilter(self, filter):
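        # Filters come from the [encode]/[decode] hgrc sections; an assumed
        # example mapping gzipped files through external commands:
        #
        #   [encode]
        #   *.gz = pipe: gunzip
        #   [decode]
        #   *.gz = pipe: gzip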
2340 2340 if filter not in self._filterpats:
2341 2341 l = []
2342 2342 for pat, cmd in self.ui.configitems(filter):
2343 2343 if cmd == b'!':
2344 2344 continue
2345 2345 mf = matchmod.match(self.root, b'', [pat])
2346 2346 fn = None
2347 2347 params = cmd
2348 2348 for name, filterfn in self._datafilters.items():
2349 2349 if cmd.startswith(name):
2350 2350 fn = filterfn
2351 2351 params = cmd[len(name) :].lstrip()
2352 2352 break
2353 2353 if not fn:
2354 2354 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2355 2355 fn.__name__ = 'commandfilter'
2356 2356 # Wrap old filters not supporting keyword arguments
2357 2357 if not pycompat.getargspec(fn)[2]:
2358 2358 oldfn = fn
2359 2359 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2360 2360 fn.__name__ = 'compat-' + oldfn.__name__
2361 2361 l.append((mf, fn, params))
2362 2362 self._filterpats[filter] = l
2363 2363 return self._filterpats[filter]
2364 2364
2365 2365 def _filter(self, filterpats, filename, data):
2366 2366 for mf, fn, cmd in filterpats:
2367 2367 if mf(filename):
2368 2368 self.ui.debug(
2369 2369 b"filtering %s through %s\n"
2370 2370 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2371 2371 )
2372 2372 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2373 2373 break
2374 2374
2375 2375 return data
2376 2376
2377 2377 @unfilteredpropertycache
2378 2378 def _encodefilterpats(self):
2379 2379 return self._loadfilter(b'encode')
2380 2380
2381 2381 @unfilteredpropertycache
2382 2382 def _decodefilterpats(self):
2383 2383 return self._loadfilter(b'decode')
2384 2384
2385 2385 def adddatafilter(self, name, filter):
2386 2386 self._datafilters[name] = filter
2387 2387
2388 2388 def wread(self, filename: bytes) -> bytes:
2389 2389 if self.wvfs.islink(filename):
2390 2390 data = self.wvfs.readlink(filename)
2391 2391 else:
2392 2392 data = self.wvfs.read(filename)
2393 2393 return self._filter(self._encodefilterpats, filename, data)
2394 2394
2395 2395 def wwrite(
2396 2396 self,
2397 2397 filename: bytes,
2398 2398 data: bytes,
2399 2399 flags: bytes,
2400 2400 backgroundclose=False,
2401 2401 **kwargs,
2402 2402 ) -> int:
2403 2403 """write ``data`` into ``filename`` in the working directory
2404 2404
2405 2405         This returns the length of the written (maybe decoded) data.
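
        ``flags`` may contain ``b'l'`` (write the data as a symlink target)
        and/or ``b'x'`` (set the executable bit), as handled below.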
2406 2406 """
2407 2407 data = self._filter(self._decodefilterpats, filename, data)
2408 2408 if b'l' in flags:
2409 2409 self.wvfs.symlink(data, filename)
2410 2410 else:
2411 2411 self.wvfs.write(
2412 2412 filename, data, backgroundclose=backgroundclose, **kwargs
2413 2413 )
2414 2414 if b'x' in flags:
2415 2415 self.wvfs.setflags(filename, False, True)
2416 2416 else:
2417 2417 self.wvfs.setflags(filename, False, False)
2418 2418 return len(data)
2419 2419
2420 2420 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2421 2421 return self._filter(self._decodefilterpats, filename, data)
2422 2422
2423 2423 def currenttransaction(self):
2424 2424         """return the current transaction or None if none exists"""
2425 2425 if self._transref:
2426 2426 tr = self._transref()
2427 2427 else:
2428 2428 tr = None
2429 2429
2430 2430 if tr and tr.running():
2431 2431 return tr
2432 2432 return None
2433 2433
2434 2434 def transaction(self, desc, report=None):
2435 2435 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2436 2436 b'devel', b'check-locks'
2437 2437 ):
2438 2438 if self._currentlock(self._lockref) is None:
2439 2439 raise error.ProgrammingError(b'transaction requires locking')
2440 2440 tr = self.currenttransaction()
2441 2441 if tr is not None:
2442 2442 return tr.nest(name=desc)
2443 2443
2444 2444 # abort here if the journal already exists
2445 2445 if self.svfs.exists(b"journal"):
2446 2446 raise error.RepoError(
2447 2447 _(b"abandoned transaction found"),
2448 2448 hint=_(b"run 'hg recover' to clean up transaction"),
2449 2449 )
2450 2450
2451 2451 # At that point your dirstate should be clean:
2452 2452 #
2453 2453 # - If you don't have the wlock, why would you still have a dirty
2454 2454         #   dirstate?
2455 2455 #
2456 2456 # - If you hold the wlock, you should not be opening a transaction in
2457 2457         # the middle of a `dirstate.changing_*` block. The transaction needs to
2458 2458 # be open before that and wrap the change-context.
2459 2459 #
2460 2460 # - If you are not within a `dirstate.changing_*` context, why is our
2461 2461 # dirstate dirty?
2462 2462 if self.dirstate._dirty:
2463 2463 m = "cannot open a transaction with a dirty dirstate"
2464 2464 raise error.ProgrammingError(m)
2465 2465
2466 2466 idbase = b"%.40f#%f" % (random.random(), time.time())
2467 2467 ha = hex(hashutil.sha1(idbase).digest())
2468 2468 txnid = b'TXN:' + ha
2469 2469 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2470 2470
2471 2471 self._writejournal(desc)
2472 2472 if report:
2473 2473 rp = report
2474 2474 else:
2475 2475 rp = self.ui.warn
2476 2476 vfsmap = self.vfs_map
2477 2477 # we must avoid cyclic reference between repo and transaction.
2478 2478 reporef = weakref.ref(self)
2479 2479 # Code to track tag movement
2480 2480 #
2481 2481         # Since tags are all handled as file content, it is actually quite hard
2482 2482         # to track these movements from a code perspective. So we fall back to
2483 2483         # tracking at the repository level. One could envision tracking changes
2484 2484         # to the '.hgtags' file through changegroup application, but that fails
2485 2485         # to cope with cases where a transaction exposes new heads without a
2486 2486         # changegroup being involved (e.g. phase movement).
2487 2487 #
2488 2488 # For now, we gate the feature behind a flag since it likely comes with
2489 2489 # a performance impact. The current code runs more often than needed
2490 2490 # and does not use caches as much as it could. The current focus is on
2491 2491 # the behavior of the feature, so we disable it by default. The flag
2492 2492 # will be removed when we are happy with the performance impact.
2493 2493 #
2494 2494 # Once this feature is no longer experimental move the following
2495 2495 # documentation to the appropriate help section:
2496 2496 #
2497 2497 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2498 2498 # tags (new or changed or deleted tags). In addition the details of
2499 2499 # these changes are made available in a file at:
2500 2500 # ``REPOROOT/.hg/changes/tags.changes``.
2501 2501 # Make sure you check for HG_TAG_MOVED before reading that file as it
2502 2502 # might exist from a previous transaction even if no tags were touched
2503 2503 # in this one. Changes are recorded in a line-based format::
2504 2504 #
2505 2505 # <action> <hex-node> <tag-name>\n
2506 2506 #
2507 2507 # Actions are defined as follows:
2508 2508 # "-R": tag is removed,
2509 2509 # "+A": tag is added,
2510 2510 # "-M": tag is moved (old value),
2511 2511 # "+M": tag is moved (new value),
2512 2512 tracktags = lambda x: None
2513 2513 # experimental config: experimental.hook-track-tags
2514 2514 shouldtracktags = self.ui.configbool(
2515 2515 b'experimental', b'hook-track-tags'
2516 2516 )
2517 2517 if desc != b'strip' and shouldtracktags:
2518 2518 oldheads = self.changelog.headrevs()
2519 2519
2520 2520 def tracktags(tr2):
2521 2521 repo = reporef()
2522 2522 assert repo is not None # help pytype
2523 2523 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2524 2524 newheads = repo.changelog.headrevs()
2525 2525 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2526 2526 # note: we compare lists here.
2527 2527 # As we do it only once, building sets would not be cheaper.
2528 2528 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2529 2529 if changes:
2530 2530 tr2.hookargs[b'tag_moved'] = b'1'
2531 2531 with repo.vfs(
2532 2532 b'changes/tags.changes', b'w', atomictemp=True
2533 2533 ) as changesfile:
2534 2534 # note: we do not register the file with the transaction
2535 2535 # because we need it to still exist once the transaction
2536 2536 # is closed (for txnclose hooks)
2537 2537 tagsmod.writediff(changesfile, changes)
2538 2538
2539 2539 def validate(tr2):
2540 2540 """will run pre-closing hooks"""
2541 2541 # XXX the transaction API is a bit lacking here so we take a hacky
2542 2542 # path for now
2543 2543 #
2544 2544 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2545 2545 # dict is copied before those run. In addition we need the data
2546 2546 # available to in-memory hooks too.
2547 2547 #
2548 2548 # Moreover, we also need to make sure this runs before txnclose
2549 2549 # hooks and there is no "pending" mechanism that would execute
2550 2550 # logic only if hooks are about to run.
2551 2551 #
2552 2552 # Fixing this limitation of the transaction is also needed to track
2553 2553 # other families of changes (bookmarks, phases, obsolescence).
2554 2554 #
2555 2555 # This will have to be fixed before we remove the experimental
2556 2556 # gating.
2557 2557 tracktags(tr2)
2558 2558 repo = reporef()
2559 2559 assert repo is not None # help pytype
2560 2560
2561 2561 singleheadopt = (b'experimental', b'single-head-per-branch')
2562 2562 singlehead = repo.ui.configbool(*singleheadopt)
2563 2563 if singlehead:
2564 2564 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2565 2565 accountclosed = singleheadsub.get(
2566 2566 b"account-closed-heads", False
2567 2567 )
2568 2568 if singleheadsub.get(b"public-changes-only", False):
2569 2569 filtername = b"immutable"
2570 2570 else:
2571 2571 filtername = b"visible"
2572 2572 scmutil.enforcesinglehead(
2573 2573 repo, tr2, desc, accountclosed, filtername
2574 2574 )
2575 2575 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2576 2576 for name, (old, new) in sorted(
2577 2577 tr.changes[b'bookmarks'].items()
2578 2578 ):
2579 2579 args = tr.hookargs.copy()
2580 2580 args.update(bookmarks.preparehookargs(name, old, new))
2581 2581 repo.hook(
2582 2582 b'pretxnclose-bookmark',
2583 2583 throw=True,
2584 2584 **pycompat.strkwargs(args),
2585 2585 )
2586 2586 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2587 2587 cl = repo.unfiltered().changelog
2588 2588 for revs, (old, new) in tr.changes[b'phases']:
2589 2589 for rev in revs:
2590 2590 args = tr.hookargs.copy()
2591 2591 node = hex(cl.node(rev))
2592 2592 args.update(phases.preparehookargs(node, old, new))
2593 2593 repo.hook(
2594 2594 b'pretxnclose-phase',
2595 2595 throw=True,
2596 2596 **pycompat.strkwargs(args),
2597 2597 )
2598 2598
2599 2599 repo.hook(
2600 2600 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2601 2601 )
2602 2602
2603 2603 def releasefn(tr, success):
2604 2604 repo = reporef()
2605 2605 if repo is None:
2606 2606 # If the repo has been GC'd (and this release function is being
2607 2607 # called from transaction.__del__), there's not much we can do,
2608 2608 # so just leave the unfinished transaction there and let the
2609 2609 # user run `hg recover`.
2610 2610 return
2611 2611 if success:
2612 2612 # this should be explicitly invoked here, because
2613 2613 # in-memory changes aren't written out at closing
2614 2614 # transaction, if tr.addfilegenerator (via
2615 2615 # dirstate.write or so) isn't invoked while
2616 2616 # transaction running
2617 2617 repo.dirstate.write(None)
2618 2618 else:
2619 2619 # discard all changes (including ones already written
2620 2620 # out) in this transaction
2621 2621 repo.invalidate(clearfilecache=True)
2622 2622
2623 2623 tr = transaction.transaction(
2624 2624 rp,
2625 2625 self.svfs,
2626 2626 vfsmap,
2627 2627 b"journal",
2628 2628 b"undo",
2629 2629 lambda: None,
2630 2630 self.store.createmode,
2631 2631 validator=validate,
2632 2632 releasefn=releasefn,
2633 2633 checkambigfiles=_cachedfiles,
2634 2634 name=desc,
2635 2635 )
2636 2636 for vfs_id, path in self._journalfiles():
2637 2637 tr.add_journal(vfs_id, path)
2638 2638 tr.changes[b'origrepolen'] = len(self)
2639 2639 tr.changes[b'obsmarkers'] = set()
2640 2640 tr.changes[b'phases'] = []
2641 2641 tr.changes[b'bookmarks'] = {}
2642 2642
2643 2643 tr.hookargs[b'txnid'] = txnid
2644 2644 tr.hookargs[b'txnname'] = desc
2645 2645 tr.hookargs[b'changes'] = tr.changes
2646 2646 # note: writing the fncache only during finalize means that the file is
2647 2647 # outdated when running hooks. As fncache is used for streaming clones,
2648 2648 # this is not expected to break anything that happens during the hooks.
2649 2649 tr.addfinalize(b'flush-fncache', self.store.write)
2650 2650
2651 2651 def txnclosehook(tr2):
2652 2652 """To be run if transaction is successful, will schedule a hook run"""
2653 2653 # Don't reference tr2 in hook() so we don't hold a reference.
2654 2654 # This reduces memory consumption when there are multiple
2655 2655 # transactions per lock. This can likely go away if issue5045
2656 2656 # fixes the function accumulation.
2657 2657 hookargs = tr2.hookargs
2658 2658
2659 2659 def hookfunc(unused_success):
2660 2660 repo = reporef()
2661 2661 assert repo is not None # help pytype
2662 2662
2663 2663 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2664 2664 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2665 2665 for name, (old, new) in bmchanges:
2666 2666 args = tr.hookargs.copy()
2667 2667 args.update(bookmarks.preparehookargs(name, old, new))
2668 2668 repo.hook(
2669 2669 b'txnclose-bookmark',
2670 2670 throw=False,
2671 2671 **pycompat.strkwargs(args),
2672 2672 )
2673 2673
2674 2674 if hook.hashook(repo.ui, b'txnclose-phase'):
2675 2675 cl = repo.unfiltered().changelog
2676 2676 phasemv = sorted(
2677 2677 tr.changes[b'phases'], key=lambda r: r[0][0]
2678 2678 )
2679 2679 for revs, (old, new) in phasemv:
2680 2680 for rev in revs:
2681 2681 args = tr.hookargs.copy()
2682 2682 node = hex(cl.node(rev))
2683 2683 args.update(phases.preparehookargs(node, old, new))
2684 2684 repo.hook(
2685 2685 b'txnclose-phase',
2686 2686 throw=False,
2687 2687 **pycompat.strkwargs(args),
2688 2688 )
2689 2689
2690 2690 repo.hook(
2691 2691 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2692 2692 )
2693 2693
2694 2694 repo = reporef()
2695 2695 assert repo is not None # help pytype
2696 2696 repo._afterlock(hookfunc)
2697 2697
2698 2698 tr.addfinalize(b'txnclose-hook', txnclosehook)
2699 2699 # Include a leading "-" to make it happen before the transaction summary
2700 2700 # reports registered via scmutil.registersummarycallback() whose names
2701 2701 # are 00-txnreport etc. That way, the caches will be warm when the
2702 2702 # callbacks run.
2703 2703 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2704 2704
2705 2705 def txnaborthook(tr2):
2706 2706 """To be run if transaction is aborted"""
2707 2707 repo = reporef()
2708 2708 assert repo is not None # help pytype
2709 2709 repo.hook(
2710 2710 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2711 2711 )
2712 2712
2713 2713 tr.addabort(b'txnabort-hook', txnaborthook)
2714 2714 # avoid eager cache invalidation. in-memory data should be identical
2715 2715 # to stored data if transaction has no error.
2716 2716 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2717 2717 self._transref = weakref.ref(tr)
2718 2718 scmutil.registersummarycallback(self, tr, desc)
2719 2719 # This only exists to deal with rollback's need to have viable
2720 2720 # parents at the end of the operation. So back up viable parents at
2721 2721 # the time of this operation.
2722 2722 #
2723 2723 # We only do it when the `wlock` is taken, otherwise others might be
2724 2724 # altering the dirstate under us.
2725 2725 #
2726 2726 # This is really not a great way to do this (first, because we cannot
2727 2727 # always do it). More viable alternatives exist:
2728 2728 #
2729 2729 # - backing up only the working copy parents in a dedicated file and
2730 2730 # doing a clean "keep-update" to them on `hg rollback`.
2731 2731 #
2732 2732 # - slightly changing the behavior and applying logic similar to "hg
2733 2733 # strip" to pick a working copy destination on `hg rollback`
2734 2734 if self.currentwlock() is not None:
2735 2735 ds = self.dirstate
2736 2736 if not self.vfs.exists(b'branch'):
2737 2737 # force a file to be written if none exists
2738 2738 ds.setbranch(b'default', None)
2739 2739
2740 2740 def backup_dirstate(tr):
2741 2741 for f in ds.all_file_names():
2742 2742 # hardlink backup is okay because `dirstate` is always
2743 2743 # atomically written and possible data files are append-only
2744 2744 # and resistant to trailing data.
2745 2745 tr.addbackup(f, hardlink=True, location=b'plain')
2746 2746
2747 2747 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2748 2748 return tr
2749 2749
2750 2750 def _journalfiles(self):
2751 2751 return (
2752 2752 (self.svfs, b'journal'),
2753 2753 (self.vfs, b'journal.desc'),
2754 2754 )
2755 2755
2756 2756 def undofiles(self):
2757 2757 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2758 2758
2759 2759 @unfilteredmethod
2760 2760 def _writejournal(self, desc):
2761 2761 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2762 2762
2763 2763 def recover(self):
2764 2764 with self.lock():
2765 2765 if self.svfs.exists(b"journal"):
2766 2766 self.ui.status(_(b"rolling back interrupted transaction\n"))
2767 2767 vfsmap = self.vfs_map
2768 2768 transaction.rollback(
2769 2769 self.svfs,
2770 2770 vfsmap,
2771 2771 b"journal",
2772 2772 self.ui.warn,
2773 2773 checkambigfiles=_cachedfiles,
2774 2774 )
2775 2775 self.invalidate()
2776 2776 return True
2777 2777 else:
2778 2778 self.ui.warn(_(b"no interrupted transaction available\n"))
2779 2779 return False
2780 2780
2781 2781 def rollback(self, dryrun=False, force=False):
2782 2782 wlock = lock = None
2783 2783 try:
2784 2784 wlock = self.wlock()
2785 2785 lock = self.lock()
2786 2786 if self.svfs.exists(b"undo"):
2787 2787 return self._rollback(dryrun, force)
2788 2788 else:
2789 2789 self.ui.warn(_(b"no rollback information available\n"))
2790 2790 return 1
2791 2791 finally:
2792 2792 release(lock, wlock)
2793 2793
2794 2794 @unfilteredmethod # Until we get smarter cache management
2795 2795 def _rollback(self, dryrun, force):
2796 2796 ui = self.ui
2797 2797
2798 2798 parents = self.dirstate.parents()
2799 2799 try:
2800 2800 args = self.vfs.read(b'undo.desc').splitlines()
2801 2801 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2802 2802 if len(args) >= 3:
2803 2803 detail = args[2]
2804 2804 oldtip = oldlen - 1
2805 2805
2806 2806 if detail and ui.verbose:
2807 2807 msg = _(
2808 2808 b'repository tip rolled back to revision %d'
2809 2809 b' (undo %s: %s)\n'
2810 2810 ) % (oldtip, desc, detail)
2811 2811 else:
2812 2812 msg = _(
2813 2813 b'repository tip rolled back to revision %d (undo %s)\n'
2814 2814 ) % (oldtip, desc)
2815 2815 parentgone = any(self[p].rev() > oldtip for p in parents)
2816 2816 except IOError:
2817 2817 msg = _(b'rolling back unknown transaction\n')
2818 2818 desc = None
2819 2819 parentgone = True
2820 2820
2821 2821 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2822 2822 raise error.Abort(
2823 2823 _(
2824 2824 b'rollback of last commit while not checked out '
2825 2825 b'may lose data'
2826 2826 ),
2827 2827 hint=_(b'use -f to force'),
2828 2828 )
2829 2829
2830 2830 ui.status(msg)
2831 2831 if dryrun:
2832 2832 return 0
2833 2833
2834 2834 self.destroying()
2835 2835 vfsmap = self.vfs_map
2836 2836 skip_journal_pattern = None
2837 2837 if not parentgone:
2838 2838 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2839 2839 transaction.rollback(
2840 2840 self.svfs,
2841 2841 vfsmap,
2842 2842 b'undo',
2843 2843 ui.warn,
2844 2844 checkambigfiles=_cachedfiles,
2845 2845 skip_journal_pattern=skip_journal_pattern,
2846 2846 )
2847 2847 self.invalidate()
2848 2848 self.dirstate.invalidate()
2849 2849
2850 2850 if parentgone:
2851 2851 # replace this with some explicit parent update in the future.
2852 2852 has_node = self.changelog.index.has_node
2853 2853 if not all(has_node(p) for p in self.dirstate._pl):
2854 2854 # There was no dirstate to backup initially, we need to drop
2855 2855 # the existing one.
2856 2856 with self.dirstate.changing_parents(self):
2857 2857 self.dirstate.setparents(self.nullid)
2858 2858 self.dirstate.clear()
2859 2859
2860 2860 parents = tuple([p.rev() for p in self[None].parents()])
2861 2861 if len(parents) > 1:
2862 2862 ui.status(
2863 2863 _(
2864 2864 b'working directory now based on '
2865 2865 b'revisions %d and %d\n'
2866 2866 )
2867 2867 % parents
2868 2868 )
2869 2869 else:
2870 2870 ui.status(
2871 2871 _(b'working directory now based on revision %d\n') % parents
2872 2872 )
2873 2873 mergestatemod.mergestate.clean(self)
2874 2874
2875 2875 # TODO: if we know which new heads may result from this rollback, pass
2876 2876 # them to destroy(), which will prevent the branchhead cache from being
2877 2877 # invalidated.
2878 2878 self.destroyed()
2879 2879 return 0
2880 2880
2881 2881 def _buildcacheupdater(self, newtransaction):
2882 2882 """called during transaction to build the callback updating cache
2883 2883
2884 2884 Lives on the repository to help extensions that might want to augment
2885 2885 this logic. For this purpose, the created transaction is passed to the
2886 2886 method.
2887 2887 """
2888 2888 # we must avoid cyclic reference between repo and transaction.
2889 2889 reporef = weakref.ref(self)
2890 2890
2891 2891 def updater(tr):
2892 2892 repo = reporef()
2893 2893 assert repo is not None # help pytype
2894 2894 repo.updatecaches(tr)
2895 2895
2896 2896 return updater
2897 2897
2898 2898 @unfilteredmethod
2899 2899 def updatecaches(self, tr=None, full=False, caches=None):
2900 2900 """warm appropriate caches
2901 2901
2902 2902 If this function is called after a transaction closed, the transaction
2903 2903 will be available in the 'tr' argument. This can be used to selectively
2904 2904 update caches relevant to the changes in that transaction.
2905 2905
2906 2906 If 'full' is set, make sure all caches the function knows about have
2907 2907 up-to-date data. Even the ones usually loaded more lazily.
2908 2908
2909 2909 The `full` argument can take a special "post-clone" value. In this case
2910 2910 the cache warming is done after a clone, and some of the slower caches
2911 2911 might be skipped, namely the `.fnodetags` one. This argument is 5.8
2912 2912 specific as we plan for a cleaner way to deal with this in 5.9.
2913 2913 """
2914 2914 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2915 2915 # During strip, many caches are invalid but
2916 2916 # later call to `destroyed` will refresh them.
2917 2917 return
2918 2918
2919 2919 unfi = self.unfiltered()
2920 2920
2921 2921 if caches is None:
2922 2922 caches = repository.CACHES_DEFAULT
2923 2923
2924 2924 if repository.CACHE_BRANCHMAP_SERVED in caches:
2925 2925 if tr is None or tr.changes[b'origrepolen'] < len(self):
2926 2926 # accessing the 'served' branchmap should refresh all the others.
2927 2927 self.ui.debug(b'updating the branch cache\n')
2928 2928 self.filtered(b'served').branchmap()
2929 2929 self.filtered(b'served.hidden').branchmap()
2930 2930 # flush all possibly delayed writes.
2931 2931 self._branchcaches.write_delayed(self)
2932 2932
2933 2933 if repository.CACHE_CHANGELOG_CACHE in caches:
2934 2934 self.changelog.update_caches(transaction=tr)
2935 2935
2936 2936 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2937 2937 self.manifestlog.update_caches(transaction=tr)
2938 2938 for entry in self.store.walk():
2939 2939 if not entry.is_revlog:
2940 2940 continue
2941 2941 if not entry.is_manifestlog:
2942 2942 continue
2943 2943 manifestrevlog = entry.get_revlog_instance(self).get_revlog()
2944 2944 if manifestrevlog is not None:
2945 2945 manifestrevlog.update_caches(transaction=tr)
2946 2946
2947 2947 if repository.CACHE_REV_BRANCH in caches:
2948 2948 rbc = unfi.revbranchcache()
2949 2949 for r in unfi.changelog:
2950 2950 rbc.branchinfo(r)
2951 2951 rbc.write()
2952 2952
2953 2953 if repository.CACHE_FULL_MANIFEST in caches:
2954 2954 # ensure the working copy parents are in the manifestfulltextcache
2955 2955 for ctx in self[b'.'].parents():
2956 2956 ctx.manifest() # accessing the manifest is enough
2957 2957
2958 2958 if repository.CACHE_FILE_NODE_TAGS in caches:
2959 2959 # accessing fnode cache warms the cache
2960 2960 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2961 2961
2962 2962 if repository.CACHE_TAGS_DEFAULT in caches:
2963 2963 # accessing tags warm the cache
2964 2964 self.tags()
2965 2965 if repository.CACHE_TAGS_SERVED in caches:
2966 2966 self.filtered(b'served').tags()
2967 2967
2968 2968 if repository.CACHE_BRANCHMAP_ALL in caches:
2969 2969 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2970 2970 # so we're forcing a write to cause these caches to be warmed up
2971 2971 # even if they haven't explicitly been requested yet (if they've
2972 2972 # never been used by hg, they won't ever have been written, even if
2973 2973 # they're a subset of another kind of cache that *has* been used).
2974 2974 for filt in repoview.filtertable.keys():
2975 2975 filtered = self.filtered(filt)
2976 2976 filtered.branchmap().write(filtered)
2977 2977
2978 2978 def invalidatecaches(self):
2979 2979 if '_tagscache' in vars(self):
2980 2980 # can't use delattr on proxy
2981 2981 del self.__dict__['_tagscache']
2982 2982
2983 2983 self._branchcaches.clear()
2984 2984 self.invalidatevolatilesets()
2985 2985 self._sparsesignaturecache.clear()
2986 2986
2987 2987 def invalidatevolatilesets(self):
2988 2988 self.filteredrevcache.clear()
2989 2989 obsolete.clearobscaches(self)
2990 2990 self._quick_access_changeid_invalidate()
2991 2991
2992 2992 def invalidatedirstate(self):
2993 2993 """Invalidates the dirstate, causing the next call to dirstate
2994 2994 to check if it was modified since the last time it was read,
2995 2995 rereading it if it has.
2996 2996
2997 2997 This is different from dirstate.invalidate() in that it doesn't always
2998 2998 reread the dirstate. Use dirstate.invalidate() if you want to
2999 2999 explicitly read the dirstate again (i.e. restoring it to a previous
3000 3000 known good state)."""
3001 3001 unfi = self.unfiltered()
3002 3002 if 'dirstate' in unfi.__dict__:
3003 3003 assert not self.dirstate.is_changing_any
3004 3004 del unfi.__dict__['dirstate']
3005 3005
3006 3006 def invalidate(self, clearfilecache=False):
3007 3007 """Invalidates both store and non-store parts other than dirstate
3008 3008
3009 3009 If a transaction is running, invalidation of store is omitted,
3010 3010 because discarding in-memory changes might cause inconsistency
3011 3011 (e.g. incomplete fncache causes unintentional failure, but
3012 3012 redundant one doesn't).
3013 3013 """
3014 3014 unfiltered = self.unfiltered() # all file caches are stored unfiltered
3015 3015 for k in list(self._filecache.keys()):
3016 3016 if (
3017 3017 k == b'changelog'
3018 3018 and self.currenttransaction()
3019 3019 and self.changelog.is_delaying
3020 3020 ):
3021 3021 # The changelog object may store unwritten revisions. We don't
3022 3022 # want to lose them.
3023 3023 # TODO: Solve the problem instead of working around it.
3024 3024 continue
3025 3025
3026 3026 if clearfilecache:
3027 3027 del self._filecache[k]
3028 3028 try:
3029 3029 # XXX ideally, the key would be a unicode string to match the
3030 3030 # fact it refers to an attribute name. However changing this was
3031 3031 # a bit of scope creep compared to the series cleaning up
3032 3032 # del/set/getattr, so we kept things simple here.
3033 3033 delattr(unfiltered, pycompat.sysstr(k))
3034 3034 except AttributeError:
3035 3035 pass
3036 3036 self.invalidatecaches()
3037 3037 if not self.currenttransaction():
3038 3038 # TODO: Changing contents of store outside transaction
3039 3039 # causes inconsistency. We should make in-memory store
3040 3040 # changes detectable, and abort if changed.
3041 3041 self.store.invalidatecaches()
3042 3042
3043 3043 def invalidateall(self):
3044 3044 """Fully invalidates both store and non-store parts, causing the
3045 3045 subsequent operation to reread any outside changes."""
3046 3046 # extension should hook this to invalidate its caches
3047 3047 self.invalidate()
3048 3048 self.invalidatedirstate()
3049 3049
3050 3050 @unfilteredmethod
3051 3051 def _refreshfilecachestats(self, tr):
3052 3052 """Reload stats of cached files so that they are flagged as valid"""
3053 3053 for k, ce in self._filecache.items():
3054 3054 k = pycompat.sysstr(k)
3055 3055 if k == 'dirstate' or k not in self.__dict__:
3056 3056 continue
3057 3057 ce.refresh()
3058 3058
3059 3059 def _lock(
3060 3060 self,
3061 3061 vfs,
3062 3062 lockname,
3063 3063 wait,
3064 3064 releasefn,
3065 3065 acquirefn,
3066 3066 desc,
3067 3067 ):
3068 3068 timeout = 0
3069 3069 warntimeout = 0
3070 3070 if wait:
3071 3071 timeout = self.ui.configint(b"ui", b"timeout")
3072 3072 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3073 3073 # internal config: ui.signal-safe-lock
3074 3074 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3075 sync_file = self.ui.config(b'devel', b'lock-wait-sync-file')
3076 if not sync_file:
3077 sync_file = None
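# 'devel.lock-wait-sync-file' is a developer option; judging by the
# 'devel' section and the 'devel_wait_sync_file' parameter below, it
# presumably names a file used to synchronize with a process waiting
# on the lock (an assumption; an empty value simply disables it).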
3075 3078
3076 3079 l = lockmod.trylock(
3077 3080 self.ui,
3078 3081 vfs,
3079 3082 lockname,
3080 3083 timeout,
3081 3084 warntimeout,
3082 3085 releasefn=releasefn,
3083 3086 acquirefn=acquirefn,
3084 3087 desc=desc,
3085 3088 signalsafe=signalsafe,
3089 devel_wait_sync_file=sync_file,
3086 3090 )
3087 3091 return l
3088 3092
3089 3093 def _afterlock(self, callback):
3090 3094 """add a callback to be run when the repository is fully unlocked
3091 3095
3092 3096 The callback will be executed when the outermost lock is released
3093 3097 (with wlock being higher level than 'lock')."""
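# When no lock is currently held at all, the callback is invoked
# immediately (see the for/else fallback below).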
3094 3098 for ref in (self._wlockref, self._lockref):
3095 3099 l = ref and ref()
3096 3100 if l and l.held:
3097 3101 l.postrelease.append(callback)
3098 3102 break
3099 3103 else: # no lock has been found.
3100 3104 callback(True)
3101 3105
3102 3106 def lock(self, wait=True):
3103 3107 """Lock the repository store (.hg/store) and return a weak reference
3104 3108 to the lock. Use this before modifying the store (e.g. committing or
3105 3109 stripping). If you are opening a transaction, get a lock as well.
3106 3110
3107 3111 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3108 3112 'wlock' first to avoid a dead-lock hazard."""
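# A minimal usage sketch when both locks are needed (mirrors the
# pattern used by commit() below):
#
#   with repo.wlock(), repo.lock():
#       ...  # modify store and working copy safely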
3109 3113 l = self._currentlock(self._lockref)
3110 3114 if l is not None:
3111 3115 l.lock()
3112 3116 return l
3113 3117
3114 3118 l = self._lock(
3115 3119 vfs=self.svfs,
3116 3120 lockname=b"lock",
3117 3121 wait=wait,
3118 3122 releasefn=None,
3119 3123 acquirefn=self.invalidate,
3120 3124 desc=_(b'repository %s') % self.origroot,
3121 3125 )
3122 3126 self._lockref = weakref.ref(l)
3123 3127 return l
3124 3128
3125 3129 def wlock(self, wait=True):
3126 3130 """Lock the non-store parts of the repository (everything under
3127 3131 .hg except .hg/store) and return a weak reference to the lock.
3128 3132
3129 3133 Use this before modifying files in .hg.
3130 3134
3131 3135 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3132 3136 'wlock' first to avoid a dead-lock hazard."""
3133 3137 l = self._wlockref() if self._wlockref else None
3134 3138 if l is not None and l.held:
3135 3139 l.lock()
3136 3140 return l
3137 3141
3138 3142 # We do not need to check for non-waiting lock acquisitions. Such
3139 3143 # acquisitions would not cause a dead-lock as they would just fail.
3140 3144 if wait and (
3141 3145 self.ui.configbool(b'devel', b'all-warnings')
3142 3146 or self.ui.configbool(b'devel', b'check-locks')
3143 3147 ):
3144 3148 if self._currentlock(self._lockref) is not None:
3145 3149 self.ui.develwarn(b'"wlock" acquired after "lock"')
3146 3150
3147 3151 def unlock():
3148 3152 if self.dirstate.is_changing_any:
3149 3153 msg = b"wlock release in the middle of a changing parents"
3150 3154 self.ui.develwarn(msg)
3151 3155 self.dirstate.invalidate()
3152 3156 else:
3153 3157 if self.dirstate._dirty:
3154 3158 msg = b"dirty dirstate on wlock release"
3155 3159 self.ui.develwarn(msg)
3156 3160 self.dirstate.write(None)
3157 3161
3158 3162 unfi = self.unfiltered()
3159 3163 if 'dirstate' in unfi.__dict__:
3160 3164 del unfi.__dict__['dirstate']
3161 3165
3162 3166 l = self._lock(
3163 3167 self.vfs,
3164 3168 b"wlock",
3165 3169 wait,
3166 3170 unlock,
3167 3171 self.invalidatedirstate,
3168 3172 _(b'working directory of %s') % self.origroot,
3169 3173 )
3170 3174 self._wlockref = weakref.ref(l)
3171 3175 return l
3172 3176
3173 3177 def _currentlock(self, lockref):
3174 3178 """Returns the lock if it's held, or None if it's not."""
3175 3179 if lockref is None:
3176 3180 return None
3177 3181 l = lockref()
3178 3182 if l is None or not l.held:
3179 3183 return None
3180 3184 return l
3181 3185
3182 3186 def currentwlock(self):
3183 3187 """Returns the wlock if it's held, or None if it's not."""
3184 3188 return self._currentlock(self._wlockref)
3185 3189
3186 3190 def currentlock(self):
3187 3191 """Returns the lock if it's held, or None if it's not."""
3188 3192 return self._currentlock(self._lockref)
3189 3193
3190 3194 def checkcommitpatterns(self, wctx, match, status, fail):
3191 3195 """check for commit arguments that aren't committable"""
3192 3196 if match.isexact() or match.prefix():
3193 3197 matched = set(status.modified + status.added + status.removed)
3194 3198
3195 3199 for f in match.files():
3196 3200 f = self.dirstate.normalize(f)
3197 3201 if f == b'.' or f in matched or f in wctx.substate:
3198 3202 continue
3199 3203 if f in status.deleted:
3200 3204 fail(f, _(b'file not found!'))
3201 3205 # Is it a directory that exists or used to exist?
3202 3206 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3203 3207 d = f + b'/'
3204 3208 for mf in matched:
3205 3209 if mf.startswith(d):
3206 3210 break
3207 3211 else:
3208 3212 fail(f, _(b"no match under directory!"))
3209 3213 elif f not in self.dirstate:
3210 3214 fail(f, _(b"file not tracked!"))
3211 3215
3212 3216 @unfilteredmethod
3213 3217 def commit(
3214 3218 self,
3215 3219 text=b"",
3216 3220 user=None,
3217 3221 date=None,
3218 3222 match=None,
3219 3223 force=False,
3220 3224 editor=None,
3221 3225 extra=None,
3222 3226 ):
3223 3227 """Add a new revision to current repository.
3224 3228
3225 3229 Revision information is gathered from the working directory,
3226 3230 match can be used to filter the committed files. If editor is
3227 3231 supplied, it is called to get a commit message.
3228 3232 """
3229 3233 if extra is None:
3230 3234 extra = {}
3231 3235
3232 3236 def fail(f, msg):
3233 3237 raise error.InputError(b'%s: %s' % (f, msg))
3234 3238
3235 3239 if not match:
3236 3240 match = matchmod.always()
3237 3241
3238 3242 if not force:
3239 3243 match.bad = fail
3240 3244
3241 3245 # lock() for recent changelog (see issue4368)
3242 3246 with self.wlock(), self.lock():
3243 3247 wctx = self[None]
3244 3248 merge = len(wctx.parents()) > 1
3245 3249
3246 3250 if not force and merge and not match.always():
3247 3251 raise error.Abort(
3248 3252 _(
3249 3253 b'cannot partially commit a merge '
3250 3254 b'(do not specify files or patterns)'
3251 3255 )
3252 3256 )
3253 3257
3254 3258 status = self.status(match=match, clean=force)
3255 3259 if force:
3256 3260 status.modified.extend(
3257 3261 status.clean
3258 3262 ) # mq may commit clean files
3259 3263
3260 3264 # check subrepos
3261 3265 subs, commitsubs, newstate = subrepoutil.precommit(
3262 3266 self.ui, wctx, status, match, force=force
3263 3267 )
3264 3268
3265 3269 # make sure all explicit patterns are matched
3266 3270 if not force:
3267 3271 self.checkcommitpatterns(wctx, match, status, fail)
3268 3272
3269 3273 cctx = context.workingcommitctx(
3270 3274 self, status, text, user, date, extra
3271 3275 )
3272 3276
3273 3277 ms = mergestatemod.mergestate.read(self)
3274 3278 mergeutil.checkunresolved(ms)
3275 3279
3276 3280 # internal config: ui.allowemptycommit
3277 3281 if cctx.isempty() and not self.ui.configbool(
3278 3282 b'ui', b'allowemptycommit'
3279 3283 ):
3280 3284 self.ui.debug(b'nothing to commit, clearing merge state\n')
3281 3285 ms.reset()
3282 3286 return None
3283 3287
3284 3288 if merge and cctx.deleted():
3285 3289 raise error.Abort(_(b"cannot commit merge with missing files"))
3286 3290
3287 3291 if editor:
3288 3292 cctx._text = editor(self, cctx, subs)
3289 3293 edited = text != cctx._text
3290 3294
3291 3295 # Save commit message in case this transaction gets rolled back
3292 3296 # (e.g. by a pretxncommit hook). Leave the content alone on
3293 3297 # the assumption that the user will use the same editor again.
3294 3298 msg_path = self.savecommitmessage(cctx._text)
3295 3299
3296 3300 # commit subs and write new state
3297 3301 if subs:
3298 3302 uipathfn = scmutil.getuipathfn(self)
3299 3303 for s in sorted(commitsubs):
3300 3304 sub = wctx.sub(s)
3301 3305 self.ui.status(
3302 3306 _(b'committing subrepository %s\n')
3303 3307 % uipathfn(subrepoutil.subrelpath(sub))
3304 3308 )
3305 3309 sr = sub.commit(cctx._text, user, date)
3306 3310 newstate[s] = (newstate[s][0], sr)
3307 3311 subrepoutil.writestate(self, newstate)
3308 3312
3309 3313 p1, p2 = self.dirstate.parents()
3310 3314 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3311 3315 try:
3312 3316 self.hook(
3313 3317 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3314 3318 )
3315 3319 with self.transaction(b'commit'):
3316 3320 ret = self.commitctx(cctx, True)
3317 3321 # update bookmarks, dirstate and mergestate
3318 3322 bookmarks.update(self, [p1, p2], ret)
3319 3323 cctx.markcommitted(ret)
3320 3324 ms.reset()
3321 3325 except: # re-raises
3322 3326 if edited:
3323 3327 self.ui.write(
3324 3328 _(b'note: commit message saved in %s\n') % msg_path
3325 3329 )
3326 3330 self.ui.write(
3327 3331 _(
3328 3332 b"note: use 'hg commit --logfile "
3329 3333 b"%s --edit' to reuse it\n"
3330 3334 )
3331 3335 % msg_path
3332 3336 )
3333 3337 raise
3334 3338
3335 3339 def commithook(unused_success):
3336 3340 # hack for commands that use a temporary commit (eg: histedit):
3337 3341 # the temporary commit may have been stripped before the hook runs
3338 3342 if self.changelog.hasnode(ret):
3339 3343 self.hook(
3340 3344 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3341 3345 )
3342 3346
3343 3347 self._afterlock(commithook)
3344 3348 return ret
3345 3349
3346 3350 @unfilteredmethod
3347 3351 def commitctx(self, ctx, error=False, origctx=None):
3348 3352 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3349 3353
3350 3354 @unfilteredmethod
3351 3355 def destroying(self):
3352 3356 """Inform the repository that nodes are about to be destroyed.
3353 3357 Intended for use by strip and rollback, so there's a common
3354 3358 place for anything that has to be done before destroying history.
3355 3359
3356 3360 This is mostly useful for saving state that is in memory and waiting
3357 3361 to be flushed when the current lock is released. Because a call to
3358 3362 destroyed is imminent, the repo will be invalidated causing those
3359 3363 changes to stay in memory (waiting for the next unlock), or vanish
3360 3364 completely.
3361 3365 """
3362 3366 # When using the same lock to commit and strip, the phasecache is left
3363 3367 # dirty after committing. Then when we strip, the repo is invalidated,
3364 3368 # causing those changes to disappear.
3365 3369 if '_phasecache' in vars(self):
3366 3370 self._phasecache.write(self)
3367 3371
3368 3372 @unfilteredmethod
3369 3373 def destroyed(self):
3370 3374 """Inform the repository that nodes have been destroyed.
3371 3375 Intended for use by strip and rollback, so there's a common
3372 3376 place for anything that has to be done after destroying history.
3373 3377 """
3374 3378 # refresh all repository caches
3375 3379 self.updatecaches()
3376 3380
3377 3381 # Ensure the persistent tag cache is updated. Doing it now
3378 3382 # means that the tag cache only has to worry about destroyed
3379 3383 # heads immediately after a strip/rollback. That in turn
3380 3384 # guarantees that "cachetip == currenttip" (comparing both rev
3381 3385 # and node) always means no nodes have been added or destroyed.
3382 3386
3383 3387 # XXX this is suboptimal when qrefresh'ing: we strip the current
3384 3388 # head, refresh the tag cache, then immediately add a new head.
3385 3389 # But I think doing it this way is necessary for the "instant
3386 3390 # tag cache retrieval" case to work.
3387 3391 self.invalidate()
3388 3392
3389 3393 def status(
3390 3394 self,
3391 3395 node1=b'.',
3392 3396 node2=None,
3393 3397 match=None,
3394 3398 ignored=False,
3395 3399 clean=False,
3396 3400 unknown=False,
3397 3401 listsubrepos=False,
3398 3402 ):
3399 3403 '''a convenience method that calls node1.status(node2)'''
3400 3404 return self[node1].status(
3401 3405 node2, match, ignored, clean, unknown, listsubrepos
3402 3406 )
3403 3407
3404 3408 def addpostdsstatus(self, ps):
3405 3409 """Add a callback to run within the wlock, at the point at which status
3406 3410 fixups happen.
3407 3411
3408 3412 On status completion, callback(wctx, status) will be called with the
3409 3413 wlock held, unless the dirstate has changed from underneath or the wlock
3410 3414 couldn't be grabbed.
3411 3415
3412 3416 Callbacks should not capture and use a cached copy of the dirstate --
3413 3417 it might change in the meanwhile. Instead, they should access the
3414 3418 dirstate via wctx.repo().dirstate.
3415 3419
3416 3420 This list is emptied out after each status run -- extensions should
3417 3421 make sure to add to this list each time dirstate.status is called.
3418 3422 Extensions should also make sure they don't call this for statuses
3419 3423 that don't involve the dirstate.
3420 3424 """
3421 3425
3422 3426 # The list is located here for uniqueness reasons -- it is actually
3423 3427 # managed by the workingctx, but that isn't unique per-repo.
3424 3428 self._postdsstatus.append(ps)
3425 3429
3426 3430 def postdsstatus(self):
3427 3431 """Used by workingctx to get the list of post-dirstate-status hooks."""
3428 3432 return self._postdsstatus
3429 3433
3430 3434 def clearpostdsstatus(self):
3431 3435 """Used by workingctx to clear post-dirstate-status hooks."""
3432 3436 del self._postdsstatus[:]
3433 3437
3434 3438 def heads(self, start=None):
3435 3439 if start is None:
3436 3440 cl = self.changelog
3437 3441 headrevs = reversed(cl.headrevs())
3438 3442 return [cl.node(rev) for rev in headrevs]
3439 3443
3440 3444 heads = self.changelog.heads(start)
3441 3445 # sort the output in rev descending order
3442 3446 return sorted(heads, key=self.changelog.rev, reverse=True)
3443 3447
3444 3448 def branchheads(self, branch=None, start=None, closed=False):
3445 3449 """return a (possibly filtered) list of heads for the given branch
3446 3450
3447 3451 Heads are returned in topological order, from newest to oldest.
3448 3452 If branch is None, use the dirstate branch.
3449 3453 If start is not None, return only heads reachable from start.
3450 3454 If closed is True, return heads that are marked as closed as well.
3451 3455 """
3452 3456 if branch is None:
3453 3457 branch = self[None].branch()
3454 3458 branches = self.branchmap()
3455 3459 if not branches.hasbranch(branch):
3456 3460 return []
3457 3461 # the cache returns heads ordered lowest to highest
3458 3462 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3459 3463 if start is not None:
3460 3464 # filter out the heads that cannot be reached from startrev
3461 3465 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3462 3466 bheads = [h for h in bheads if h in fbheads]
3463 3467 return bheads
3464 3468
3465 3469 def branches(self, nodes):
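# For each node, walk first parents until reaching a merge or a root,
# then record (start, stop, p1(stop), p2(stop)) for that linear run.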
3466 3470 if not nodes:
3467 3471 nodes = [self.changelog.tip()]
3468 3472 b = []
3469 3473 for n in nodes:
3470 3474 t = n
3471 3475 while True:
3472 3476 p = self.changelog.parents(n)
3473 3477 if p[1] != self.nullid or p[0] == self.nullid:
3474 3478 b.append((t, n, p[0], p[1]))
3475 3479 break
3476 3480 n = p[0]
3477 3481 return b
3478 3482
3479 3483 def between(self, pairs):
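# For each (top, bottom) pair, walk first parents from top towards
# bottom and keep the nodes found at exponentially spaced steps
# (1, 2, 4, 8, ...), yielding a logarithmic sample of the chain.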
3480 3484 r = []
3481 3485
3482 3486 for top, bottom in pairs:
3483 3487 n, l, i = top, [], 0
3484 3488 f = 1
3485 3489
3486 3490 while n != bottom and n != self.nullid:
3487 3491 p = self.changelog.parents(n)[0]
3488 3492 if i == f:
3489 3493 l.append(n)
3490 3494 f = f * 2
3491 3495 n = p
3492 3496 i += 1
3493 3497
3494 3498 r.append(l)
3495 3499
3496 3500 return r
3497 3501
3498 3502 def checkpush(self, pushop):
3499 3503 """Extensions can override this function if additional checks have
3500 3504 to be performed before pushing, or call it if they override push
3501 3505 command.
3502 3506 """
3503 3507
3504 3508 @unfilteredpropertycache
3505 3509 def prepushoutgoinghooks(self):
3506 3510 """Return util.hooks consists of a pushop with repo, remote, outgoing
3507 3511 methods, which are called before pushing changesets.
3508 3512 """
3509 3513 return util.hooks()
3510 3514
3511 3515 def pushkey(self, namespace, key, old, new):
3512 3516 try:
3513 3517 tr = self.currenttransaction()
3514 3518 hookargs = {}
3515 3519 if tr is not None:
3516 3520 hookargs.update(tr.hookargs)
3517 3521 hookargs = pycompat.strkwargs(hookargs)
3518 3522 hookargs['namespace'] = namespace
3519 3523 hookargs['key'] = key
3520 3524 hookargs['old'] = old
3521 3525 hookargs['new'] = new
3522 3526 self.hook(b'prepushkey', throw=True, **hookargs)
3523 3527 except error.HookAbort as exc:
3524 3528 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3525 3529 if exc.hint:
3526 3530 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3527 3531 return False
3528 3532 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3529 3533 ret = pushkey.push(self, namespace, key, old, new)
3530 3534
3531 3535 def runhook(unused_success):
3532 3536 self.hook(
3533 3537 b'pushkey',
3534 3538 namespace=namespace,
3535 3539 key=key,
3536 3540 old=old,
3537 3541 new=new,
3538 3542 ret=ret,
3539 3543 )
3540 3544
3541 3545 self._afterlock(runhook)
3542 3546 return ret
3543 3547
3544 3548 def listkeys(self, namespace):
3545 3549 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3546 3550 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3547 3551 values = pushkey.list(self, namespace)
3548 3552 self.hook(b'listkeys', namespace=namespace, values=values)
3549 3553 return values
3550 3554
3551 3555 def debugwireargs(self, one, two, three=None, four=None, five=None):
3552 3556 '''used to test argument passing over the wire'''
3553 3557 return b"%s %s %s %s %s" % (
3554 3558 one,
3555 3559 two,
3556 3560 pycompat.bytestr(three),
3557 3561 pycompat.bytestr(four),
3558 3562 pycompat.bytestr(five),
3559 3563 )
3560 3564
3561 3565 def savecommitmessage(self, text):
3562 3566 fp = self.vfs(b'last-message.txt', b'wb')
3563 3567 try:
3564 3568 fp.write(text)
3565 3569 finally:
3566 3570 fp.close()
3567 3571 return self.pathto(fp.name[len(self.root) + 1 :])
3568 3572
3569 3573 def register_wanted_sidedata(self, category):
3570 3574 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3571 3575 # Only revlogv2 repos can want sidedata.
3572 3576 return
3573 3577 self._wanted_sidedata.add(pycompat.bytestr(category))
3574 3578
3575 3579 def register_sidedata_computer(
3576 3580 self, kind, category, keys, computer, flags, replace=False
3577 3581 ):
3578 3582 if kind not in revlogconst.ALL_KINDS:
3579 3583 msg = _(b"unexpected revlog kind '%s'.")
3580 3584 raise error.ProgrammingError(msg % kind)
3581 3585 category = pycompat.bytestr(category)
3582 3586 already_registered = category in self._sidedata_computers.get(kind, [])
3583 3587 if already_registered and not replace:
3584 3588 msg = _(
3585 3589 b"cannot register a sidedata computer twice for category '%s'."
3586 3590 )
3587 3591 raise error.ProgrammingError(msg % category)
3588 3592 if replace and not already_registered:
3589 3593 msg = _(
3590 3594 b"cannot replace a sidedata computer that isn't registered "
3591 3595 b"for category '%s'."
3592 3596 )
3593 3597 raise error.ProgrammingError(msg % category)
3594 3598 self._sidedata_computers.setdefault(kind, {})
3595 3599 self._sidedata_computers[kind][category] = (keys, computer, flags)
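# Hypothetical registration sketch (names invented for illustration):
#
#   repo.register_sidedata_computer(
#       revlogconst.KIND_CHANGELOG,  # assumed kind constant
#       b'example-category',
#       keys={b'example-key'},
#       computer=compute_example_sidedata,
#       flags=0,
#   )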
3596 3600
3597 3601
3598 3602 def undoname(fn: bytes) -> bytes:
3599 3603 base, name = os.path.split(fn)
3600 3604 assert name.startswith(b'journal')
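# e.g. undoname(b'journal.desc') == b'undo.desc'; only the first
# 'journal' occurrence in the basename is replaced.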
3601 3605 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3602 3606
3603 3607
3604 3608 def instance(ui, path: bytes, create, intents=None, createopts=None):
3605 3609 # prevent cyclic import localrepo -> upgrade -> localrepo
3606 3610 from . import upgrade
3607 3611
3608 3612 localpath = urlutil.urllocalpath(path)
3609 3613 if create:
3610 3614 createrepository(ui, localpath, createopts=createopts)
3611 3615
3612 3616 def repo_maker():
3613 3617 return makelocalrepository(ui, localpath, intents=intents)
3614 3618
3615 3619 repo = repo_maker()
3616 3620 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3617 3621 return repo
3618 3622
3619 3623
3620 3624 def islocal(path: bytes) -> bool:
3621 3625 return True
3622 3626
3623 3627
3624 3628 def defaultcreateopts(ui, createopts=None):
3625 3629 """Populate the default creation options for a repository.
3626 3630
3627 3631 A dictionary of explicitly requested creation options can be passed
3628 3632 in. Missing keys will be populated.
3629 3633 """
3630 3634 createopts = dict(createopts or {})
3631 3635
3632 3636 if b'backend' not in createopts:
3633 3637 # experimental config: storage.new-repo-backend
3634 3638 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3635 3639
3636 3640 return createopts
3637 3641
3638 3642
3639 3643 def clone_requirements(ui, createopts, srcrepo):
3640 3644 """clone the requirements of a local repo for a local clone
3641 3645
3642 3646 The store requirements are unchanged while the working copy requirements
3643 3647 depend on the configuration
3644 3648 """
3645 3649 target_requirements = set()
3646 3650 if not srcrepo.requirements:
3647 3651 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3648 3652 # with it.
3649 3653 return target_requirements
3650 3654 createopts = defaultcreateopts(ui, createopts=createopts)
3651 3655 for r in newreporequirements(ui, createopts):
3652 3656 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3653 3657 target_requirements.add(r)
3654 3658
3655 3659 for r in srcrepo.requirements:
3656 3660 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3657 3661 target_requirements.add(r)
3658 3662 return target_requirements
3659 3663
3660 3664
3661 3665 def newreporequirements(ui, createopts):
3662 3666 """Determine the set of requirements for a new local repository.
3663 3667
3664 3668 Extensions can wrap this function to specify custom requirements for
3665 3669 new repositories.
3666 3670 """
3667 3671
3668 3672 if b'backend' not in createopts:
3669 3673 raise error.ProgrammingError(
3670 3674 b'backend key not present in createopts; '
3671 3675 b'was defaultcreateopts() called?'
3672 3676 )
3673 3677
3674 3678 if createopts[b'backend'] != b'revlogv1':
3675 3679 raise error.Abort(
3676 3680 _(
3677 3681 b'unable to determine repository requirements for '
3678 3682 b'storage backend: %s'
3679 3683 )
3680 3684 % createopts[b'backend']
3681 3685 )
3682 3686
3683 3687 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3684 3688 if ui.configbool(b'format', b'usestore'):
3685 3689 requirements.add(requirementsmod.STORE_REQUIREMENT)
3686 3690 if ui.configbool(b'format', b'usefncache'):
3687 3691 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3688 3692 if ui.configbool(b'format', b'dotencode'):
3689 3693 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3690 3694
3691 3695 compengines = ui.configlist(b'format', b'revlog-compression')
3692 3696 for compengine in compengines:
3693 3697 if compengine in util.compengines:
3694 3698 engine = util.compengines[compengine]
3695 3699 if engine.available() and engine.revlogheader():
3696 3700 break
3697 3701 else:
3698 3702 raise error.Abort(
3699 3703 _(
3700 3704 b'compression engines %s defined by '
3701 3705 b'format.revlog-compression not available'
3702 3706 )
3703 3707 % b', '.join(b'"%s"' % e for e in compengines),
3704 3708 hint=_(
3705 3709 b'run "hg debuginstall" to list available '
3706 3710 b'compression engines'
3707 3711 ),
3708 3712 )
3709 3713
3710 3714 # zlib is the historical default and doesn't need an explicit requirement.
3711 3715 if compengine == b'zstd':
3712 3716 requirements.add(b'revlog-compression-zstd')
3713 3717 elif compengine != b'zlib':
3714 3718 requirements.add(b'exp-compression-%s' % compengine)
3715 3719
3716 3720 if scmutil.gdinitconfig(ui):
3717 3721 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3718 3722 if ui.configbool(b'format', b'sparse-revlog'):
3719 3723 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3720 3724
3721 3725 # experimental config: format.use-dirstate-v2
3722 3726 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3723 3727 if ui.configbool(b'format', b'use-dirstate-v2'):
3724 3728 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3725 3729
3726 3730 # experimental config: format.exp-use-copies-side-data-changeset
3727 3731 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3728 3732 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3729 3733 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3730 3734 if ui.configbool(b'experimental', b'treemanifest'):
3731 3735 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3732 3736
3733 3737 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3734 3738 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3735 3739 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3736 3740
3737 3741 revlogv2 = ui.config(b'experimental', b'revlogv2')
3738 3742 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3739 3743 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3740 3744 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3741 3745 # experimental config: format.internal-phase
3742 3746 if ui.configbool(b'format', b'use-internal-phase'):
3743 3747 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3744 3748
3745 3749 # experimental config: format.exp-archived-phase
3746 3750 if ui.configbool(b'format', b'exp-archived-phase'):
3747 3751 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3748 3752
3749 3753 if createopts.get(b'narrowfiles'):
3750 3754 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3751 3755
3752 3756 if createopts.get(b'lfs'):
3753 3757 requirements.add(b'lfs')
3754 3758
3755 3759 if ui.configbool(b'format', b'bookmarks-in-store'):
3756 3760 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3757 3761
3758 3762 # The feature is disabled unless a fast implementation is available.
3759 3763 persistent_nodemap_default = policy.importrust('revlog') is not None
3760 3764 if ui.configbool(
3761 3765 b'format', b'use-persistent-nodemap', persistent_nodemap_default
3762 3766 ):
3763 3767 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3764 3768
3765 3769 # if share-safe is enabled, let's create the new repository with the new
3766 3770 # requirement
3767 3771 if ui.configbool(b'format', b'use-share-safe'):
3768 3772 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3769 3773
3770 3774 # if we are creating a share-repoΒΉ we have to handle requirements
3771 3775 # differently.
3772 3776 #
3773 3777 # [1] (i.e. reusing the store from another repository, just having a
3774 3778 # working copy)
3775 3779 if b'sharedrepo' in createopts:
3776 3780 source_requirements = set(createopts[b'sharedrepo'].requirements)
3777 3781
3778 3782 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3779 3783 # share to an old school repository, we have to copy the
3780 3784 # requirements and hope for the best.
3781 3785 requirements = source_requirements
3782 3786 else:
3783 3787 # We have control over the working copy only, so "copy" the non-
3784 3788 # working copy part over, ignoring previous logic.
3785 3789 to_drop = set()
3786 3790 for req in requirements:
3787 3791 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3788 3792 continue
3789 3793 if req in source_requirements:
3790 3794 continue
3791 3795 to_drop.add(req)
3792 3796 requirements -= to_drop
3793 3797 requirements |= source_requirements
3794 3798
3795 3799 if createopts.get(b'sharedrelative'):
3796 3800 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3797 3801 else:
3798 3802 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3799 3803
3800 3804 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3801 3805 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3802 3806 msg = _(b"ignoring unknown tracked key version: %d\n")
3803 3807 hint = _(
3804 3808 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3805 3809 )
3806 3810 if version != 1:
3807 3811 ui.warn(msg % version, hint=hint)
3808 3812 else:
3809 3813 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3810 3814
3811 3815 return requirements
3812 3816
3813 3817
3814 3818 def checkrequirementscompat(ui, requirements):
3815 3819 """Checks compatibility of repository requirements enabled and disabled.
3816 3820
3817 3821 Returns a set of requirements which need to be dropped because dependent
3818 3822 requirements are not enabled. Also warns users about it."""
3819 3823
3820 3824 dropped = set()
3821 3825
3822 3826 if requirementsmod.STORE_REQUIREMENT not in requirements:
3823 3827 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3824 3828 ui.warn(
3825 3829 _(
3826 3830 b'ignoring enabled \'format.bookmarks-in-store\' config '
3827 3831 b'because it is incompatible with disabled '
3828 3832 b'\'format.usestore\' config\n'
3829 3833 )
3830 3834 )
3831 3835 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3832 3836
3833 3837 if (
3834 3838 requirementsmod.SHARED_REQUIREMENT in requirements
3835 3839 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3836 3840 ):
3837 3841 raise error.Abort(
3838 3842 _(
3839 3843 b"cannot create shared repository as source was created"
3840 3844 b" with 'format.usestore' config disabled"
3841 3845 )
3842 3846 )
3843 3847
3844 3848 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3845 3849 if ui.hasconfig(b'format', b'use-share-safe'):
3846 3850 msg = _(
3847 3851 b"ignoring enabled 'format.use-share-safe' config because "
3848 3852 b"it is incompatible with disabled 'format.usestore'"
3849 3853 b" config\n"
3850 3854 )
3851 3855 ui.warn(msg)
3852 3856 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3853 3857
3854 3858 return dropped
3855 3859
3856 3860
3857 3861 def filterknowncreateopts(ui, createopts):
3858 3862 """Filters a dict of repo creation options against options that are known.
3859 3863
3860 3864 Receives a dict of repo creation options and returns a dict of those
3861 3865 options that we don't know how to handle.
3862 3866
3863 3867 This function is called as part of repository creation. If the
3864 3868 returned dict contains any items, repository creation will not
3865 3869 be allowed, as it means there was a request to create a repository
3866 3870 with options not recognized by loaded code.
3867 3871
3868 3872 Extensions can wrap this function to filter out creation options
3869 3873 they know how to handle.
3870 3874 """
3871 3875 known = {
3872 3876 b'backend',
3873 3877 b'lfs',
3874 3878 b'narrowfiles',
3875 3879 b'sharedrepo',
3876 3880 b'sharedrelative',
3877 3881 b'shareditems',
3878 3882 b'shallowfilestore',
3879 3883 }
3880 3884
3881 3885 return {k: v for k, v in createopts.items() if k not in known}
3882 3886
3883 3887
3884 3888 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3885 3889 """Create a new repository in a vfs.
3886 3890
3887 3891 ``path`` path to the new repo's working directory.
3888 3892 ``createopts`` options for the new repository.
3889 3893 ``requirements`` predefined set of requirements.
3890 3894 (incompatible with ``createopts``)
3891 3895
3892 3896 The following keys for ``createopts`` are recognized:
3893 3897
3894 3898 backend
3895 3899 The storage backend to use.
3896 3900 lfs
3897 3901 Repository will be created with ``lfs`` requirement. The lfs extension
3898 3902 will automatically be loaded when the repository is accessed.
3899 3903 narrowfiles
3900 3904 Set up repository to support narrow file storage.
3901 3905 sharedrepo
3902 3906 Repository object from which storage should be shared.
3903 3907 sharedrelative
3904 3908 Boolean indicating if the path to the shared repo should be
3905 3909 stored as relative. By default, the pointer to the "parent" repo
3906 3910 is stored as an absolute path.
3907 3911 shareditems
3908 3912 Set of items to share to the new repository (in addition to storage).
3909 3913 shallowfilestore
3910 3914 Indicates that storage for files should be shallow (not all ancestor
3911 3915 revisions are known).
3912 3916 """
3913 3917
3914 3918 if requirements is not None:
3915 3919 if createopts is not None:
3916 3920 msg = b'cannot specify both createopts and requirements'
3917 3921 raise error.ProgrammingError(msg)
3918 3922 createopts = {}
3919 3923 else:
3920 3924 createopts = defaultcreateopts(ui, createopts=createopts)
3921 3925
3922 3926 unknownopts = filterknowncreateopts(ui, createopts)
3923 3927
3924 3928 if not isinstance(unknownopts, dict):
3925 3929 raise error.ProgrammingError(
3926 3930 b'filterknowncreateopts() did not return a dict'
3927 3931 )
3928 3932
3929 3933 if unknownopts:
3930 3934 raise error.Abort(
3931 3935 _(
3932 3936 b'unable to create repository because of unknown '
3933 3937 b'creation option: %s'
3934 3938 )
3935 3939 % b', '.join(sorted(unknownopts)),
3936 3940 hint=_(b'is a required extension not loaded?'),
3937 3941 )
3938 3942
3939 3943 requirements = newreporequirements(ui, createopts=createopts)
3940 3944 requirements -= checkrequirementscompat(ui, requirements)
3941 3945
3942 3946 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3943 3947
3944 3948 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3945 3949 if hgvfs.exists():
3946 3950 raise error.RepoError(_(b'repository %s already exists') % path)
3947 3951
3948 3952 if b'sharedrepo' in createopts:
3949 3953 sharedpath = createopts[b'sharedrepo'].sharedpath
3950 3954
3951 3955 if createopts.get(b'sharedrelative'):
3952 3956 try:
3953 3957 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3954 3958 sharedpath = util.pconvert(sharedpath)
3955 3959 except (IOError, ValueError) as e:
3956 3960 # ValueError is raised on Windows if the drive letters differ
3957 3961 # on each path.
3958 3962 raise error.Abort(
3959 3963 _(b'cannot calculate relative path'),
3960 3964 hint=stringutil.forcebytestr(e),
3961 3965 )
3962 3966
3963 3967 if not wdirvfs.exists():
3964 3968 wdirvfs.makedirs()
3965 3969
3966 3970 hgvfs.makedir(notindexed=True)
3967 3971 if b'sharedrepo' not in createopts:
3968 3972 hgvfs.mkdir(b'cache')
3969 3973 hgvfs.mkdir(b'wcache')
3970 3974
3971 3975 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3972 3976 if has_store and b'sharedrepo' not in createopts:
3973 3977 hgvfs.mkdir(b'store')
3974 3978
3975 3979 # We create an invalid changelog outside the store so very old
3976 3980 # Mercurial versions (which didn't know about the requirements
3977 3981 # file) encounter an error on reading the changelog. This
3978 3982 # effectively locks out old clients and prevents them from
3979 3983 # mucking with a repo in an unknown format.
3980 3984 #
3981 3985 # The revlog header has version 65535, which won't be recognized by
3982 3986 # such old clients.
3983 3987 hgvfs.append(
3984 3988 b'00changelog.i',
3985 3989 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3986 3990 b'layout',
3987 3991 )
3988 3992
3989 3993 # Filter the requirements into working copy and store ones
3990 3994 wcreq, storereq = scmutil.filterrequirements(requirements)
3991 3995 # write working copy ones
3992 3996 scmutil.writerequires(hgvfs, wcreq)
 3993 3997 # If there are store requirements and the current repository
 3994 3998 # is not a shared one, write the store requirements.
 3995 3999 # For a new shared repository, we don't need to write them, as
 3996 4000 # they are already present in the shared source's store requires.
3997 4001 if storereq and b'sharedrepo' not in createopts:
3998 4002 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3999 4003 scmutil.writerequires(storevfs, storereq)
4000 4004
4001 4005 # Write out file telling readers where to find the shared store.
4002 4006 if b'sharedrepo' in createopts:
4003 4007 hgvfs.write(b'sharedpath', sharedpath)
4004 4008
4005 4009 if createopts.get(b'shareditems'):
4006 4010 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
4007 4011 hgvfs.write(b'shared', shared)
4008 4012
4009 4013
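For reference, a sketch of driving this API directly, with default creation options (the path is illustrative only):

    from mercurial import hg, localrepo, ui as uimod

    myui = uimod.ui.load()
    localrepo.createrepository(myui, b'/tmp/newrepo')
    # The requirements written above are honoured on first access.
    repo = hg.repository(myui, b'/tmp/newrepo')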
4010 4014 def poisonrepository(repo):
4011 4015 """Poison a repository instance so it can no longer be used."""
4012 4016 # Perform any cleanup on the instance.
4013 4017 repo.close()
4014 4018
4015 4019 # Our strategy is to replace the type of the object with one that
4016 4020 # has all attribute lookups result in error.
4017 4021 #
4018 4022 # But we have to allow the close() method because some constructors
4019 4023 # of repos call close() on repo references.
4020 4024 class poisonedrepository:
4021 4025 def __getattribute__(self, item):
4022 4026 if item == 'close':
4023 4027 return object.__getattribute__(self, item)
4024 4028
4025 4029 raise error.ProgrammingError(
4026 4030 b'repo instances should not be used after unshare'
4027 4031 )
4028 4032
4029 4033 def close(self):
4030 4034 pass
4031 4035
4032 4036 # We may have a repoview, which intercepts __setattr__. So be sure
4033 4037 # we operate at the lowest level possible.
4034 4038 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,402 +1,408 b''
1 1 # lock.py - simple advisory locking scheme for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import contextlib
10 10 import errno
11 11 import os
12 12 import signal
13 13 import socket
14 14 import time
15 15 import typing
16 16 import warnings
17 17
18 18 from .i18n import _
19 19
20 20 from . import (
21 21 encoding,
22 22 error,
23 23 pycompat,
24 24 util,
25 25 )
26 26
27 27 from .utils import procutil
28 28
29 29
30 30 def _getlockprefix():
31 31 """Return a string which is used to differentiate pid namespaces
32 32
33 33 It's useful to detect "dead" processes and remove stale locks with
 34 34 confidence. Typically it's just the hostname. On modern Linux, we include an
35 35 extra Linux-specific pid namespace identifier.
36 36 """
37 37 result = encoding.strtolocal(socket.gethostname())
38 38 if pycompat.sysplatform.startswith(b'linux'):
39 39 try:
40 40 result += b'/%x' % os.stat(b'/proc/self/ns/pid').st_ino
41 41 except (FileNotFoundError, PermissionError, NotADirectoryError):
42 42 pass
43 43 return result
44 44
45 45
46 46 @contextlib.contextmanager
47 47 def _delayedinterrupt():
48 48 """Block signal interrupt while doing something critical
49 49
50 50 This makes sure that the code block wrapped by this context manager won't
51 51 be interrupted.
52 52
 53 53 For Windows developers: it appears impossible to guard time.sleep()
 54 54 from CTRL_C_EVENT, so please don't use time.sleep() to test whether
 55 55 this is working.
56 56 """
57 57 assertedsigs = []
58 58 blocked = False
59 59 orighandlers = {}
60 60
61 61 def raiseinterrupt(num):
62 62 if num == getattr(signal, 'SIGINT', None) or num == getattr(
63 63 signal, 'CTRL_C_EVENT', None
64 64 ):
65 65 raise KeyboardInterrupt
66 66 else:
67 67 raise error.SignalInterrupt
68 68
69 69 def catchterm(num, frame):
70 70 if blocked:
71 71 assertedsigs.append(num)
72 72 else:
73 73 raiseinterrupt(num)
74 74
75 75 try:
 76 76 # save handlers first so they can be restored even if setup is
 77 77 # interrupted between signal.signal() and the orighandlers[] assignment.
78 78 for name in [
79 79 'CTRL_C_EVENT',
80 80 'SIGINT',
81 81 'SIGBREAK',
82 82 'SIGHUP',
83 83 'SIGTERM',
84 84 ]:
85 85 num = getattr(signal, name, None)
86 86 if num and num not in orighandlers:
87 87 orighandlers[num] = signal.getsignal(num)
88 88 try:
89 89 for num in orighandlers:
90 90 signal.signal(num, catchterm)
91 91 except ValueError:
92 92 pass # in a thread? no luck
93 93
94 94 blocked = True
95 95 yield
96 96 finally:
 97 97 # there is no simple way to reliably restore all signal handlers,
 98 98 # because any loops, recursive function calls, except blocks, etc.
 99 99 # can be interrupted. So instead, make catchterm() raise the interrupt.
100 100 blocked = False
101 101 try:
102 102 for num, handler in orighandlers.items():
103 103 signal.signal(num, handler)
104 104 except ValueError:
105 105 pass # in a thread?
106 106
 107 107 # re-raise the interrupt exception, if any; it may be shadowed by a
 108 108 # new interrupt that occurred while re-raising the first one
109 109 if assertedsigs:
110 110 raiseinterrupt(assertedsigs[0])
111 111
112 112
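A stripped-down illustration of the same defer-and-replay pattern, handling SIGINT only and using nothing but the standard library (this is a sketch, not the helper above):

    import contextlib
    import signal

    @contextlib.contextmanager
    def deferred_sigint():
        # Record SIGINT while the block runs instead of acting on it,
        # then replay it once the critical section is done.
        pending = []

        def record(signum, frame):
            pending.append(signum)

        previous = signal.signal(signal.SIGINT, record)
        try:
            yield
        finally:
            signal.signal(signal.SIGINT, previous)
            if pending:
                raise KeyboardInterrupt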
113 113 def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs):
114 114 """return an acquired lock or raise an a LockHeld exception
115 115
116 116 This function is responsible to issue warnings and or debug messages about
117 117 the held lock while trying to acquires it."""
118 devel_wait_file = kwargs.pop("devel_wait_sync_file", None)
118 119
119 120 def printwarning(printer, locker):
120 121 """issue the usual "waiting on lock" message through any channel"""
121 122 # show more details for new-style locks
122 123 if b':' in locker:
123 124 host, pid = locker.split(b":", 1)
124 125 msg = _(
125 126 b"waiting for lock on %s held by process %r on host %r\n"
126 127 ) % (
127 128 pycompat.bytestr(l.desc),
128 129 pycompat.bytestr(pid),
129 130 pycompat.bytestr(host),
130 131 )
131 132 else:
132 133 msg = _(b"waiting for lock on %s held by %r\n") % (
133 134 l.desc,
134 135 pycompat.bytestr(locker),
135 136 )
136 137 printer(msg)
137 138
138 139 l = lock(vfs, lockname, 0, *args, dolock=False, **kwargs)
139 140
140 141 debugidx = 0 if (warntimeout and timeout) else -1
141 142 warningidx = 0
142 143 if not timeout:
143 144 warningidx = -1
144 145 elif warntimeout:
145 146 warningidx = warntimeout
146 147
147 148 delay = 0
148 149 while True:
149 150 try:
150 151 l._trylock()
151 152 break
152 153 except error.LockHeld as inst:
154 if devel_wait_file is not None:
 155 # create the file to signal that we are waiting for the lock
156 with open(devel_wait_file, 'w'):
157 pass
158
153 159 if delay == debugidx:
154 160 printwarning(ui.debug, inst.locker)
155 161 if delay == warningidx:
156 162 printwarning(ui.warn, inst.locker)
157 163 if timeout <= delay:
158 164 assert isinstance(inst.filename, bytes)
159 165 raise error.LockHeld(
160 166 errno.ETIMEDOUT,
161 167 typing.cast(bytes, inst.filename),
162 168 l.desc,
163 169 inst.locker,
164 170 )
165 171 time.sleep(1)
166 172 delay += 1
167 173
168 174 l.delay = delay
169 175 if l.delay:
170 176 if 0 <= warningidx <= l.delay:
171 177 ui.warn(_(b"got lock after %d seconds\n") % l.delay)
172 178 else:
173 179 ui.debug(b"got lock after %d seconds\n" % l.delay)
174 180 if l.acquirefn:
175 181 l.acquirefn()
176 182 return l
177 183
178 184
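The devel_wait_sync_file hook added above (fed by the devel.lock-wait-sync-file config, see the test changes below) is what lets the tests replace sleep() calls with a deterministic handshake: as soon as _trylock() fails, an empty marker file appears and the other process knows the contender is blocked. A rough Python equivalent of the shell tests/testlib/wait-on-file helper (timeout and poll interval here are assumptions):

    import os
    import time

    def wait_on_file(path, timeout=10.0, poll=0.01):
        # Block until `path` exists, or give up after `timeout` seconds.
        deadline = time.monotonic() + timeout
        while not os.path.exists(path):
            if time.monotonic() >= deadline:
                raise TimeoutError(path)
            time.sleep(poll)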
179 185 class lock:
180 186 """An advisory lock held by one process to control access to a set
181 187 of files. Non-cooperating processes or incorrectly written scripts
182 188 can ignore Mercurial's locking scheme and stomp all over the
183 189 repository, so don't do that.
184 190
185 191 Typically used via localrepository.lock() to lock the repository
186 192 store (.hg/store/) or localrepository.wlock() to lock everything
187 193 else under .hg/."""
188 194
 190 196 # the lock is a symlink on platforms that support it, a file on others.
 191 197
 192 198 # a symlink is used because creating the directory entry and its
 193 199 # contents is atomic, even over NFS.
193 199
194 200 # old-style lock: symlink to pid
195 201 # new-style lock: symlink to hostname:pid
196 202
197 203 _host = None
198 204
199 205 def __init__(
200 206 self,
201 207 vfs,
202 208 fname,
203 209 timeout=-1,
204 210 releasefn=None,
205 211 acquirefn=None,
206 212 desc=None,
207 213 signalsafe=True,
208 214 dolock=True,
209 215 ):
210 216 self.vfs = vfs
211 217 self.f = fname
212 218 self.held = 0
213 219 self.timeout = timeout
214 220 self.releasefn = releasefn
215 221 self.acquirefn = acquirefn
216 222 self.desc = desc
217 223 if signalsafe:
218 224 self._maybedelayedinterrupt = _delayedinterrupt
219 225 else:
220 226 self._maybedelayedinterrupt = util.nullcontextmanager
221 227 self.postrelease = []
222 228 self.pid = self._getpid()
223 229 if dolock:
224 230 self.delay = self.lock()
225 231 if self.acquirefn:
226 232 self.acquirefn()
227 233
228 234 def __enter__(self):
229 235 return self
230 236
231 237 def __exit__(self, exc_type, exc_value, exc_tb):
232 238 success = all(a is None for a in (exc_type, exc_value, exc_tb))
233 239 self.release(success=success)
234 240
235 241 def __del__(self):
236 242 if self.held:
237 243 warnings.warn(
238 244 "use lock.release instead of del lock",
239 245 category=DeprecationWarning,
240 246 stacklevel=2,
241 247 )
242 248
243 249 # ensure the lock will be removed
244 250 # even if recursive locking did occur
245 251 self.held = 1
246 252
247 253 self.release()
248 254
249 255 def _getpid(self):
250 256 # wrapper around procutil.getpid() to make testing easier
251 257 return procutil.getpid()
252 258
253 259 def lock(self):
254 260 timeout = self.timeout
255 261 while True:
256 262 try:
257 263 self._trylock()
258 264 return self.timeout - timeout
259 265 except error.LockHeld as inst:
260 266 if timeout != 0:
261 267 time.sleep(1)
262 268 if timeout > 0:
263 269 timeout -= 1
264 270 continue
265 271 raise error.LockHeld(
266 272 errno.ETIMEDOUT, inst.filename, self.desc, inst.locker
267 273 )
268 274
269 275 def _trylock(self):
270 276 if self.held:
271 277 self.held += 1
272 278 return
273 279 if lock._host is None:
274 280 lock._host = _getlockprefix()
275 281 lockname = b'%s:%d' % (lock._host, self.pid)
276 282 retry = 5
277 283 while not self.held and retry:
278 284 retry -= 1
279 285 try:
280 286 with self._maybedelayedinterrupt():
281 287 self.vfs.makelock(lockname, self.f)
282 288 self.held = 1
283 289 except (OSError, IOError) as why:
284 290 if why.errno == errno.EEXIST:
285 291 locker = self._readlock()
286 292 if locker is None:
287 293 continue
288 294
289 295 locker = self._testlock(locker)
290 296 if locker is not None:
291 297 raise error.LockHeld(
292 298 errno.EAGAIN,
293 299 self.vfs.join(self.f),
294 300 self.desc,
295 301 locker,
296 302 )
297 303 else:
298 304 assert isinstance(why.filename, bytes)
299 305 assert isinstance(why.strerror, str)
300 306 raise error.LockUnavailable(
301 307 why.errno,
302 308 why.strerror,
303 309 typing.cast(bytes, why.filename),
304 310 self.desc,
305 311 )
306 312
307 313 if not self.held:
 308 314 # use an empty locker to mean "busy because of frequent lock/unlock
 309 315 # by many processes"
310 316 raise error.LockHeld(
311 317 errno.EAGAIN, self.vfs.join(self.f), self.desc, b""
312 318 )
313 319
314 320 def _readlock(self):
315 321 """read lock and return its value
316 322
317 323 Returns None if no lock exists, pid for old-style locks, and host:pid
318 324 for new-style locks.
319 325 """
320 326 try:
321 327 return self.vfs.readlock(self.f)
322 328 except FileNotFoundError:
323 329 return None
324 330
325 331 def _lockshouldbebroken(self, locker):
326 332 if locker is None:
327 333 return False
328 334 try:
329 335 host, pid = locker.split(b":", 1)
330 336 except ValueError:
331 337 return False
332 338 if host != lock._host:
333 339 return False
334 340 try:
335 341 pid = int(pid)
336 342 except ValueError:
337 343 return False
338 344 if procutil.testpid(pid):
339 345 return False
340 346 return True
341 347
342 348 def _testlock(self, locker):
343 349 if not self._lockshouldbebroken(locker):
344 350 return locker
345 351
346 352 # if locker dead, break lock. must do this with another lock
347 353 # held, or can race and break valid lock.
348 354 try:
349 355 with lock(self.vfs, self.f + b'.break', timeout=0):
350 356 locker = self._readlock()
351 357 if not self._lockshouldbebroken(locker):
352 358 return locker
353 359 self.vfs.unlink(self.f)
354 360 except error.LockError:
355 361 return locker
356 362
357 363 def testlock(self):
358 364 """return id of locker if lock is valid, else None.
359 365
 360 366 With an old-style lock, we cannot tell what machine the locker is on.
 361 367 With a new-style lock, if the locker is on this machine, we can
 362 368 see if the locker is alive. If the locker is on this machine but
 363 369 not alive, we can safely break the lock.
364 370
365 371 The lock file is only deleted when None is returned.
366 372
367 373 """
368 374 locker = self._readlock()
369 375 return self._testlock(locker)
370 376
371 377 def release(self, success=True):
372 378 """release the lock and execute callback function if any
373 379
374 380 If the lock has been acquired multiple times, the actual release is
375 381 delayed to the last release call."""
376 382 if self.held > 1:
377 383 self.held -= 1
378 384 elif self.held == 1:
379 385 self.held = 0
380 386 if self._getpid() != self.pid:
381 387 # we forked, and are not the parent
382 388 return
383 389 try:
384 390 if self.releasefn:
385 391 self.releasefn()
386 392 finally:
387 393 try:
388 394 self.vfs.unlink(self.f)
389 395 except OSError:
390 396 pass
391 397 # The postrelease functions typically assume the lock is not held
392 398 # at all.
393 399 for callback in self.postrelease:
394 400 callback(success)
395 401 # Prevent double usage and help clear cycles.
396 402 self.postrelease = None
397 403
398 404
399 405 def release(*locks):
400 406 for lock in locks:
401 407 if lock is not None:
402 408 lock.release()
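Since lock implements the context-manager protocol, direct use outside localrepository looks like the following sketch (the vfs base directory is hypothetical and must exist beforehand):

    from mercurial import lock as lockmod
    from mercurial import vfs as vfsmod

    v = vfsmod.vfs(b'/tmp/lockdemo')  # assumed to exist
    with lockmod.lock(v, b'mylock', timeout=5, desc=b'demo lock'):
        pass  # critical section; released on exit, even on error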
@@ -1,141 +1,178 b''
1 1 #require unix-permissions no-root no-windows
2 2
3 3 Prepare
4 4
5 5 $ hg init a
6 6 $ echo a > a/a
7 7 $ hg -R a ci -A -m a
8 8 adding a
9 9
10 10 $ hg clone a b
11 11 updating to branch default
12 12 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
13 13
14 14 Test that raising an exception in the release function doesn't cause the lock to choke
15 15
16 16 $ cat > testlock.py << EOF
17 17 > from mercurial import error, registrar
18 18 >
19 19 > cmdtable = {}
20 20 > command = registrar.command(cmdtable)
21 21 >
22 22 > def acquiretestlock(repo, releaseexc):
23 23 > def unlock():
24 24 > if releaseexc:
25 25 > raise error.Abort(b'expected release exception')
26 26 > l = repo._lock(repo.vfs, b'testlock', False, unlock, None, b'test lock')
27 27 > return l
28 28 >
29 29 > @command(b'testlockexc')
30 30 > def testlockexc(ui, repo):
31 31 > testlock = acquiretestlock(repo, True)
32 32 > try:
33 33 > testlock.release()
34 34 > finally:
35 35 > try:
36 36 > testlock = acquiretestlock(repo, False)
37 37 > except error.LockHeld:
38 38 > raise error.Abort(b'lockfile on disk even after releasing!')
39 39 > testlock.release()
40 40 > EOF
41 41 $ cat >> $HGRCPATH << EOF
42 42 > [extensions]
43 43 > testlock=$TESTTMP/testlock.py
44 44 > EOF
45 45
46 46 $ hg -R b testlockexc
47 47 abort: expected release exception
48 48 [255]
49 49
50 50 One process waiting for another
51 51
52 $ cat > hooks.py << EOF
53 > import time
54 > def sleepone(**x): time.sleep(1)
55 > def sleephalf(**x): time.sleep(0.5)
52 $ SYNC_FILE_LOCKED="$TESTTMP/sync-file-locked"
53 $ export SYNC_FILE_LOCKED
54 $ SYNC_FILE_TRYING_LOCK="$TESTTMP/sync-file-trying-lock"
55 $ export SYNC_FILE_TRYING_LOCK
56 $ cat << EOF > locker.sh
57 > $RUNTESTDIR/testlib/wait-on-file 10 $SYNC_FILE_TRYING_LOCK $SYNC_FILE_LOCKED;
58 > EOF
59 $ cat << EOF > waiter.sh
60 > $RUNTESTDIR/testlib/wait-on-file 10 $SYNC_FILE_LOCKED;
56 61 > EOF
62 $ clean_sync() {
63 > rm -f "$SYNC_FILE_LOCKED"
64 > rm -f "$SYNC_FILE_TRYING_LOCK"
65 > }
66
67
68 $ clean_sync
57 69 $ echo b > b/b
58 $ hg -R b ci -A -m b --config hooks.precommit="python:`pwd`/hooks.py:sleepone" > stdout &
59 $ hg -R b up -q --config ui.timeout.warn=0 --config hooks.pre-update="python:`pwd`/hooks.py:sleephalf" \
60 > > preup-stdout 2>preup-stderr
70 $ hg -R b ci -A -m b \
71 > --config hooks.precommit="sh $TESTTMP/locker.sh" \
72 > > stdout &
73 $ hg -R b up -q \
74 > --config ui.timeout.warn=0 \
75 > --config hooks.pre-update="sh $TESTTMP/waiter.sh" \
76 > --config devel.lock-wait-sync-file="$SYNC_FILE_TRYING_LOCK" \
77 > > preup-stdout 2> preup-stderr
61 78 $ wait
62 79 $ cat preup-stdout
63 80 $ cat preup-stderr
64 81 waiting for lock on working directory of b held by process '*' on host '*' (glob)
65 82 got lock after * seconds (glob)
66 83 $ cat stdout
67 84 adding b
68 85
 69 86 One process waiting on another, warning after a long time.
70 87
88 $ clean_sync
71 89 $ echo b > b/c
72 $ hg -R b ci -A -m b --config hooks.precommit="python:`pwd`/hooks.py:sleepone" > stdout &
73 $ hg -R b up -q --config hooks.pre-update="python:`pwd`/hooks.py:sleephalf" \
74 > --config ui.timeout.warn=250 \
75 > > preup-stdout 2>preup-stderr
90 $ hg -R b ci -A -m b \
91 > --config hooks.precommit="sh $TESTTMP/locker.sh" \
92 > > stdout &
93 $ hg -R b up -q \
94 > --config hooks.pre-update="sh $TESTTMP/waiter.sh" \
95 > --config devel.lock-wait-sync-file="$SYNC_FILE_TRYING_LOCK" \
96 > --config ui.timeout.warn=250 \
97 > > preup-stdout 2> preup-stderr
76 98 $ wait
77 99 $ cat preup-stdout
78 100 $ cat preup-stderr
79 101 $ cat stdout
80 102 adding c
81 103
 82 104 One process waiting on another, warning disabled.
83 105
106 $ clean_sync
84 107 $ echo b > b/d
85 $ hg -R b ci -A -m b --config hooks.precommit="python:`pwd`/hooks.py:sleepone" > stdout &
86 $ hg -R b up -q --config hooks.pre-update="python:`pwd`/hooks.py:sleephalf" \
87 > --config ui.timeout.warn=-1 \
88 > > preup-stdout 2>preup-stderr
108 $ hg -R b ci -A -m b \
109 > --config hooks.precommit="sh $TESTTMP/locker.sh" \
110 > > stdout &
111 $ hg -R b up -q \
112 > --config hooks.pre-update="sh $TESTTMP/waiter.sh" \
113 > --config devel.lock-wait-sync-file="$SYNC_FILE_TRYING_LOCK" \
114 > --config ui.timeout.warn=-1 \
115 > > preup-stdout 2>preup-stderr
89 116 $ wait
90 117 $ cat preup-stdout
91 118 $ cat preup-stderr
92 119 $ cat stdout
93 120 adding d
94 121
 95 122 Check that we still print debug output
96 123
 97 124 One process waiting on another, warning after a long time (debug output on)
98 125
126 $ clean_sync
99 127 $ echo b > b/e
100 $ hg -R b ci -A -m b --config hooks.precommit="python:`pwd`/hooks.py:sleepone" > stdout &
101 $ hg -R b up --config hooks.pre-update="python:`pwd`/hooks.py:sleephalf" \
102 > --config ui.timeout.warn=250 --debug\
103 > > preup-stdout 2>preup-stderr
128 $ hg -R b ci -A -m b \
129 > --config hooks.precommit="sh $TESTTMP/locker.sh" \
130 > > stdout &
131 $ hg -R b up \
132 > --config hooks.pre-update="sh $TESTTMP/waiter.sh" \
133 > --config devel.lock-wait-sync-file="$SYNC_FILE_TRYING_LOCK" \
134 > --config ui.timeout.warn=250 --debug \
135 > > preup-stdout 2>preup-stderr
104 136 $ wait
105 137 $ cat preup-stdout
106 calling hook pre-update: hghook_pre-update.sleephalf
138 running hook pre-update: sh $TESTTMP/waiter.sh
107 139 waiting for lock on working directory of b held by process '*' on host '*' (glob)
108 140 got lock after * seconds (glob)
109 141 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
110 142 $ cat preup-stderr
111 143 $ cat stdout
112 144 adding e
113 145
 114 146 One process waiting on another, warning disabled (debug output on)
115 147
148 $ clean_sync
116 149 $ echo b > b/f
117 $ hg -R b ci -A -m b --config hooks.precommit="python:`pwd`/hooks.py:sleepone" > stdout &
118 $ hg -R b up --config hooks.pre-update="python:`pwd`/hooks.py:sleephalf" \
119 > --config ui.timeout.warn=-1 --debug\
120 > > preup-stdout 2>preup-stderr
150 $ hg -R b ci -A -m b \
151 > --config hooks.precommit="sh $TESTTMP/locker.sh" \
152 > > stdout &
153 $ hg -R b up \
154 > --config hooks.pre-update="sh $TESTTMP/waiter.sh" \
155 > --config devel.lock-wait-sync-file="$SYNC_FILE_TRYING_LOCK" \
156 > --config ui.timeout.warn=-1 --debug\
157 > > preup-stdout 2>preup-stderr
121 158 $ wait
122 159 $ cat preup-stdout
123 calling hook pre-update: hghook_pre-update.sleephalf
160 running hook pre-update: sh $TESTTMP/waiter.sh
124 161 waiting for lock on working directory of b held by process '*' on host '*' (glob)
125 162 got lock after * seconds (glob)
126 163 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
127 164 $ cat preup-stderr
128 165 $ cat stdout
129 166 adding f
130 167
131 168 Pushing to a local read-only repo that can't be locked
132 169
133 170 $ chmod 100 a/.hg/store
134 171
135 172 $ hg -R b push a
136 173 pushing to a
137 174 searching for changes
138 175 abort: could not lock repository a: $EACCES$
139 176 [20]
140 177
141 178 $ chmod 700 a/.hg/store