dirstate-v2: Add a new experimental `exp-dirstate-v2` repository requirement...
Simon Sapin
r48052:ed0d54b2 default
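This changeset registers a new config item, format.exp-dirstate-v2 (default False, flagged experimental), which selects the dirstate-v2 on-disk format when a new repository is created; existing repositories keep whatever .hg/requires records. A minimal hgrc sketch for opting in, assuming the option is only consulted at repository creation time as the new comment in the diff states:

    [format]
    exp-dirstate-v2 = True

or, for a single invocation (repository name hypothetical):

    hg --config format.exp-dirstate-v2=true init newrepo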
@@ -1,2711 +1,2719 b''
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
19 19 def loadconfigtable(ui, extname, configtable):
20 20 """update config item known to the ui with the extension ones"""
21 21 for section, items in sorted(configtable.items()):
22 22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 23 knownkeys = set(knownitems)
24 24 newkeys = set(items)
25 25 for key in sorted(knownkeys & newkeys):
26 26 msg = b"extension '%s' overwrite config item '%s.%s'"
27 27 msg %= (extname, section, key)
28 28 ui.develwarn(msg, config=b'warn-config')
29 29
30 30 knownitems.update(items)
31 31
32 32
33 33 class configitem(object):
34 34 """represent a known config item
35 35
36 36 :section: the official config section where to find this item,
37 37 :name: the official name within the section,
38 38 :default: default value for this item,
39 39 :alias: optional list of tuples as alternatives,
40 40 :generic: this is a generic definition, match name using regular expression.
41 41 """
42 42
43 43 def __init__(
44 44 self,
45 45 section,
46 46 name,
47 47 default=None,
48 48 alias=(),
49 49 generic=False,
50 50 priority=0,
51 51 experimental=False,
52 52 ):
53 53 self.section = section
54 54 self.name = name
55 55 self.default = default
56 56 self.alias = list(alias)
57 57 self.generic = generic
58 58 self.priority = priority
59 59 self.experimental = experimental
60 60 self._re = None
61 61 if generic:
62 62 self._re = re.compile(self.name)
63 63
64 64
65 65 class itemregister(dict):
66 66 """A specialized dictionary that can handle wild-card selection"""
67 67
68 68 def __init__(self):
69 69 super(itemregister, self).__init__()
70 70 self._generics = set()
71 71
72 72 def update(self, other):
73 73 super(itemregister, self).update(other)
74 74 self._generics.update(other._generics)
75 75
76 76 def __setitem__(self, key, item):
77 77 super(itemregister, self).__setitem__(key, item)
78 78 if item.generic:
79 79 self._generics.add(item)
80 80
81 81 def get(self, key):
82 82 baseitem = super(itemregister, self).get(key)
83 83 if baseitem is not None and not baseitem.generic:
84 84 return baseitem
85 85
86 86 # search for a matching generic item
87 87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 88 for item in generics:
89 89 # we use 'match' instead of 'search' to make the matching simpler
90 90 # for people unfamiliar with regular expressions. Having the match
91 91 # rooted to the start of the string produces less surprising
92 92 # results for users writing a simple regex for a sub-attribute.
93 93 #
94 94 # For example, using "color\..*" with match produces an unsurprising
95 95 # result, while using search could suddenly match apparently
96 96 # unrelated configuration that happens to contain "color."
97 97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 98 # some patterns to avoid the need to prefix most patterns with "^".
99 99 # The "^" seems more error prone.
100 100 if item._re.match(key):
101 101 return item
102 102
103 103 return None
104 104
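The get() method above falls back to generic (regex-named) items when no exact key is registered, trying them in (priority, name) order and anchoring the pattern at the start of the key. A minimal sketch of that behaviour, not part of the diff (the b'mode' / b'.*' items mirror the real color section registered further down):

    from mercurial.configitems import configitem, itemregister

    reg = itemregister()
    reg[b'mode'] = configitem(b'color', b'mode', default=b'auto')
    reg[b'.*'] = configitem(b'color', b'.*', default=None, generic=True)

    assert reg.get(b'mode').default == b'auto'  # exact item wins over generics
    assert reg.get(b'pagermode').generic        # no exact match: b'.*' matches from the start
    assert reg.get(b'anything') is not None     # a catch-all generic matches any key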
105 105
106 106 coreitems = {}
107 107
108 108
109 109 def _register(configtable, *args, **kwargs):
110 110 item = configitem(*args, **kwargs)
111 111 section = configtable.setdefault(item.section, itemregister())
112 112 if item.name in section:
113 113 msg = b"duplicated config item registration for '%s.%s'"
114 114 raise error.ProgrammingError(msg % (item.section, item.name))
115 115 section[item.name] = item
116 116
117 117
118 118 # special value for case where the default is derived from other values
119 119 dynamicdefault = object()
120 120
121 121 # Registering actual config items
122 122
123 123
124 124 def getitemregister(configtable):
125 125 f = functools.partial(_register, configtable)
126 126 # export pseudo enum as configitem.*
127 127 f.dynamicdefault = dynamicdefault
128 128 return f
129 129
130 130
131 131 coreconfigitem = getitemregister(coreitems)
132 132
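coreconfigitem is simply functools.partial(_register, coreitems), so every registration in the rest of this file stores a configitem in the coreitems table of itemregister instances, and registering the same section/name twice raises ProgrammingError. A sketch of what one call expands to (b'ui' / b'example-option' are hypothetical names, not items from this file):

    coreconfigitem(b'ui', b'example-option', default=False)
    # ...is equivalent to...
    _register(coreitems, b'ui', b'example-option', default=False)
    # ...which does, roughly:
    item = configitem(b'ui', b'example-option', default=False)
    coreitems.setdefault(b'ui', itemregister())[b'example-option'] = item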
133 133
134 134 def _registerdiffopts(section, configprefix=b''):
135 135 coreconfigitem(
136 136 section,
137 137 configprefix + b'nodates',
138 138 default=False,
139 139 )
140 140 coreconfigitem(
141 141 section,
142 142 configprefix + b'showfunc',
143 143 default=False,
144 144 )
145 145 coreconfigitem(
146 146 section,
147 147 configprefix + b'unified',
148 148 default=None,
149 149 )
150 150 coreconfigitem(
151 151 section,
152 152 configprefix + b'git',
153 153 default=False,
154 154 )
155 155 coreconfigitem(
156 156 section,
157 157 configprefix + b'ignorews',
158 158 default=False,
159 159 )
160 160 coreconfigitem(
161 161 section,
162 162 configprefix + b'ignorewsamount',
163 163 default=False,
164 164 )
165 165 coreconfigitem(
166 166 section,
167 167 configprefix + b'ignoreblanklines',
168 168 default=False,
169 169 )
170 170 coreconfigitem(
171 171 section,
172 172 configprefix + b'ignorewseol',
173 173 default=False,
174 174 )
175 175 coreconfigitem(
176 176 section,
177 177 configprefix + b'nobinary',
178 178 default=False,
179 179 )
180 180 coreconfigitem(
181 181 section,
182 182 configprefix + b'noprefix',
183 183 default=False,
184 184 )
185 185 coreconfigitem(
186 186 section,
187 187 configprefix + b'word-diff',
188 188 default=False,
189 189 )
190 190
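For reference, each _registerdiffopts() call that appears below fans out into one registration per diff option under the given section and prefix; this sketch only spells out what two of those calls expand to:

    _registerdiffopts(section=b'annotate')
    # registers b'annotate' items: nodates, showfunc, unified, git, ignorews, ...
    _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
    # registers b'commands.commit.interactive.nodates', ...showfunc, and so on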
191 191
192 192 coreconfigitem(
193 193 b'alias',
194 194 b'.*',
195 195 default=dynamicdefault,
196 196 generic=True,
197 197 )
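The generic alias item above makes any user-defined alias a known option. A hedged hgrc example (alias name and arguments are hypothetical):

    [alias]
    lg = log --graph --limit 10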
198 198 coreconfigitem(
199 199 b'auth',
200 200 b'cookiefile',
201 201 default=None,
202 202 )
203 203 _registerdiffopts(section=b'annotate')
204 204 # bookmarks.pushing: internal hack for discovery
205 205 coreconfigitem(
206 206 b'bookmarks',
207 207 b'pushing',
208 208 default=list,
209 209 )
210 210 # bundle.mainreporoot: internal hack for bundlerepo
211 211 coreconfigitem(
212 212 b'bundle',
213 213 b'mainreporoot',
214 214 default=b'',
215 215 )
216 216 coreconfigitem(
217 217 b'censor',
218 218 b'policy',
219 219 default=b'abort',
220 220 experimental=True,
221 221 )
222 222 coreconfigitem(
223 223 b'chgserver',
224 224 b'idletimeout',
225 225 default=3600,
226 226 )
227 227 coreconfigitem(
228 228 b'chgserver',
229 229 b'skiphash',
230 230 default=False,
231 231 )
232 232 coreconfigitem(
233 233 b'cmdserver',
234 234 b'log',
235 235 default=None,
236 236 )
237 237 coreconfigitem(
238 238 b'cmdserver',
239 239 b'max-log-files',
240 240 default=7,
241 241 )
242 242 coreconfigitem(
243 243 b'cmdserver',
244 244 b'max-log-size',
245 245 default=b'1 MB',
246 246 )
247 247 coreconfigitem(
248 248 b'cmdserver',
249 249 b'max-repo-cache',
250 250 default=0,
251 251 experimental=True,
252 252 )
253 253 coreconfigitem(
254 254 b'cmdserver',
255 255 b'message-encodings',
256 256 default=list,
257 257 )
258 258 coreconfigitem(
259 259 b'cmdserver',
260 260 b'track-log',
261 261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
262 262 )
263 263 coreconfigitem(
264 264 b'cmdserver',
265 265 b'shutdown-on-interrupt',
266 266 default=True,
267 267 )
268 268 coreconfigitem(
269 269 b'color',
270 270 b'.*',
271 271 default=None,
272 272 generic=True,
273 273 )
274 274 coreconfigitem(
275 275 b'color',
276 276 b'mode',
277 277 default=b'auto',
278 278 )
279 279 coreconfigitem(
280 280 b'color',
281 281 b'pagermode',
282 282 default=dynamicdefault,
283 283 )
284 284 coreconfigitem(
285 285 b'command-templates',
286 286 b'graphnode',
287 287 default=None,
288 288 alias=[(b'ui', b'graphnodetemplate')],
289 289 )
290 290 coreconfigitem(
291 291 b'command-templates',
292 292 b'log',
293 293 default=None,
294 294 alias=[(b'ui', b'logtemplate')],
295 295 )
296 296 coreconfigitem(
297 297 b'command-templates',
298 298 b'mergemarker',
299 299 default=(
300 300 b'{node|short} '
301 301 b'{ifeq(tags, "tip", "", '
302 302 b'ifeq(tags, "", "", "{tags} "))}'
303 303 b'{if(bookmarks, "{bookmarks} ")}'
304 304 b'{ifeq(branch, "default", "", "{branch} ")}'
305 305 b'- {author|user}: {desc|firstline}'
306 306 ),
307 307 alias=[(b'ui', b'mergemarkertemplate')],
308 308 )
309 309 coreconfigitem(
310 310 b'command-templates',
311 311 b'pre-merge-tool-output',
312 312 default=None,
313 313 alias=[(b'ui', b'pre-merge-tool-output-template')],
314 314 )
315 315 coreconfigitem(
316 316 b'command-templates',
317 317 b'oneline-summary',
318 318 default=None,
319 319 )
320 320 coreconfigitem(
321 321 b'command-templates',
322 322 b'oneline-summary.*',
323 323 default=dynamicdefault,
324 324 generic=True,
325 325 )
326 326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
327 327 coreconfigitem(
328 328 b'commands',
329 329 b'commit.post-status',
330 330 default=False,
331 331 )
332 332 coreconfigitem(
333 333 b'commands',
334 334 b'grep.all-files',
335 335 default=False,
336 336 experimental=True,
337 337 )
338 338 coreconfigitem(
339 339 b'commands',
340 340 b'merge.require-rev',
341 341 default=False,
342 342 )
343 343 coreconfigitem(
344 344 b'commands',
345 345 b'push.require-revs',
346 346 default=False,
347 347 )
348 348 coreconfigitem(
349 349 b'commands',
350 350 b'resolve.confirm',
351 351 default=False,
352 352 )
353 353 coreconfigitem(
354 354 b'commands',
355 355 b'resolve.explicit-re-merge',
356 356 default=False,
357 357 )
358 358 coreconfigitem(
359 359 b'commands',
360 360 b'resolve.mark-check',
361 361 default=b'none',
362 362 )
363 363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
364 364 coreconfigitem(
365 365 b'commands',
366 366 b'show.aliasprefix',
367 367 default=list,
368 368 )
369 369 coreconfigitem(
370 370 b'commands',
371 371 b'status.relative',
372 372 default=False,
373 373 )
374 374 coreconfigitem(
375 375 b'commands',
376 376 b'status.skipstates',
377 377 default=[],
378 378 experimental=True,
379 379 )
380 380 coreconfigitem(
381 381 b'commands',
382 382 b'status.terse',
383 383 default=b'',
384 384 )
385 385 coreconfigitem(
386 386 b'commands',
387 387 b'status.verbose',
388 388 default=False,
389 389 )
390 390 coreconfigitem(
391 391 b'commands',
392 392 b'update.check',
393 393 default=None,
394 394 )
395 395 coreconfigitem(
396 396 b'commands',
397 397 b'update.requiredest',
398 398 default=False,
399 399 )
400 400 coreconfigitem(
401 401 b'committemplate',
402 402 b'.*',
403 403 default=None,
404 404 generic=True,
405 405 )
406 406 coreconfigitem(
407 407 b'convert',
408 408 b'bzr.saverev',
409 409 default=True,
410 410 )
411 411 coreconfigitem(
412 412 b'convert',
413 413 b'cvsps.cache',
414 414 default=True,
415 415 )
416 416 coreconfigitem(
417 417 b'convert',
418 418 b'cvsps.fuzz',
419 419 default=60,
420 420 )
421 421 coreconfigitem(
422 422 b'convert',
423 423 b'cvsps.logencoding',
424 424 default=None,
425 425 )
426 426 coreconfigitem(
427 427 b'convert',
428 428 b'cvsps.mergefrom',
429 429 default=None,
430 430 )
431 431 coreconfigitem(
432 432 b'convert',
433 433 b'cvsps.mergeto',
434 434 default=None,
435 435 )
436 436 coreconfigitem(
437 437 b'convert',
438 438 b'git.committeractions',
439 439 default=lambda: [b'messagedifferent'],
440 440 )
441 441 coreconfigitem(
442 442 b'convert',
443 443 b'git.extrakeys',
444 444 default=list,
445 445 )
446 446 coreconfigitem(
447 447 b'convert',
448 448 b'git.findcopiesharder',
449 449 default=False,
450 450 )
451 451 coreconfigitem(
452 452 b'convert',
453 453 b'git.remoteprefix',
454 454 default=b'remote',
455 455 )
456 456 coreconfigitem(
457 457 b'convert',
458 458 b'git.renamelimit',
459 459 default=400,
460 460 )
461 461 coreconfigitem(
462 462 b'convert',
463 463 b'git.saverev',
464 464 default=True,
465 465 )
466 466 coreconfigitem(
467 467 b'convert',
468 468 b'git.similarity',
469 469 default=50,
470 470 )
471 471 coreconfigitem(
472 472 b'convert',
473 473 b'git.skipsubmodules',
474 474 default=False,
475 475 )
476 476 coreconfigitem(
477 477 b'convert',
478 478 b'hg.clonebranches',
479 479 default=False,
480 480 )
481 481 coreconfigitem(
482 482 b'convert',
483 483 b'hg.ignoreerrors',
484 484 default=False,
485 485 )
486 486 coreconfigitem(
487 487 b'convert',
488 488 b'hg.preserve-hash',
489 489 default=False,
490 490 )
491 491 coreconfigitem(
492 492 b'convert',
493 493 b'hg.revs',
494 494 default=None,
495 495 )
496 496 coreconfigitem(
497 497 b'convert',
498 498 b'hg.saverev',
499 499 default=False,
500 500 )
501 501 coreconfigitem(
502 502 b'convert',
503 503 b'hg.sourcename',
504 504 default=None,
505 505 )
506 506 coreconfigitem(
507 507 b'convert',
508 508 b'hg.startrev',
509 509 default=None,
510 510 )
511 511 coreconfigitem(
512 512 b'convert',
513 513 b'hg.tagsbranch',
514 514 default=b'default',
515 515 )
516 516 coreconfigitem(
517 517 b'convert',
518 518 b'hg.usebranchnames',
519 519 default=True,
520 520 )
521 521 coreconfigitem(
522 522 b'convert',
523 523 b'ignoreancestorcheck',
524 524 default=False,
525 525 experimental=True,
526 526 )
527 527 coreconfigitem(
528 528 b'convert',
529 529 b'localtimezone',
530 530 default=False,
531 531 )
532 532 coreconfigitem(
533 533 b'convert',
534 534 b'p4.encoding',
535 535 default=dynamicdefault,
536 536 )
537 537 coreconfigitem(
538 538 b'convert',
539 539 b'p4.startrev',
540 540 default=0,
541 541 )
542 542 coreconfigitem(
543 543 b'convert',
544 544 b'skiptags',
545 545 default=False,
546 546 )
547 547 coreconfigitem(
548 548 b'convert',
549 549 b'svn.debugsvnlog',
550 550 default=True,
551 551 )
552 552 coreconfigitem(
553 553 b'convert',
554 554 b'svn.trunk',
555 555 default=None,
556 556 )
557 557 coreconfigitem(
558 558 b'convert',
559 559 b'svn.tags',
560 560 default=None,
561 561 )
562 562 coreconfigitem(
563 563 b'convert',
564 564 b'svn.branches',
565 565 default=None,
566 566 )
567 567 coreconfigitem(
568 568 b'convert',
569 569 b'svn.startrev',
570 570 default=0,
571 571 )
572 572 coreconfigitem(
573 573 b'convert',
574 574 b'svn.dangerous-set-commit-dates',
575 575 default=False,
576 576 )
577 577 coreconfigitem(
578 578 b'debug',
579 579 b'dirstate.delaywrite',
580 580 default=0,
581 581 )
582 582 coreconfigitem(
583 583 b'debug',
584 584 b'revlog.verifyposition.changelog',
585 585 default=b'',
586 586 )
587 587 coreconfigitem(
588 588 b'defaults',
589 589 b'.*',
590 590 default=None,
591 591 generic=True,
592 592 )
593 593 coreconfigitem(
594 594 b'devel',
595 595 b'all-warnings',
596 596 default=False,
597 597 )
598 598 coreconfigitem(
599 599 b'devel',
600 600 b'bundle2.debug',
601 601 default=False,
602 602 )
603 603 coreconfigitem(
604 604 b'devel',
605 605 b'bundle.delta',
606 606 default=b'',
607 607 )
608 608 coreconfigitem(
609 609 b'devel',
610 610 b'cache-vfs',
611 611 default=None,
612 612 )
613 613 coreconfigitem(
614 614 b'devel',
615 615 b'check-locks',
616 616 default=False,
617 617 )
618 618 coreconfigitem(
619 619 b'devel',
620 620 b'check-relroot',
621 621 default=False,
622 622 )
623 623 # Track copy information for all files, not just "added" ones (very slow)
624 624 coreconfigitem(
625 625 b'devel',
626 626 b'copy-tracing.trace-all-files',
627 627 default=False,
628 628 )
629 629 coreconfigitem(
630 630 b'devel',
631 631 b'default-date',
632 632 default=None,
633 633 )
634 634 coreconfigitem(
635 635 b'devel',
636 636 b'deprec-warn',
637 637 default=False,
638 638 )
639 639 coreconfigitem(
640 640 b'devel',
641 641 b'disableloaddefaultcerts',
642 642 default=False,
643 643 )
644 644 coreconfigitem(
645 645 b'devel',
646 646 b'warn-empty-changegroup',
647 647 default=False,
648 648 )
649 649 coreconfigitem(
650 650 b'devel',
651 651 b'legacy.exchange',
652 652 default=list,
653 653 )
654 654 # When True, revlogs use a special reference version of the nodemap, that is not
655 655 # performant but is "known" to behave properly.
656 656 coreconfigitem(
657 657 b'devel',
658 658 b'persistent-nodemap',
659 659 default=False,
660 660 )
661 661 coreconfigitem(
662 662 b'devel',
663 663 b'servercafile',
664 664 default=b'',
665 665 )
666 666 coreconfigitem(
667 667 b'devel',
668 668 b'serverexactprotocol',
669 669 default=b'',
670 670 )
671 671 coreconfigitem(
672 672 b'devel',
673 673 b'serverrequirecert',
674 674 default=False,
675 675 )
676 676 coreconfigitem(
677 677 b'devel',
678 678 b'strip-obsmarkers',
679 679 default=True,
680 680 )
681 681 coreconfigitem(
682 682 b'devel',
683 683 b'warn-config',
684 684 default=None,
685 685 )
686 686 coreconfigitem(
687 687 b'devel',
688 688 b'warn-config-default',
689 689 default=None,
690 690 )
691 691 coreconfigitem(
692 692 b'devel',
693 693 b'user.obsmarker',
694 694 default=None,
695 695 )
696 696 coreconfigitem(
697 697 b'devel',
698 698 b'warn-config-unknown',
699 699 default=None,
700 700 )
701 701 coreconfigitem(
702 702 b'devel',
703 703 b'debug.copies',
704 704 default=False,
705 705 )
706 706 coreconfigitem(
707 707 b'devel',
708 708 b'copy-tracing.multi-thread',
709 709 default=True,
710 710 )
711 711 coreconfigitem(
712 712 b'devel',
713 713 b'debug.extensions',
714 714 default=False,
715 715 )
716 716 coreconfigitem(
717 717 b'devel',
718 718 b'debug.repo-filters',
719 719 default=False,
720 720 )
721 721 coreconfigitem(
722 722 b'devel',
723 723 b'debug.peer-request',
724 724 default=False,
725 725 )
726 726 # If discovery.exchange-heads is False, the discovery will not start with
727 727 # remote head fetching and local head querying.
728 728 coreconfigitem(
729 729 b'devel',
730 730 b'discovery.exchange-heads',
731 731 default=True,
732 732 )
733 733 # If discovery.grow-sample is False, the sample size used in set discovery will
734 734 # not be increased through the process
735 735 coreconfigitem(
736 736 b'devel',
737 737 b'discovery.grow-sample',
738 738 default=True,
739 739 )
740 740 # When discovery.grow-sample.dynamic is True, the default, the sample size is
741 741 # adapted to the shape of the undecided set (it is set to the max of:
742 742 # <target-size>, len(roots(undecided)), len(heads(undecided)))
743 743 coreconfigitem(
744 744 b'devel',
745 745 b'discovery.grow-sample.dynamic',
746 746 default=True,
747 747 )
748 748 # discovery.grow-sample.rate controls the rate at which the sample grows
749 749 coreconfigitem(
750 750 b'devel',
751 751 b'discovery.grow-sample.rate',
752 752 default=1.05,
753 753 )
754 754 # If discovery.randomize is False, random sampling during discovery is
755 755 # deterministic. It is meant for integration tests.
756 756 coreconfigitem(
757 757 b'devel',
758 758 b'discovery.randomize',
759 759 default=True,
760 760 )
761 761 # Control the initial size of the discovery sample
762 762 coreconfigitem(
763 763 b'devel',
764 764 b'discovery.sample-size',
765 765 default=200,
766 766 )
767 767 # Control the size of the initial discovery sample
768 768 coreconfigitem(
769 769 b'devel',
770 770 b'discovery.sample-size.initial',
771 771 default=100,
772 772 )
773 773 _registerdiffopts(section=b'diff')
774 774 coreconfigitem(
775 775 b'diff',
776 776 b'merge',
777 777 default=False,
778 778 experimental=True,
779 779 )
780 780 coreconfigitem(
781 781 b'email',
782 782 b'bcc',
783 783 default=None,
784 784 )
785 785 coreconfigitem(
786 786 b'email',
787 787 b'cc',
788 788 default=None,
789 789 )
790 790 coreconfigitem(
791 791 b'email',
792 792 b'charsets',
793 793 default=list,
794 794 )
795 795 coreconfigitem(
796 796 b'email',
797 797 b'from',
798 798 default=None,
799 799 )
800 800 coreconfigitem(
801 801 b'email',
802 802 b'method',
803 803 default=b'smtp',
804 804 )
805 805 coreconfigitem(
806 806 b'email',
807 807 b'reply-to',
808 808 default=None,
809 809 )
810 810 coreconfigitem(
811 811 b'email',
812 812 b'to',
813 813 default=None,
814 814 )
815 815 coreconfigitem(
816 816 b'experimental',
817 817 b'archivemetatemplate',
818 818 default=dynamicdefault,
819 819 )
820 820 coreconfigitem(
821 821 b'experimental',
822 822 b'auto-publish',
823 823 default=b'publish',
824 824 )
825 825 coreconfigitem(
826 826 b'experimental',
827 827 b'bundle-phases',
828 828 default=False,
829 829 )
830 830 coreconfigitem(
831 831 b'experimental',
832 832 b'bundle2-advertise',
833 833 default=True,
834 834 )
835 835 coreconfigitem(
836 836 b'experimental',
837 837 b'bundle2-output-capture',
838 838 default=False,
839 839 )
840 840 coreconfigitem(
841 841 b'experimental',
842 842 b'bundle2.pushback',
843 843 default=False,
844 844 )
845 845 coreconfigitem(
846 846 b'experimental',
847 847 b'bundle2lazylocking',
848 848 default=False,
849 849 )
850 850 coreconfigitem(
851 851 b'experimental',
852 852 b'bundlecomplevel',
853 853 default=None,
854 854 )
855 855 coreconfigitem(
856 856 b'experimental',
857 857 b'bundlecomplevel.bzip2',
858 858 default=None,
859 859 )
860 860 coreconfigitem(
861 861 b'experimental',
862 862 b'bundlecomplevel.gzip',
863 863 default=None,
864 864 )
865 865 coreconfigitem(
866 866 b'experimental',
867 867 b'bundlecomplevel.none',
868 868 default=None,
869 869 )
870 870 coreconfigitem(
871 871 b'experimental',
872 872 b'bundlecomplevel.zstd',
873 873 default=None,
874 874 )
875 875 coreconfigitem(
876 876 b'experimental',
877 877 b'bundlecompthreads',
878 878 default=None,
879 879 )
880 880 coreconfigitem(
881 881 b'experimental',
882 882 b'bundlecompthreads.bzip2',
883 883 default=None,
884 884 )
885 885 coreconfigitem(
886 886 b'experimental',
887 887 b'bundlecompthreads.gzip',
888 888 default=None,
889 889 )
890 890 coreconfigitem(
891 891 b'experimental',
892 892 b'bundlecompthreads.none',
893 893 default=None,
894 894 )
895 895 coreconfigitem(
896 896 b'experimental',
897 897 b'bundlecompthreads.zstd',
898 898 default=None,
899 899 )
900 900 coreconfigitem(
901 901 b'experimental',
902 902 b'changegroup3',
903 903 default=False,
904 904 )
905 905 coreconfigitem(
906 906 b'experimental',
907 907 b'changegroup4',
908 908 default=False,
909 909 )
910 910 coreconfigitem(
911 911 b'experimental',
912 912 b'cleanup-as-archived',
913 913 default=False,
914 914 )
915 915 coreconfigitem(
916 916 b'experimental',
917 917 b'clientcompressionengines',
918 918 default=list,
919 919 )
920 920 coreconfigitem(
921 921 b'experimental',
922 922 b'copytrace',
923 923 default=b'on',
924 924 )
925 925 coreconfigitem(
926 926 b'experimental',
927 927 b'copytrace.movecandidateslimit',
928 928 default=100,
929 929 )
930 930 coreconfigitem(
931 931 b'experimental',
932 932 b'copytrace.sourcecommitlimit',
933 933 default=100,
934 934 )
935 935 coreconfigitem(
936 936 b'experimental',
937 937 b'copies.read-from',
938 938 default=b"filelog-only",
939 939 )
940 940 coreconfigitem(
941 941 b'experimental',
942 942 b'copies.write-to',
943 943 default=b'filelog-only',
944 944 )
945 945 coreconfigitem(
946 946 b'experimental',
947 947 b'crecordtest',
948 948 default=None,
949 949 )
950 950 coreconfigitem(
951 951 b'experimental',
952 952 b'directaccess',
953 953 default=False,
954 954 )
955 955 coreconfigitem(
956 956 b'experimental',
957 957 b'directaccess.revnums',
958 958 default=False,
959 959 )
960 960 coreconfigitem(
961 961 b'experimental',
962 962 b'dirstate-tree.in-memory',
963 963 default=False,
964 964 )
965 965 coreconfigitem(
966 966 b'experimental',
967 967 b'editortmpinhg',
968 968 default=False,
969 969 )
970 970 coreconfigitem(
971 971 b'experimental',
972 972 b'evolution',
973 973 default=list,
974 974 )
975 975 coreconfigitem(
976 976 b'experimental',
977 977 b'evolution.allowdivergence',
978 978 default=False,
979 979 alias=[(b'experimental', b'allowdivergence')],
980 980 )
981 981 coreconfigitem(
982 982 b'experimental',
983 983 b'evolution.allowunstable',
984 984 default=None,
985 985 )
986 986 coreconfigitem(
987 987 b'experimental',
988 988 b'evolution.createmarkers',
989 989 default=None,
990 990 )
991 991 coreconfigitem(
992 992 b'experimental',
993 993 b'evolution.effect-flags',
994 994 default=True,
995 995 alias=[(b'experimental', b'effect-flags')],
996 996 )
997 997 coreconfigitem(
998 998 b'experimental',
999 999 b'evolution.exchange',
1000 1000 default=None,
1001 1001 )
1002 1002 coreconfigitem(
1003 1003 b'experimental',
1004 1004 b'evolution.bundle-obsmarker',
1005 1005 default=False,
1006 1006 )
1007 1007 coreconfigitem(
1008 1008 b'experimental',
1009 1009 b'evolution.bundle-obsmarker:mandatory',
1010 1010 default=True,
1011 1011 )
1012 1012 coreconfigitem(
1013 1013 b'experimental',
1014 1014 b'log.topo',
1015 1015 default=False,
1016 1016 )
1017 1017 coreconfigitem(
1018 1018 b'experimental',
1019 1019 b'evolution.report-instabilities',
1020 1020 default=True,
1021 1021 )
1022 1022 coreconfigitem(
1023 1023 b'experimental',
1024 1024 b'evolution.track-operation',
1025 1025 default=True,
1026 1026 )
1027 1027 # repo-level config to exclude a revset from visibility
1028 1028 #
1029 1029 # The target use case is to use `share` to expose different subset of the same
1030 1030 # repository, especially server side. See also `server.view`.
1031 1031 coreconfigitem(
1032 1032 b'experimental',
1033 1033 b'extra-filter-revs',
1034 1034 default=None,
1035 1035 )
1036 1036 coreconfigitem(
1037 1037 b'experimental',
1038 1038 b'maxdeltachainspan',
1039 1039 default=-1,
1040 1040 )
1041 1041 # tracks files which were undeleted (merge might delete them but we explicitly
1042 1042 # kept/undeleted them) and creates new filenodes for them
1043 1043 coreconfigitem(
1044 1044 b'experimental',
1045 1045 b'merge-track-salvaged',
1046 1046 default=False,
1047 1047 )
1048 1048 coreconfigitem(
1049 1049 b'experimental',
1050 1050 b'mergetempdirprefix',
1051 1051 default=None,
1052 1052 )
1053 1053 coreconfigitem(
1054 1054 b'experimental',
1055 1055 b'mmapindexthreshold',
1056 1056 default=None,
1057 1057 )
1058 1058 coreconfigitem(
1059 1059 b'experimental',
1060 1060 b'narrow',
1061 1061 default=False,
1062 1062 )
1063 1063 coreconfigitem(
1064 1064 b'experimental',
1065 1065 b'nonnormalparanoidcheck',
1066 1066 default=False,
1067 1067 )
1068 1068 coreconfigitem(
1069 1069 b'experimental',
1070 1070 b'exportableenviron',
1071 1071 default=list,
1072 1072 )
1073 1073 coreconfigitem(
1074 1074 b'experimental',
1075 1075 b'extendedheader.index',
1076 1076 default=None,
1077 1077 )
1078 1078 coreconfigitem(
1079 1079 b'experimental',
1080 1080 b'extendedheader.similarity',
1081 1081 default=False,
1082 1082 )
1083 1083 coreconfigitem(
1084 1084 b'experimental',
1085 1085 b'graphshorten',
1086 1086 default=False,
1087 1087 )
1088 1088 coreconfigitem(
1089 1089 b'experimental',
1090 1090 b'graphstyle.parent',
1091 1091 default=dynamicdefault,
1092 1092 )
1093 1093 coreconfigitem(
1094 1094 b'experimental',
1095 1095 b'graphstyle.missing',
1096 1096 default=dynamicdefault,
1097 1097 )
1098 1098 coreconfigitem(
1099 1099 b'experimental',
1100 1100 b'graphstyle.grandparent',
1101 1101 default=dynamicdefault,
1102 1102 )
1103 1103 coreconfigitem(
1104 1104 b'experimental',
1105 1105 b'hook-track-tags',
1106 1106 default=False,
1107 1107 )
1108 1108 coreconfigitem(
1109 1109 b'experimental',
1110 1110 b'httppeer.advertise-v2',
1111 1111 default=False,
1112 1112 )
1113 1113 coreconfigitem(
1114 1114 b'experimental',
1115 1115 b'httppeer.v2-encoder-order',
1116 1116 default=None,
1117 1117 )
1118 1118 coreconfigitem(
1119 1119 b'experimental',
1120 1120 b'httppostargs',
1121 1121 default=False,
1122 1122 )
1123 1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1124 1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1125 1125
1126 1126 coreconfigitem(
1127 1127 b'experimental',
1128 1128 b'obsmarkers-exchange-debug',
1129 1129 default=False,
1130 1130 )
1131 1131 coreconfigitem(
1132 1132 b'experimental',
1133 1133 b'remotenames',
1134 1134 default=False,
1135 1135 )
1136 1136 coreconfigitem(
1137 1137 b'experimental',
1138 1138 b'removeemptydirs',
1139 1139 default=True,
1140 1140 )
1141 1141 coreconfigitem(
1142 1142 b'experimental',
1143 1143 b'revert.interactive.select-to-keep',
1144 1144 default=False,
1145 1145 )
1146 1146 coreconfigitem(
1147 1147 b'experimental',
1148 1148 b'revisions.prefixhexnode',
1149 1149 default=False,
1150 1150 )
1151 1151 # "out of experimental" todo list.
1152 1152 #
1153 1153 # * include management of a persistent nodemap in the main docket
1154 1154 # * enforce a "no-truncate" policy for mmap safety
1155 1155 # - for censoring operation
1156 1156 # - for stripping operation
1157 1157 # - for rollback operation
1158 1158 # * proper streaming (race free) of the docket file
1159 1159 # * track garbage data to eventually allow rewriting -existing- sidedata.
1160 1160 # * Exchange-wise, we will also need to do something more efficient than
1161 1161 # keeping references to the affected revlogs, especially memory-wise when
1162 1162 # rewriting sidedata.
1163 1163 # * sidedata compression
1164 1164 # * introduce a proper solution to reduce the number of filelog related files.
1165 1165 # * Improvement to consider
1166 1166 # - avoid compression header in chunk using the default compression?
1167 1167 # - forbid "inline" compression mode entirely?
1168 1168 # - split the data offset and flag field (the 2 bytes saved are mostly trouble)
1169 1169 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1170 1170 # - keep track of chain base or size (probably not that useful anymore)
1171 1171 # - store data and sidedata in different files
1172 1172 coreconfigitem(
1173 1173 b'experimental',
1174 1174 b'revlogv2',
1175 1175 default=None,
1176 1176 )
1177 1177 coreconfigitem(
1178 1178 b'experimental',
1179 1179 b'revisions.disambiguatewithin',
1180 1180 default=None,
1181 1181 )
1182 1182 coreconfigitem(
1183 1183 b'experimental',
1184 1184 b'rust.index',
1185 1185 default=False,
1186 1186 )
1187 1187 coreconfigitem(
1188 1188 b'experimental',
1189 1189 b'server.filesdata.recommended-batch-size',
1190 1190 default=50000,
1191 1191 )
1192 1192 coreconfigitem(
1193 1193 b'experimental',
1194 1194 b'server.manifestdata.recommended-batch-size',
1195 1195 default=100000,
1196 1196 )
1197 1197 coreconfigitem(
1198 1198 b'experimental',
1199 1199 b'server.stream-narrow-clones',
1200 1200 default=False,
1201 1201 )
1202 1202 coreconfigitem(
1203 1203 b'experimental',
1204 1204 b'single-head-per-branch',
1205 1205 default=False,
1206 1206 )
1207 1207 coreconfigitem(
1208 1208 b'experimental',
1209 1209 b'single-head-per-branch:account-closed-heads',
1210 1210 default=False,
1211 1211 )
1212 1212 coreconfigitem(
1213 1213 b'experimental',
1214 1214 b'single-head-per-branch:public-changes-only',
1215 1215 default=False,
1216 1216 )
1217 1217 coreconfigitem(
1218 1218 b'experimental',
1219 1219 b'sshserver.support-v2',
1220 1220 default=False,
1221 1221 )
1222 1222 coreconfigitem(
1223 1223 b'experimental',
1224 1224 b'sparse-read',
1225 1225 default=False,
1226 1226 )
1227 1227 coreconfigitem(
1228 1228 b'experimental',
1229 1229 b'sparse-read.density-threshold',
1230 1230 default=0.50,
1231 1231 )
1232 1232 coreconfigitem(
1233 1233 b'experimental',
1234 1234 b'sparse-read.min-gap-size',
1235 1235 default=b'65K',
1236 1236 )
1237 1237 coreconfigitem(
1238 1238 b'experimental',
1239 1239 b'treemanifest',
1240 1240 default=False,
1241 1241 )
1242 1242 coreconfigitem(
1243 1243 b'experimental',
1244 1244 b'update.atomic-file',
1245 1245 default=False,
1246 1246 )
1247 1247 coreconfigitem(
1248 1248 b'experimental',
1249 1249 b'sshpeer.advertise-v2',
1250 1250 default=False,
1251 1251 )
1252 1252 coreconfigitem(
1253 1253 b'experimental',
1254 1254 b'web.apiserver',
1255 1255 default=False,
1256 1256 )
1257 1257 coreconfigitem(
1258 1258 b'experimental',
1259 1259 b'web.api.http-v2',
1260 1260 default=False,
1261 1261 )
1262 1262 coreconfigitem(
1263 1263 b'experimental',
1264 1264 b'web.api.debugreflect',
1265 1265 default=False,
1266 1266 )
1267 1267 coreconfigitem(
1268 1268 b'experimental',
1269 1269 b'worker.wdir-get-thread-safe',
1270 1270 default=False,
1271 1271 )
1272 1272 coreconfigitem(
1273 1273 b'experimental',
1274 1274 b'worker.repository-upgrade',
1275 1275 default=False,
1276 1276 )
1277 1277 coreconfigitem(
1278 1278 b'experimental',
1279 1279 b'xdiff',
1280 1280 default=False,
1281 1281 )
1282 1282 coreconfigitem(
1283 1283 b'extensions',
1284 1284 b'.*',
1285 1285 default=None,
1286 1286 generic=True,
1287 1287 )
1288 1288 coreconfigitem(
1289 1289 b'extdata',
1290 1290 b'.*',
1291 1291 default=None,
1292 1292 generic=True,
1293 1293 )
1294 1294 coreconfigitem(
1295 1295 b'format',
1296 1296 b'bookmarks-in-store',
1297 1297 default=False,
1298 1298 )
1299 1299 coreconfigitem(
1300 1300 b'format',
1301 1301 b'chunkcachesize',
1302 1302 default=None,
1303 1303 experimental=True,
1304 1304 )
1305 1305 coreconfigitem(
1306 # Enable this dirstate format *when creating a new repository*.
1307 # Which format to use for existing repos is controlled by .hg/requires
1308 b'format',
1309 b'exp-dirstate-v2',
1310 default=False,
1311 experimental=True,
1312 )
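The item added above lives in the format section, so repository-creation code would read it through the normal config API; a hedged sketch (the surrounding logic is hypothetical, only the section and name come from this diff):

    if ui.configbool(b'format', b'exp-dirstate-v2'):
        # record the new dirstate-v2 requirement in .hg/requires for the
        # repository being created (per the commit message)
        ...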
1313 coreconfigitem(
1306 1314 b'format',
1307 1315 b'dotencode',
1308 1316 default=True,
1309 1317 )
1310 1318 coreconfigitem(
1311 1319 b'format',
1312 1320 b'generaldelta',
1313 1321 default=False,
1314 1322 experimental=True,
1315 1323 )
1316 1324 coreconfigitem(
1317 1325 b'format',
1318 1326 b'manifestcachesize',
1319 1327 default=None,
1320 1328 experimental=True,
1321 1329 )
1322 1330 coreconfigitem(
1323 1331 b'format',
1324 1332 b'maxchainlen',
1325 1333 default=dynamicdefault,
1326 1334 experimental=True,
1327 1335 )
1328 1336 coreconfigitem(
1329 1337 b'format',
1330 1338 b'obsstore-version',
1331 1339 default=None,
1332 1340 )
1333 1341 coreconfigitem(
1334 1342 b'format',
1335 1343 b'sparse-revlog',
1336 1344 default=True,
1337 1345 )
1338 1346 coreconfigitem(
1339 1347 b'format',
1340 1348 b'revlog-compression',
1341 1349 default=lambda: [b'zstd', b'zlib'],
1342 1350 alias=[(b'experimental', b'format.compression')],
1343 1351 )
1344 1352 # Experimental TODOs:
1345 1353 #
1346 1354 # * Same as for revlogv2 (but for the reduction of the number of files)
1347 1355 # * Improvement to investigate
1348 1356 # - storing .hgtags fnode
1349 1357 # - storing `rank` of changesets
1350 1358 # - storing branch related identifier
1351 1359
1352 1360 coreconfigitem(
1353 1361 b'format',
1354 1362 b'exp-use-changelog-v2',
1355 1363 default=None,
1356 1364 experimental=True,
1357 1365 )
1358 1366 coreconfigitem(
1359 1367 b'format',
1360 1368 b'usefncache',
1361 1369 default=True,
1362 1370 )
1363 1371 coreconfigitem(
1364 1372 b'format',
1365 1373 b'usegeneraldelta',
1366 1374 default=True,
1367 1375 )
1368 1376 coreconfigitem(
1369 1377 b'format',
1370 1378 b'usestore',
1371 1379 default=True,
1372 1380 )
1373 1381
1374 1382
1375 1383 def _persistent_nodemap_default():
1376 1384 """compute `use-persistent-nodemap` default value
1377 1385
1378 1386 The feature is disabled unless a fast implementation is available.
1379 1387 """
1380 1388 from . import policy
1381 1389
1382 1390 return policy.importrust('revlog') is not None
1383 1391
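Like the default=list registrations elsewhere in this file, a callable default is resolved when the value is read, so _persistent_nodemap_default above only probes for the Rust module lazily. A stand-alone sketch of the same probe (assumes the mercurial package is importable):

    from mercurial import policy

    has_rust_revlog = policy.importrust('revlog') is not None
    # True  -> format.use-persistent-nodemap defaults to enabled below
    # False -> the feature stays off unless configured explicitly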
1384 1392
1385 1393 coreconfigitem(
1386 1394 b'format',
1387 1395 b'use-persistent-nodemap',
1388 1396 default=_persistent_nodemap_default,
1389 1397 )
1390 1398 coreconfigitem(
1391 1399 b'format',
1392 1400 b'exp-use-copies-side-data-changeset',
1393 1401 default=False,
1394 1402 experimental=True,
1395 1403 )
1396 1404 coreconfigitem(
1397 1405 b'format',
1398 1406 b'use-share-safe',
1399 1407 default=False,
1400 1408 )
1401 1409 coreconfigitem(
1402 1410 b'format',
1403 1411 b'internal-phase',
1404 1412 default=False,
1405 1413 experimental=True,
1406 1414 )
1407 1415 coreconfigitem(
1408 1416 b'fsmonitor',
1409 1417 b'warn_when_unused',
1410 1418 default=True,
1411 1419 )
1412 1420 coreconfigitem(
1413 1421 b'fsmonitor',
1414 1422 b'warn_update_file_count',
1415 1423 default=50000,
1416 1424 )
1417 1425 coreconfigitem(
1418 1426 b'fsmonitor',
1419 1427 b'warn_update_file_count_rust',
1420 1428 default=400000,
1421 1429 )
1422 1430 coreconfigitem(
1423 1431 b'help',
1424 1432 br'hidden-command\..*',
1425 1433 default=False,
1426 1434 generic=True,
1427 1435 )
1428 1436 coreconfigitem(
1429 1437 b'help',
1430 1438 br'hidden-topic\..*',
1431 1439 default=False,
1432 1440 generic=True,
1433 1441 )
1434 1442 coreconfigitem(
1435 1443 b'hooks',
1436 1444 b'[^:]*',
1437 1445 default=dynamicdefault,
1438 1446 generic=True,
1439 1447 )
1440 1448 coreconfigitem(
1441 1449 b'hooks',
1442 1450 b'.*:run-with-plain',
1443 1451 default=True,
1444 1452 generic=True,
1445 1453 )
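The two hook patterns above cover the hook command itself ([^:]*) and its :run-with-plain modifier. A hedged hgrc sketch (hook name and script are hypothetical):

    [hooks]
    pretxncommit.check-style = ./contrib/check-style.sh
    pretxncommit.check-style:run-with-plain = False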
1446 1454 coreconfigitem(
1447 1455 b'hgweb-paths',
1448 1456 b'.*',
1449 1457 default=list,
1450 1458 generic=True,
1451 1459 )
1452 1460 coreconfigitem(
1453 1461 b'hostfingerprints',
1454 1462 b'.*',
1455 1463 default=list,
1456 1464 generic=True,
1457 1465 )
1458 1466 coreconfigitem(
1459 1467 b'hostsecurity',
1460 1468 b'ciphers',
1461 1469 default=None,
1462 1470 )
1463 1471 coreconfigitem(
1464 1472 b'hostsecurity',
1465 1473 b'minimumprotocol',
1466 1474 default=dynamicdefault,
1467 1475 )
1468 1476 coreconfigitem(
1469 1477 b'hostsecurity',
1470 1478 b'.*:minimumprotocol$',
1471 1479 default=dynamicdefault,
1472 1480 generic=True,
1473 1481 )
1474 1482 coreconfigitem(
1475 1483 b'hostsecurity',
1476 1484 b'.*:ciphers$',
1477 1485 default=dynamicdefault,
1478 1486 generic=True,
1479 1487 )
1480 1488 coreconfigitem(
1481 1489 b'hostsecurity',
1482 1490 b'.*:fingerprints$',
1483 1491 default=list,
1484 1492 generic=True,
1485 1493 )
1486 1494 coreconfigitem(
1487 1495 b'hostsecurity',
1488 1496 b'.*:verifycertsfile$',
1489 1497 default=None,
1490 1498 generic=True,
1491 1499 )
1492 1500
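The hostsecurity generics above are keyed per host as host:setting. A hedged hgrc sketch (hostname is hypothetical and the fingerprint is a placeholder, not a real digest):

    [hostsecurity]
    hg.example.com:minimumprotocol = tls1.2
    hg.example.com:fingerprints = sha256:<hex digest of the server certificate>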
1493 1501 coreconfigitem(
1494 1502 b'http_proxy',
1495 1503 b'always',
1496 1504 default=False,
1497 1505 )
1498 1506 coreconfigitem(
1499 1507 b'http_proxy',
1500 1508 b'host',
1501 1509 default=None,
1502 1510 )
1503 1511 coreconfigitem(
1504 1512 b'http_proxy',
1505 1513 b'no',
1506 1514 default=list,
1507 1515 )
1508 1516 coreconfigitem(
1509 1517 b'http_proxy',
1510 1518 b'passwd',
1511 1519 default=None,
1512 1520 )
1513 1521 coreconfigitem(
1514 1522 b'http_proxy',
1515 1523 b'user',
1516 1524 default=None,
1517 1525 )
1518 1526
1519 1527 coreconfigitem(
1520 1528 b'http',
1521 1529 b'timeout',
1522 1530 default=None,
1523 1531 )
1524 1532
1525 1533 coreconfigitem(
1526 1534 b'logtoprocess',
1527 1535 b'commandexception',
1528 1536 default=None,
1529 1537 )
1530 1538 coreconfigitem(
1531 1539 b'logtoprocess',
1532 1540 b'commandfinish',
1533 1541 default=None,
1534 1542 )
1535 1543 coreconfigitem(
1536 1544 b'logtoprocess',
1537 1545 b'command',
1538 1546 default=None,
1539 1547 )
1540 1548 coreconfigitem(
1541 1549 b'logtoprocess',
1542 1550 b'develwarn',
1543 1551 default=None,
1544 1552 )
1545 1553 coreconfigitem(
1546 1554 b'logtoprocess',
1547 1555 b'uiblocked',
1548 1556 default=None,
1549 1557 )
1550 1558 coreconfigitem(
1551 1559 b'merge',
1552 1560 b'checkunknown',
1553 1561 default=b'abort',
1554 1562 )
1555 1563 coreconfigitem(
1556 1564 b'merge',
1557 1565 b'checkignored',
1558 1566 default=b'abort',
1559 1567 )
1560 1568 coreconfigitem(
1561 1569 b'experimental',
1562 1570 b'merge.checkpathconflicts',
1563 1571 default=False,
1564 1572 )
1565 1573 coreconfigitem(
1566 1574 b'merge',
1567 1575 b'followcopies',
1568 1576 default=True,
1569 1577 )
1570 1578 coreconfigitem(
1571 1579 b'merge',
1572 1580 b'on-failure',
1573 1581 default=b'continue',
1574 1582 )
1575 1583 coreconfigitem(
1576 1584 b'merge',
1577 1585 b'preferancestor',
1578 1586 default=lambda: [b'*'],
1579 1587 experimental=True,
1580 1588 )
1581 1589 coreconfigitem(
1582 1590 b'merge',
1583 1591 b'strict-capability-check',
1584 1592 default=False,
1585 1593 )
1586 1594 coreconfigitem(
1587 1595 b'merge-tools',
1588 1596 b'.*',
1589 1597 default=None,
1590 1598 generic=True,
1591 1599 )
1592 1600 coreconfigitem(
1593 1601 b'merge-tools',
1594 1602 br'.*\.args$',
1595 1603 default=b"$local $base $other",
1596 1604 generic=True,
1597 1605 priority=-1,
1598 1606 )
1599 1607 coreconfigitem(
1600 1608 b'merge-tools',
1601 1609 br'.*\.binary$',
1602 1610 default=False,
1603 1611 generic=True,
1604 1612 priority=-1,
1605 1613 )
1606 1614 coreconfigitem(
1607 1615 b'merge-tools',
1608 1616 br'.*\.check$',
1609 1617 default=list,
1610 1618 generic=True,
1611 1619 priority=-1,
1612 1620 )
1613 1621 coreconfigitem(
1614 1622 b'merge-tools',
1615 1623 br'.*\.checkchanged$',
1616 1624 default=False,
1617 1625 generic=True,
1618 1626 priority=-1,
1619 1627 )
1620 1628 coreconfigitem(
1621 1629 b'merge-tools',
1622 1630 br'.*\.executable$',
1623 1631 default=dynamicdefault,
1624 1632 generic=True,
1625 1633 priority=-1,
1626 1634 )
1627 1635 coreconfigitem(
1628 1636 b'merge-tools',
1629 1637 br'.*\.fixeol$',
1630 1638 default=False,
1631 1639 generic=True,
1632 1640 priority=-1,
1633 1641 )
1634 1642 coreconfigitem(
1635 1643 b'merge-tools',
1636 1644 br'.*\.gui$',
1637 1645 default=False,
1638 1646 generic=True,
1639 1647 priority=-1,
1640 1648 )
1641 1649 coreconfigitem(
1642 1650 b'merge-tools',
1643 1651 br'.*\.mergemarkers$',
1644 1652 default=b'basic',
1645 1653 generic=True,
1646 1654 priority=-1,
1647 1655 )
1648 1656 coreconfigitem(
1649 1657 b'merge-tools',
1650 1658 br'.*\.mergemarkertemplate$',
1651 1659 default=dynamicdefault, # take from command-templates.mergemarker
1652 1660 generic=True,
1653 1661 priority=-1,
1654 1662 )
1655 1663 coreconfigitem(
1656 1664 b'merge-tools',
1657 1665 br'.*\.priority$',
1658 1666 default=0,
1659 1667 generic=True,
1660 1668 priority=-1,
1661 1669 )
1662 1670 coreconfigitem(
1663 1671 b'merge-tools',
1664 1672 br'.*\.premerge$',
1665 1673 default=dynamicdefault,
1666 1674 generic=True,
1667 1675 priority=-1,
1668 1676 )
1669 1677 coreconfigitem(
1670 1678 b'merge-tools',
1671 1679 br'.*\.symlink$',
1672 1680 default=False,
1673 1681 generic=True,
1674 1682 priority=-1,
1675 1683 )
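The merge-tools items above are all generic patterns keyed as <tool>.<suffix>, so whatever tool name a user configures matches them. A hedged hgrc sketch (tool name and path are hypothetical):

    [merge-tools]
    mymerge.executable = /usr/local/bin/mymerge
    mymerge.args = $local $base $other -o $output
    mymerge.priority = 10
    mymerge.gui = True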
1676 1684 coreconfigitem(
1677 1685 b'pager',
1678 1686 b'attend-.*',
1679 1687 default=dynamicdefault,
1680 1688 generic=True,
1681 1689 )
1682 1690 coreconfigitem(
1683 1691 b'pager',
1684 1692 b'ignore',
1685 1693 default=list,
1686 1694 )
1687 1695 coreconfigitem(
1688 1696 b'pager',
1689 1697 b'pager',
1690 1698 default=dynamicdefault,
1691 1699 )
1692 1700 coreconfigitem(
1693 1701 b'patch',
1694 1702 b'eol',
1695 1703 default=b'strict',
1696 1704 )
1697 1705 coreconfigitem(
1698 1706 b'patch',
1699 1707 b'fuzz',
1700 1708 default=2,
1701 1709 )
1702 1710 coreconfigitem(
1703 1711 b'paths',
1704 1712 b'default',
1705 1713 default=None,
1706 1714 )
1707 1715 coreconfigitem(
1708 1716 b'paths',
1709 1717 b'default-push',
1710 1718 default=None,
1711 1719 )
1712 1720 coreconfigitem(
1713 1721 b'paths',
1714 1722 b'.*',
1715 1723 default=None,
1716 1724 generic=True,
1717 1725 )
1718 1726 coreconfigitem(
1719 1727 b'phases',
1720 1728 b'checksubrepos',
1721 1729 default=b'follow',
1722 1730 )
1723 1731 coreconfigitem(
1724 1732 b'phases',
1725 1733 b'new-commit',
1726 1734 default=b'draft',
1727 1735 )
1728 1736 coreconfigitem(
1729 1737 b'phases',
1730 1738 b'publish',
1731 1739 default=True,
1732 1740 )
1733 1741 coreconfigitem(
1734 1742 b'profiling',
1735 1743 b'enabled',
1736 1744 default=False,
1737 1745 )
1738 1746 coreconfigitem(
1739 1747 b'profiling',
1740 1748 b'format',
1741 1749 default=b'text',
1742 1750 )
1743 1751 coreconfigitem(
1744 1752 b'profiling',
1745 1753 b'freq',
1746 1754 default=1000,
1747 1755 )
1748 1756 coreconfigitem(
1749 1757 b'profiling',
1750 1758 b'limit',
1751 1759 default=30,
1752 1760 )
1753 1761 coreconfigitem(
1754 1762 b'profiling',
1755 1763 b'nested',
1756 1764 default=0,
1757 1765 )
1758 1766 coreconfigitem(
1759 1767 b'profiling',
1760 1768 b'output',
1761 1769 default=None,
1762 1770 )
1763 1771 coreconfigitem(
1764 1772 b'profiling',
1765 1773 b'showmax',
1766 1774 default=0.999,
1767 1775 )
1768 1776 coreconfigitem(
1769 1777 b'profiling',
1770 1778 b'showmin',
1771 1779 default=dynamicdefault,
1772 1780 )
1773 1781 coreconfigitem(
1774 1782 b'profiling',
1775 1783 b'showtime',
1776 1784 default=True,
1777 1785 )
1778 1786 coreconfigitem(
1779 1787 b'profiling',
1780 1788 b'sort',
1781 1789 default=b'inlinetime',
1782 1790 )
1783 1791 coreconfigitem(
1784 1792 b'profiling',
1785 1793 b'statformat',
1786 1794 default=b'hotpath',
1787 1795 )
1788 1796 coreconfigitem(
1789 1797 b'profiling',
1790 1798 b'time-track',
1791 1799 default=dynamicdefault,
1792 1800 )
1793 1801 coreconfigitem(
1794 1802 b'profiling',
1795 1803 b'type',
1796 1804 default=b'stat',
1797 1805 )
1798 1806 coreconfigitem(
1799 1807 b'progress',
1800 1808 b'assume-tty',
1801 1809 default=False,
1802 1810 )
1803 1811 coreconfigitem(
1804 1812 b'progress',
1805 1813 b'changedelay',
1806 1814 default=1,
1807 1815 )
1808 1816 coreconfigitem(
1809 1817 b'progress',
1810 1818 b'clear-complete',
1811 1819 default=True,
1812 1820 )
1813 1821 coreconfigitem(
1814 1822 b'progress',
1815 1823 b'debug',
1816 1824 default=False,
1817 1825 )
1818 1826 coreconfigitem(
1819 1827 b'progress',
1820 1828 b'delay',
1821 1829 default=3,
1822 1830 )
1823 1831 coreconfigitem(
1824 1832 b'progress',
1825 1833 b'disable',
1826 1834 default=False,
1827 1835 )
1828 1836 coreconfigitem(
1829 1837 b'progress',
1830 1838 b'estimateinterval',
1831 1839 default=60.0,
1832 1840 )
1833 1841 coreconfigitem(
1834 1842 b'progress',
1835 1843 b'format',
1836 1844 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1837 1845 )
1838 1846 coreconfigitem(
1839 1847 b'progress',
1840 1848 b'refresh',
1841 1849 default=0.1,
1842 1850 )
1843 1851 coreconfigitem(
1844 1852 b'progress',
1845 1853 b'width',
1846 1854 default=dynamicdefault,
1847 1855 )
1848 1856 coreconfigitem(
1849 1857 b'pull',
1850 1858 b'confirm',
1851 1859 default=False,
1852 1860 )
1853 1861 coreconfigitem(
1854 1862 b'push',
1855 1863 b'pushvars.server',
1856 1864 default=False,
1857 1865 )
1858 1866 coreconfigitem(
1859 1867 b'rewrite',
1860 1868 b'backup-bundle',
1861 1869 default=True,
1862 1870 alias=[(b'ui', b'history-editing-backup')],
1863 1871 )
1864 1872 coreconfigitem(
1865 1873 b'rewrite',
1866 1874 b'update-timestamp',
1867 1875 default=False,
1868 1876 )
1869 1877 coreconfigitem(
1870 1878 b'rewrite',
1871 1879 b'empty-successor',
1872 1880 default=b'skip',
1873 1881 experimental=True,
1874 1882 )
1875 1883 coreconfigitem(
1876 1884 b'storage',
1877 1885 b'new-repo-backend',
1878 1886 default=b'revlogv1',
1879 1887 experimental=True,
1880 1888 )
1881 1889 coreconfigitem(
1882 1890 b'storage',
1883 1891 b'revlog.optimize-delta-parent-choice',
1884 1892 default=True,
1885 1893 alias=[(b'format', b'aggressivemergedeltas')],
1886 1894 )
1887 1895 # experimental as long as rust is experimental (or a C version is implemented)
1888 1896 coreconfigitem(
1889 1897 b'storage',
1890 1898 b'revlog.persistent-nodemap.mmap',
1891 1899 default=True,
1892 1900 )
1893 1901 # experimental as long as format.use-persistent-nodemap is.
1894 1902 coreconfigitem(
1895 1903 b'storage',
1896 1904 b'revlog.persistent-nodemap.slow-path',
1897 1905 default=b"abort",
1898 1906 )
1899 1907
1900 1908 coreconfigitem(
1901 1909 b'storage',
1902 1910 b'revlog.reuse-external-delta',
1903 1911 default=True,
1904 1912 )
1905 1913 coreconfigitem(
1906 1914 b'storage',
1907 1915 b'revlog.reuse-external-delta-parent',
1908 1916 default=None,
1909 1917 )
1910 1918 coreconfigitem(
1911 1919 b'storage',
1912 1920 b'revlog.zlib.level',
1913 1921 default=None,
1914 1922 )
1915 1923 coreconfigitem(
1916 1924 b'storage',
1917 1925 b'revlog.zstd.level',
1918 1926 default=None,
1919 1927 )
1920 1928 coreconfigitem(
1921 1929 b'server',
1922 1930 b'bookmarks-pushkey-compat',
1923 1931 default=True,
1924 1932 )
1925 1933 coreconfigitem(
1926 1934 b'server',
1927 1935 b'bundle1',
1928 1936 default=True,
1929 1937 )
1930 1938 coreconfigitem(
1931 1939 b'server',
1932 1940 b'bundle1gd',
1933 1941 default=None,
1934 1942 )
1935 1943 coreconfigitem(
1936 1944 b'server',
1937 1945 b'bundle1.pull',
1938 1946 default=None,
1939 1947 )
1940 1948 coreconfigitem(
1941 1949 b'server',
1942 1950 b'bundle1gd.pull',
1943 1951 default=None,
1944 1952 )
1945 1953 coreconfigitem(
1946 1954 b'server',
1947 1955 b'bundle1.push',
1948 1956 default=None,
1949 1957 )
1950 1958 coreconfigitem(
1951 1959 b'server',
1952 1960 b'bundle1gd.push',
1953 1961 default=None,
1954 1962 )
1955 1963 coreconfigitem(
1956 1964 b'server',
1957 1965 b'bundle2.stream',
1958 1966 default=True,
1959 1967 alias=[(b'experimental', b'bundle2.stream')],
1960 1968 )
1961 1969 coreconfigitem(
1962 1970 b'server',
1963 1971 b'compressionengines',
1964 1972 default=list,
1965 1973 )
1966 1974 coreconfigitem(
1967 1975 b'server',
1968 1976 b'concurrent-push-mode',
1969 1977 default=b'check-related',
1970 1978 )
1971 1979 coreconfigitem(
1972 1980 b'server',
1973 1981 b'disablefullbundle',
1974 1982 default=False,
1975 1983 )
1976 1984 coreconfigitem(
1977 1985 b'server',
1978 1986 b'maxhttpheaderlen',
1979 1987 default=1024,
1980 1988 )
1981 1989 coreconfigitem(
1982 1990 b'server',
1983 1991 b'pullbundle',
1984 1992 default=False,
1985 1993 )
1986 1994 coreconfigitem(
1987 1995 b'server',
1988 1996 b'preferuncompressed',
1989 1997 default=False,
1990 1998 )
1991 1999 coreconfigitem(
1992 2000 b'server',
1993 2001 b'streamunbundle',
1994 2002 default=False,
1995 2003 )
1996 2004 coreconfigitem(
1997 2005 b'server',
1998 2006 b'uncompressed',
1999 2007 default=True,
2000 2008 )
2001 2009 coreconfigitem(
2002 2010 b'server',
2003 2011 b'uncompressedallowsecret',
2004 2012 default=False,
2005 2013 )
2006 2014 coreconfigitem(
2007 2015 b'server',
2008 2016 b'view',
2009 2017 default=b'served',
2010 2018 )
2011 2019 coreconfigitem(
2012 2020 b'server',
2013 2021 b'validate',
2014 2022 default=False,
2015 2023 )
2016 2024 coreconfigitem(
2017 2025 b'server',
2018 2026 b'zliblevel',
2019 2027 default=-1,
2020 2028 )
2021 2029 coreconfigitem(
2022 2030 b'server',
2023 2031 b'zstdlevel',
2024 2032 default=3,
2025 2033 )
2026 2034 coreconfigitem(
2027 2035 b'share',
2028 2036 b'pool',
2029 2037 default=None,
2030 2038 )
2031 2039 coreconfigitem(
2032 2040 b'share',
2033 2041 b'poolnaming',
2034 2042 default=b'identity',
2035 2043 )
2036 2044 coreconfigitem(
2037 2045 b'share',
2038 2046 b'safe-mismatch.source-not-safe',
2039 2047 default=b'abort',
2040 2048 )
2041 2049 coreconfigitem(
2042 2050 b'share',
2043 2051 b'safe-mismatch.source-safe',
2044 2052 default=b'abort',
2045 2053 )
2046 2054 coreconfigitem(
2047 2055 b'share',
2048 2056 b'safe-mismatch.source-not-safe.warn',
2049 2057 default=True,
2050 2058 )
2051 2059 coreconfigitem(
2052 2060 b'share',
2053 2061 b'safe-mismatch.source-safe.warn',
2054 2062 default=True,
2055 2063 )
2056 2064 coreconfigitem(
2057 2065 b'shelve',
2058 2066 b'maxbackups',
2059 2067 default=10,
2060 2068 )
2061 2069 coreconfigitem(
2062 2070 b'smtp',
2063 2071 b'host',
2064 2072 default=None,
2065 2073 )
2066 2074 coreconfigitem(
2067 2075 b'smtp',
2068 2076 b'local_hostname',
2069 2077 default=None,
2070 2078 )
2071 2079 coreconfigitem(
2072 2080 b'smtp',
2073 2081 b'password',
2074 2082 default=None,
2075 2083 )
2076 2084 coreconfigitem(
2077 2085 b'smtp',
2078 2086 b'port',
2079 2087 default=dynamicdefault,
2080 2088 )
2081 2089 coreconfigitem(
2082 2090 b'smtp',
2083 2091 b'tls',
2084 2092 default=b'none',
2085 2093 )
2086 2094 coreconfigitem(
2087 2095 b'smtp',
2088 2096 b'username',
2089 2097 default=None,
2090 2098 )
2091 2099 coreconfigitem(
2092 2100 b'sparse',
2093 2101 b'missingwarning',
2094 2102 default=True,
2095 2103 experimental=True,
2096 2104 )
2097 2105 coreconfigitem(
2098 2106 b'subrepos',
2099 2107 b'allowed',
2100 2108 default=dynamicdefault, # to make backporting simpler
2101 2109 )
2102 2110 coreconfigitem(
2103 2111 b'subrepos',
2104 2112 b'hg:allowed',
2105 2113 default=dynamicdefault,
2106 2114 )
2107 2115 coreconfigitem(
2108 2116 b'subrepos',
2109 2117 b'git:allowed',
2110 2118 default=dynamicdefault,
2111 2119 )
2112 2120 coreconfigitem(
2113 2121 b'subrepos',
2114 2122 b'svn:allowed',
2115 2123 default=dynamicdefault,
2116 2124 )
2117 2125 coreconfigitem(
2118 2126 b'templates',
2119 2127 b'.*',
2120 2128 default=None,
2121 2129 generic=True,
2122 2130 )
2123 2131 coreconfigitem(
2124 2132 b'templateconfig',
2125 2133 b'.*',
2126 2134 default=dynamicdefault,
2127 2135 generic=True,
2128 2136 )
2129 2137 coreconfigitem(
2130 2138 b'trusted',
2131 2139 b'groups',
2132 2140 default=list,
2133 2141 )
2134 2142 coreconfigitem(
2135 2143 b'trusted',
2136 2144 b'users',
2137 2145 default=list,
2138 2146 )
2139 2147 coreconfigitem(
2140 2148 b'ui',
2141 2149 b'_usedassubrepo',
2142 2150 default=False,
2143 2151 )
2144 2152 coreconfigitem(
2145 2153 b'ui',
2146 2154 b'allowemptycommit',
2147 2155 default=False,
2148 2156 )
2149 2157 coreconfigitem(
2150 2158 b'ui',
2151 2159 b'archivemeta',
2152 2160 default=True,
2153 2161 )
2154 2162 coreconfigitem(
2155 2163 b'ui',
2156 2164 b'askusername',
2157 2165 default=False,
2158 2166 )
2159 2167 coreconfigitem(
2160 2168 b'ui',
2161 2169 b'available-memory',
2162 2170 default=None,
2163 2171 )
2164 2172
2165 2173 coreconfigitem(
2166 2174 b'ui',
2167 2175 b'clonebundlefallback',
2168 2176 default=False,
2169 2177 )
2170 2178 coreconfigitem(
2171 2179 b'ui',
2172 2180 b'clonebundleprefers',
2173 2181 default=list,
2174 2182 )
2175 2183 coreconfigitem(
2176 2184 b'ui',
2177 2185 b'clonebundles',
2178 2186 default=True,
2179 2187 )
2180 2188 coreconfigitem(
2181 2189 b'ui',
2182 2190 b'color',
2183 2191 default=b'auto',
2184 2192 )
2185 2193 coreconfigitem(
2186 2194 b'ui',
2187 2195 b'commitsubrepos',
2188 2196 default=False,
2189 2197 )
2190 2198 coreconfigitem(
2191 2199 b'ui',
2192 2200 b'debug',
2193 2201 default=False,
2194 2202 )
2195 2203 coreconfigitem(
2196 2204 b'ui',
2197 2205 b'debugger',
2198 2206 default=None,
2199 2207 )
2200 2208 coreconfigitem(
2201 2209 b'ui',
2202 2210 b'editor',
2203 2211 default=dynamicdefault,
2204 2212 )
2205 2213 coreconfigitem(
2206 2214 b'ui',
2207 2215 b'detailed-exit-code',
2208 2216 default=False,
2209 2217 experimental=True,
2210 2218 )
2211 2219 coreconfigitem(
2212 2220 b'ui',
2213 2221 b'fallbackencoding',
2214 2222 default=None,
2215 2223 )
2216 2224 coreconfigitem(
2217 2225 b'ui',
2218 2226 b'forcecwd',
2219 2227 default=None,
2220 2228 )
2221 2229 coreconfigitem(
2222 2230 b'ui',
2223 2231 b'forcemerge',
2224 2232 default=None,
2225 2233 )
2226 2234 coreconfigitem(
2227 2235 b'ui',
2228 2236 b'formatdebug',
2229 2237 default=False,
2230 2238 )
2231 2239 coreconfigitem(
2232 2240 b'ui',
2233 2241 b'formatjson',
2234 2242 default=False,
2235 2243 )
2236 2244 coreconfigitem(
2237 2245 b'ui',
2238 2246 b'formatted',
2239 2247 default=None,
2240 2248 )
2241 2249 coreconfigitem(
2242 2250 b'ui',
2243 2251 b'interactive',
2244 2252 default=None,
2245 2253 )
2246 2254 coreconfigitem(
2247 2255 b'ui',
2248 2256 b'interface',
2249 2257 default=None,
2250 2258 )
2251 2259 coreconfigitem(
2252 2260 b'ui',
2253 2261 b'interface.chunkselector',
2254 2262 default=None,
2255 2263 )
2256 2264 coreconfigitem(
2257 2265 b'ui',
2258 2266 b'large-file-limit',
2259 2267 default=10000000,
2260 2268 )
2261 2269 coreconfigitem(
2262 2270 b'ui',
2263 2271 b'logblockedtimes',
2264 2272 default=False,
2265 2273 )
2266 2274 coreconfigitem(
2267 2275 b'ui',
2268 2276 b'merge',
2269 2277 default=None,
2270 2278 )
2271 2279 coreconfigitem(
2272 2280 b'ui',
2273 2281 b'mergemarkers',
2274 2282 default=b'basic',
2275 2283 )
2276 2284 coreconfigitem(
2277 2285 b'ui',
2278 2286 b'message-output',
2279 2287 default=b'stdio',
2280 2288 )
2281 2289 coreconfigitem(
2282 2290 b'ui',
2283 2291 b'nontty',
2284 2292 default=False,
2285 2293 )
2286 2294 coreconfigitem(
2287 2295 b'ui',
2288 2296 b'origbackuppath',
2289 2297 default=None,
2290 2298 )
2291 2299 coreconfigitem(
2292 2300 b'ui',
2293 2301 b'paginate',
2294 2302 default=True,
2295 2303 )
2296 2304 coreconfigitem(
2297 2305 b'ui',
2298 2306 b'patch',
2299 2307 default=None,
2300 2308 )
2301 2309 coreconfigitem(
2302 2310 b'ui',
2303 2311 b'portablefilenames',
2304 2312 default=b'warn',
2305 2313 )
2306 2314 coreconfigitem(
2307 2315 b'ui',
2308 2316 b'promptecho',
2309 2317 default=False,
2310 2318 )
2311 2319 coreconfigitem(
2312 2320 b'ui',
2313 2321 b'quiet',
2314 2322 default=False,
2315 2323 )
2316 2324 coreconfigitem(
2317 2325 b'ui',
2318 2326 b'quietbookmarkmove',
2319 2327 default=False,
2320 2328 )
2321 2329 coreconfigitem(
2322 2330 b'ui',
2323 2331 b'relative-paths',
2324 2332 default=b'legacy',
2325 2333 )
2326 2334 coreconfigitem(
2327 2335 b'ui',
2328 2336 b'remotecmd',
2329 2337 default=b'hg',
2330 2338 )
2331 2339 coreconfigitem(
2332 2340 b'ui',
2333 2341 b'report_untrusted',
2334 2342 default=True,
2335 2343 )
2336 2344 coreconfigitem(
2337 2345 b'ui',
2338 2346 b'rollback',
2339 2347 default=True,
2340 2348 )
2341 2349 coreconfigitem(
2342 2350 b'ui',
2343 2351 b'signal-safe-lock',
2344 2352 default=True,
2345 2353 )
2346 2354 coreconfigitem(
2347 2355 b'ui',
2348 2356 b'slash',
2349 2357 default=False,
2350 2358 )
2351 2359 coreconfigitem(
2352 2360 b'ui',
2353 2361 b'ssh',
2354 2362 default=b'ssh',
2355 2363 )
2356 2364 coreconfigitem(
2357 2365 b'ui',
2358 2366 b'ssherrorhint',
2359 2367 default=None,
2360 2368 )
2361 2369 coreconfigitem(
2362 2370 b'ui',
2363 2371 b'statuscopies',
2364 2372 default=False,
2365 2373 )
2366 2374 coreconfigitem(
2367 2375 b'ui',
2368 2376 b'strict',
2369 2377 default=False,
2370 2378 )
2371 2379 coreconfigitem(
2372 2380 b'ui',
2373 2381 b'style',
2374 2382 default=b'',
2375 2383 )
2376 2384 coreconfigitem(
2377 2385 b'ui',
2378 2386 b'supportcontact',
2379 2387 default=None,
2380 2388 )
2381 2389 coreconfigitem(
2382 2390 b'ui',
2383 2391 b'textwidth',
2384 2392 default=78,
2385 2393 )
2386 2394 coreconfigitem(
2387 2395 b'ui',
2388 2396 b'timeout',
2389 2397 default=b'600',
2390 2398 )
2391 2399 coreconfigitem(
2392 2400 b'ui',
2393 2401 b'timeout.warn',
2394 2402 default=0,
2395 2403 )
2396 2404 coreconfigitem(
2397 2405 b'ui',
2398 2406 b'timestamp-output',
2399 2407 default=False,
2400 2408 )
2401 2409 coreconfigitem(
2402 2410 b'ui',
2403 2411 b'traceback',
2404 2412 default=False,
2405 2413 )
2406 2414 coreconfigitem(
2407 2415 b'ui',
2408 2416 b'tweakdefaults',
2409 2417 default=False,
2410 2418 )
2411 2419 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2412 2420 coreconfigitem(
2413 2421 b'ui',
2414 2422 b'verbose',
2415 2423 default=False,
2416 2424 )
2417 2425 coreconfigitem(
2418 2426 b'verify',
2419 2427 b'skipflags',
2420 2428 default=None,
2421 2429 )
2422 2430 coreconfigitem(
2423 2431 b'web',
2424 2432 b'allowbz2',
2425 2433 default=False,
2426 2434 )
2427 2435 coreconfigitem(
2428 2436 b'web',
2429 2437 b'allowgz',
2430 2438 default=False,
2431 2439 )
2432 2440 coreconfigitem(
2433 2441 b'web',
2434 2442 b'allow-pull',
2435 2443 alias=[(b'web', b'allowpull')],
2436 2444 default=True,
2437 2445 )
2438 2446 coreconfigitem(
2439 2447 b'web',
2440 2448 b'allow-push',
2441 2449 alias=[(b'web', b'allow_push')],
2442 2450 default=list,
2443 2451 )
2444 2452 coreconfigitem(
2445 2453 b'web',
2446 2454 b'allowzip',
2447 2455 default=False,
2448 2456 )
2449 2457 coreconfigitem(
2450 2458 b'web',
2451 2459 b'archivesubrepos',
2452 2460 default=False,
2453 2461 )
2454 2462 coreconfigitem(
2455 2463 b'web',
2456 2464 b'cache',
2457 2465 default=True,
2458 2466 )
2459 2467 coreconfigitem(
2460 2468 b'web',
2461 2469 b'comparisoncontext',
2462 2470 default=5,
2463 2471 )
2464 2472 coreconfigitem(
2465 2473 b'web',
2466 2474 b'contact',
2467 2475 default=None,
2468 2476 )
2469 2477 coreconfigitem(
2470 2478 b'web',
2471 2479 b'deny_push',
2472 2480 default=list,
2473 2481 )
2474 2482 coreconfigitem(
2475 2483 b'web',
2476 2484 b'guessmime',
2477 2485 default=False,
2478 2486 )
2479 2487 coreconfigitem(
2480 2488 b'web',
2481 2489 b'hidden',
2482 2490 default=False,
2483 2491 )
2484 2492 coreconfigitem(
2485 2493 b'web',
2486 2494 b'labels',
2487 2495 default=list,
2488 2496 )
2489 2497 coreconfigitem(
2490 2498 b'web',
2491 2499 b'logoimg',
2492 2500 default=b'hglogo.png',
2493 2501 )
2494 2502 coreconfigitem(
2495 2503 b'web',
2496 2504 b'logourl',
2497 2505 default=b'https://mercurial-scm.org/',
2498 2506 )
2499 2507 coreconfigitem(
2500 2508 b'web',
2501 2509 b'accesslog',
2502 2510 default=b'-',
2503 2511 )
2504 2512 coreconfigitem(
2505 2513 b'web',
2506 2514 b'address',
2507 2515 default=b'',
2508 2516 )
2509 2517 coreconfigitem(
2510 2518 b'web',
2511 2519 b'allow-archive',
2512 2520 alias=[(b'web', b'allow_archive')],
2513 2521 default=list,
2514 2522 )
2515 2523 coreconfigitem(
2516 2524 b'web',
2517 2525 b'allow_read',
2518 2526 default=list,
2519 2527 )
2520 2528 coreconfigitem(
2521 2529 b'web',
2522 2530 b'baseurl',
2523 2531 default=None,
2524 2532 )
2525 2533 coreconfigitem(
2526 2534 b'web',
2527 2535 b'cacerts',
2528 2536 default=None,
2529 2537 )
2530 2538 coreconfigitem(
2531 2539 b'web',
2532 2540 b'certificate',
2533 2541 default=None,
2534 2542 )
2535 2543 coreconfigitem(
2536 2544 b'web',
2537 2545 b'collapse',
2538 2546 default=False,
2539 2547 )
2540 2548 coreconfigitem(
2541 2549 b'web',
2542 2550 b'csp',
2543 2551 default=None,
2544 2552 )
2545 2553 coreconfigitem(
2546 2554 b'web',
2547 2555 b'deny_read',
2548 2556 default=list,
2549 2557 )
2550 2558 coreconfigitem(
2551 2559 b'web',
2552 2560 b'descend',
2553 2561 default=True,
2554 2562 )
2555 2563 coreconfigitem(
2556 2564 b'web',
2557 2565 b'description',
2558 2566 default=b"",
2559 2567 )
2560 2568 coreconfigitem(
2561 2569 b'web',
2562 2570 b'encoding',
2563 2571 default=lambda: encoding.encoding,
2564 2572 )
2565 2573 coreconfigitem(
2566 2574 b'web',
2567 2575 b'errorlog',
2568 2576 default=b'-',
2569 2577 )
2570 2578 coreconfigitem(
2571 2579 b'web',
2572 2580 b'ipv6',
2573 2581 default=False,
2574 2582 )
2575 2583 coreconfigitem(
2576 2584 b'web',
2577 2585 b'maxchanges',
2578 2586 default=10,
2579 2587 )
2580 2588 coreconfigitem(
2581 2589 b'web',
2582 2590 b'maxfiles',
2583 2591 default=10,
2584 2592 )
2585 2593 coreconfigitem(
2586 2594 b'web',
2587 2595 b'maxshortchanges',
2588 2596 default=60,
2589 2597 )
2590 2598 coreconfigitem(
2591 2599 b'web',
2592 2600 b'motd',
2593 2601 default=b'',
2594 2602 )
2595 2603 coreconfigitem(
2596 2604 b'web',
2597 2605 b'name',
2598 2606 default=dynamicdefault,
2599 2607 )
2600 2608 coreconfigitem(
2601 2609 b'web',
2602 2610 b'port',
2603 2611 default=8000,
2604 2612 )
2605 2613 coreconfigitem(
2606 2614 b'web',
2607 2615 b'prefix',
2608 2616 default=b'',
2609 2617 )
2610 2618 coreconfigitem(
2611 2619 b'web',
2612 2620 b'push_ssl',
2613 2621 default=True,
2614 2622 )
2615 2623 coreconfigitem(
2616 2624 b'web',
2617 2625 b'refreshinterval',
2618 2626 default=20,
2619 2627 )
2620 2628 coreconfigitem(
2621 2629 b'web',
2622 2630 b'server-header',
2623 2631 default=None,
2624 2632 )
2625 2633 coreconfigitem(
2626 2634 b'web',
2627 2635 b'static',
2628 2636 default=None,
2629 2637 )
2630 2638 coreconfigitem(
2631 2639 b'web',
2632 2640 b'staticurl',
2633 2641 default=None,
2634 2642 )
2635 2643 coreconfigitem(
2636 2644 b'web',
2637 2645 b'stripes',
2638 2646 default=1,
2639 2647 )
2640 2648 coreconfigitem(
2641 2649 b'web',
2642 2650 b'style',
2643 2651 default=b'paper',
2644 2652 )
2645 2653 coreconfigitem(
2646 2654 b'web',
2647 2655 b'templates',
2648 2656 default=None,
2649 2657 )
2650 2658 coreconfigitem(
2651 2659 b'web',
2652 2660 b'view',
2653 2661 default=b'served',
2654 2662 experimental=True,
2655 2663 )
2656 2664 coreconfigitem(
2657 2665 b'worker',
2658 2666 b'backgroundclose',
2659 2667 default=dynamicdefault,
2660 2668 )
2661 2669 # Windows defaults to a limit of 512 open files. A buffer of 128
2662 2670 # should give us enough headway.
2663 2671 coreconfigitem(
2664 2672 b'worker',
2665 2673 b'backgroundclosemaxqueue',
2666 2674 default=384,
2667 2675 )
2668 2676 coreconfigitem(
2669 2677 b'worker',
2670 2678 b'backgroundcloseminfilecount',
2671 2679 default=2048,
2672 2680 )
2673 2681 coreconfigitem(
2674 2682 b'worker',
2675 2683 b'backgroundclosethreadcount',
2676 2684 default=4,
2677 2685 )
2678 2686 coreconfigitem(
2679 2687 b'worker',
2680 2688 b'enabled',
2681 2689 default=True,
2682 2690 )
2683 2691 coreconfigitem(
2684 2692 b'worker',
2685 2693 b'numcpus',
2686 2694 default=None,
2687 2695 )
2688 2696
2689 2697 # Rebase-related configuration moved to core because other extensions are doing
2690 2698 # strange things. For example, shelve imports the extension to reuse some bits
2691 2699 # without formally loading it.
2692 2700 coreconfigitem(
2693 2701 b'commands',
2694 2702 b'rebase.requiredest',
2695 2703 default=False,
2696 2704 )
2697 2705 coreconfigitem(
2698 2706 b'experimental',
2699 2707 b'rebaseskipobsolete',
2700 2708 default=True,
2701 2709 )
2702 2710 coreconfigitem(
2703 2711 b'rebase',
2704 2712 b'singletransaction',
2705 2713 default=False,
2706 2714 )
2707 2715 coreconfigitem(
2708 2716 b'rebase',
2709 2717 b'experimental.inmemory',
2710 2718 default=False,
2711 2719 )
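# A minimal sketch of how the items registered above are consumed; `ui` is
# assumed to be a mercurial.ui.ui instance. Once an item is declared with
# coreconfigitem(), ui.configbool()/configint() fall back to the declared
# default when the user has not set anything.
def example_worker_settings(ui):
    enabled = ui.configbool(b'worker', b'enabled')   # declared default: True
    numcpus = ui.configint(b'worker', b'numcpus')    # declared default: None
    return enabled, numcpus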
@@ -1,1952 +1,1954 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 encoding,
23 23 error,
24 24 match as matchmod,
25 25 pathutil,
26 26 policy,
27 27 pycompat,
28 28 scmutil,
29 29 sparse,
30 30 txnutil,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
42 44 propertycache = util.propertycache
43 45 filecache = scmutil.filecache
44 46 _rangemask = 0x7FFFFFFF
45 47
46 48 dirstatetuple = parsers.dirstatetuple
47 49
48 50
49 51 class repocache(filecache):
50 52 """filecache for files in .hg/"""
51 53
52 54 def join(self, obj, fname):
53 55 return obj._opener.join(fname)
54 56
55 57
56 58 class rootcache(filecache):
57 59 """filecache for files in the repository root"""
58 60
59 61 def join(self, obj, fname):
60 62 return obj._join(fname)
61 63
62 64
63 65 def _getfsnow(vfs):
64 66 '''Get "now" timestamp on filesystem'''
65 67 tmpfd, tmpname = vfs.mkstemp()
66 68 try:
67 69 return os.fstat(tmpfd)[stat.ST_MTIME]
68 70 finally:
69 71 os.close(tmpfd)
70 72 vfs.unlink(tmpname)
71 73
72 74
73 75 @interfaceutil.implementer(intdirstate.idirstate)
74 76 class dirstate(object):
75 77 def __init__(
76 78 self, opener, ui, root, validate, sparsematchfn, nodeconstants
77 79 ):
78 80 """Create a new dirstate object.
79 81
80 82 opener is an open()-like callable that can be used to open the
81 83 dirstate file; root is the root of the directory tracked by
82 84 the dirstate.
83 85 """
84 86 self._nodeconstants = nodeconstants
85 87 self._opener = opener
86 88 self._validate = validate
87 89 self._root = root
88 90 self._sparsematchfn = sparsematchfn
89 91         # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is a
90 92         # UNC path pointing to a root share (issue4557)
91 93 self._rootdir = pathutil.normasprefix(root)
92 94 self._dirty = False
93 95 self._lastnormaltime = 0
94 96 self._ui = ui
95 97 self._filecache = {}
96 98 self._parentwriters = 0
97 99 self._filename = b'dirstate'
98 100 self._pendingfilename = b'%s.pending' % self._filename
99 101 self._plchangecallbacks = {}
100 102 self._origpl = None
101 103 self._updatedfiles = set()
102 104 self._mapcls = dirstatemap
103 105 # Access and cache cwd early, so we don't access it for the first time
104 106 # after a working-copy update caused it to not exist (accessing it then
105 107 # raises an exception).
106 108 self._cwd
107 109
108 110 def prefetch_parents(self):
109 111 """make sure the parents are loaded
110 112
111 113 Used to avoid a race condition.
112 114 """
113 115 self._pl
114 116
115 117 @contextlib.contextmanager
116 118 def parentchange(self):
117 119 """Context manager for handling dirstate parents.
118 120
119 121 If an exception occurs in the scope of the context manager,
120 122 the incoherent dirstate won't be written when wlock is
121 123 released.
122 124 """
123 125 self._parentwriters += 1
124 126 yield
125 127 # Typically we want the "undo" step of a context manager in a
126 128 # finally block so it happens even when an exception
127 129 # occurs. In this case, however, we only want to decrement
128 130 # parentwriters if the code in the with statement exits
129 131 # normally, so we don't have a try/finally here on purpose.
130 132 self._parentwriters -= 1
131 133
132 134 def pendingparentchange(self):
133 135 """Returns true if the dirstate is in the middle of a set of changes
134 136 that modify the dirstate parent.
135 137 """
136 138 return self._parentwriters > 0
137 139
138 140 @propertycache
139 141 def _map(self):
140 142 """Return the dirstate contents (see documentation for dirstatemap)."""
141 143 self._map = self._mapcls(
142 144 self._ui, self._opener, self._root, self._nodeconstants
143 145 )
144 146 return self._map
145 147
146 148 @property
147 149 def _sparsematcher(self):
148 150 """The matcher for the sparse checkout.
149 151
150 152 The working directory may not include every file from a manifest. The
151 153 matcher obtained by this property will match a path if it is to be
152 154 included in the working directory.
153 155 """
154 156 # TODO there is potential to cache this property. For now, the matcher
155 157 # is resolved on every access. (But the called function does use a
156 158 # cache to keep the lookup fast.)
157 159 return self._sparsematchfn()
158 160
159 161 @repocache(b'branch')
160 162 def _branch(self):
161 163 try:
162 164 return self._opener.read(b"branch").strip() or b"default"
163 165 except IOError as inst:
164 166 if inst.errno != errno.ENOENT:
165 167 raise
166 168 return b"default"
167 169
168 170 @property
169 171 def _pl(self):
170 172 return self._map.parents()
171 173
172 174 def hasdir(self, d):
173 175 return self._map.hastrackeddir(d)
174 176
175 177 @rootcache(b'.hgignore')
176 178 def _ignore(self):
177 179 files = self._ignorefiles()
178 180 if not files:
179 181 return matchmod.never()
180 182
181 183 pats = [b'include:%s' % f for f in files]
182 184 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
183 185
184 186 @propertycache
185 187 def _slash(self):
186 188 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
187 189
188 190 @propertycache
189 191 def _checklink(self):
190 192 return util.checklink(self._root)
191 193
192 194 @propertycache
193 195 def _checkexec(self):
194 196 return bool(util.checkexec(self._root))
195 197
196 198 @propertycache
197 199 def _checkcase(self):
198 200 return not util.fscasesensitive(self._join(b'.hg'))
199 201
200 202 def _join(self, f):
201 203 # much faster than os.path.join()
202 204 # it's safe because f is always a relative path
203 205 return self._rootdir + f
204 206
205 207 def flagfunc(self, buildfallback):
206 208 if self._checklink and self._checkexec:
207 209
208 210 def f(x):
209 211 try:
210 212 st = os.lstat(self._join(x))
211 213 if util.statislink(st):
212 214 return b'l'
213 215 if util.statisexec(st):
214 216 return b'x'
215 217 except OSError:
216 218 pass
217 219 return b''
218 220
219 221 return f
220 222
221 223 fallback = buildfallback()
222 224 if self._checklink:
223 225
224 226 def f(x):
225 227 if os.path.islink(self._join(x)):
226 228 return b'l'
227 229 if b'x' in fallback(x):
228 230 return b'x'
229 231 return b''
230 232
231 233 return f
232 234 if self._checkexec:
233 235
234 236 def f(x):
235 237 if b'l' in fallback(x):
236 238 return b'l'
237 239 if util.isexec(self._join(x)):
238 240 return b'x'
239 241 return b''
240 242
241 243 return f
242 244 else:
243 245 return fallback
244 246
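# A hedged sketch of using the flag function built above; `buildfallback` is
# assumed to be supplied by the caller (e.g. the working context), and `ds` is
# assumed to be a dirstate instance.
def example_flags(ds, buildfallback, path):
    f = ds.flagfunc(buildfallback)
    return f(path)  # b'l' for a symlink, b'x' for an executable, b'' otherwise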
245 247 @propertycache
246 248 def _cwd(self):
247 249 # internal config: ui.forcecwd
248 250 forcecwd = self._ui.config(b'ui', b'forcecwd')
249 251 if forcecwd:
250 252 return forcecwd
251 253 return encoding.getcwd()
252 254
253 255 def getcwd(self):
254 256 """Return the path from which a canonical path is calculated.
255 257
256 258 This path should be used to resolve file patterns or to convert
257 259 canonical paths back to file paths for display. It shouldn't be
258 260 used to get real file paths. Use vfs functions instead.
259 261 """
260 262 cwd = self._cwd
261 263 if cwd == self._root:
262 264 return b''
263 265 # self._root ends with a path separator if self._root is '/' or 'C:\'
264 266 rootsep = self._root
265 267 if not util.endswithsep(rootsep):
266 268 rootsep += pycompat.ossep
267 269 if cwd.startswith(rootsep):
268 270 return cwd[len(rootsep) :]
269 271 else:
270 272 # we're outside the repo. return an absolute path.
271 273 return cwd
272 274
273 275 def pathto(self, f, cwd=None):
274 276 if cwd is None:
275 277 cwd = self.getcwd()
276 278 path = util.pathto(self._root, cwd, f)
277 279 if self._slash:
278 280 return util.pconvert(path)
279 281 return path
280 282
281 283 def __getitem__(self, key):
282 284 """Return the current state of key (a filename) in the dirstate.
283 285
284 286 States are:
285 287 n normal
286 288 m needs merging
287 289 r marked for removal
288 290 a marked for addition
289 291 ? not tracked
290 292 """
291 293 return self._map.get(key, (b"?",))[0]
292 294
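# Illustrative sketch of consuming the state codes listed above; this helper is
# hypothetical and not part of dirstate.
def example_is_tracked(ds, f):
    # b'?' means the path is unknown to the dirstate; b'n', b'a', b'm' and b'r'
    # all mean the dirstate has an entry for it.
    return ds[f] != b'?'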
293 295 def __contains__(self, key):
294 296 return key in self._map
295 297
296 298 def __iter__(self):
297 299 return iter(sorted(self._map))
298 300
299 301 def items(self):
300 302 return pycompat.iteritems(self._map)
301 303
302 304 iteritems = items
303 305
304 306 def parents(self):
305 307 return [self._validate(p) for p in self._pl]
306 308
307 309 def p1(self):
308 310 return self._validate(self._pl[0])
309 311
310 312 def p2(self):
311 313 return self._validate(self._pl[1])
312 314
313 315 def branch(self):
314 316 return encoding.tolocal(self._branch)
315 317
316 318 def setparents(self, p1, p2=None):
317 319 """Set dirstate parents to p1 and p2.
318 320
319 321         When moving from two parents to one, 'm' merged entries are
320 322 adjusted to normal and previous copy records discarded and
321 323 returned by the call.
322 324
323 325 See localrepo.setparents()
324 326 """
325 327 if p2 is None:
326 328 p2 = self._nodeconstants.nullid
327 329 if self._parentwriters == 0:
328 330 raise ValueError(
329 331 b"cannot set dirstate parent outside of "
330 332 b"dirstate.parentchange context manager"
331 333 )
332 334
333 335 self._dirty = True
334 336 oldp2 = self._pl[1]
335 337 if self._origpl is None:
336 338 self._origpl = self._pl
337 339 self._map.setparents(p1, p2)
338 340 copies = {}
339 341 if (
340 342 oldp2 != self._nodeconstants.nullid
341 343 and p2 == self._nodeconstants.nullid
342 344 ):
343 345 candidatefiles = self._map.non_normal_or_other_parent_paths()
344 346
345 347 for f in candidatefiles:
346 348 s = self._map.get(f)
347 349 if s is None:
348 350 continue
349 351
350 352 # Discard 'm' markers when moving away from a merge state
351 353 if s[0] == b'm':
352 354 source = self._map.copymap.get(f)
353 355 if source:
354 356 copies[f] = source
355 357 self.normallookup(f)
356 358 # Also fix up otherparent markers
357 359 elif s[0] == b'n' and s[2] == -2:
358 360 source = self._map.copymap.get(f)
359 361 if source:
360 362 copies[f] = source
361 363 self.add(f)
362 364 return copies
363 365
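# A minimal sketch (hypothetical caller) of the contract enforced above:
# setparents() must run inside the parentchange() context manager, otherwise it
# raises ValueError; p2 defaults to the null node.
def example_leave_merge(ds, p1):
    with ds.parentchange():
        copies = ds.setparents(p1)
    return copies  # copy records discarded while collapsing back to one parent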
364 366 def setbranch(self, branch):
365 367 self.__class__._branch.set(self, encoding.fromlocal(branch))
366 368 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
367 369 try:
368 370 f.write(self._branch + b'\n')
369 371 f.close()
370 372
371 373 # make sure filecache has the correct stat info for _branch after
372 374 # replacing the underlying file
373 375 ce = self._filecache[b'_branch']
374 376 if ce:
375 377 ce.refresh()
376 378 except: # re-raises
377 379 f.discard()
378 380 raise
379 381
380 382 def invalidate(self):
381 383 """Causes the next access to reread the dirstate.
382 384
383 385 This is different from localrepo.invalidatedirstate() because it always
384 386 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
385 387 check whether the dirstate has changed before rereading it."""
386 388
387 389 for a in ("_map", "_branch", "_ignore"):
388 390 if a in self.__dict__:
389 391 delattr(self, a)
390 392 self._lastnormaltime = 0
391 393 self._dirty = False
392 394 self._updatedfiles.clear()
393 395 self._parentwriters = 0
394 396 self._origpl = None
395 397
396 398 def copy(self, source, dest):
397 399 """Mark dest as a copy of source. Unmark dest if source is None."""
398 400 if source == dest:
399 401 return
400 402 self._dirty = True
401 403 if source is not None:
402 404 self._map.copymap[dest] = source
403 405 self._updatedfiles.add(source)
404 406 self._updatedfiles.add(dest)
405 407 elif self._map.copymap.pop(dest, None):
406 408 self._updatedfiles.add(dest)
407 409
408 410 def copied(self, file):
409 411 return self._map.copymap.get(file, None)
410 412
411 413 def copies(self):
412 414 return self._map.copymap
413 415
414 416 def _addpath(self, f, state, mode, size, mtime):
415 417 oldstate = self[f]
416 418 if state == b'a' or oldstate == b'r':
417 419 scmutil.checkfilename(f)
418 420 if self._map.hastrackeddir(f):
419 421 raise error.Abort(
420 422 _(b'directory %r already in dirstate') % pycompat.bytestr(f)
421 423 )
422 424 # shadows
423 425 for d in pathutil.finddirs(f):
424 426 if self._map.hastrackeddir(d):
425 427 break
426 428 entry = self._map.get(d)
427 429 if entry is not None and entry[0] != b'r':
428 430 raise error.Abort(
429 431 _(b'file %r in dirstate clashes with %r')
430 432 % (pycompat.bytestr(d), pycompat.bytestr(f))
431 433 )
432 434 self._dirty = True
433 435 self._updatedfiles.add(f)
434 436 self._map.addfile(f, oldstate, state, mode, size, mtime)
435 437
436 438 def normal(self, f, parentfiledata=None):
437 439 """Mark a file normal and clean.
438 440
439 441 parentfiledata: (mode, size, mtime) of the clean file
440 442
441 443 parentfiledata should be computed from memory (for mode,
442 444         size), at or as close as possible to the point where we
443 445 determined the file was clean, to limit the risk of the
444 446 file having been changed by an external process between the
445 447 moment where the file was determined to be clean and now."""
446 448 if parentfiledata:
447 449 (mode, size, mtime) = parentfiledata
448 450 else:
449 451 s = os.lstat(self._join(f))
450 452 mode = s.st_mode
451 453 size = s.st_size
452 454 mtime = s[stat.ST_MTIME]
453 455 self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
454 456 self._map.copymap.pop(f, None)
455 457 if f in self._map.nonnormalset:
456 458 self._map.nonnormalset.remove(f)
457 459 if mtime > self._lastnormaltime:
458 460 # Remember the most recent modification timeslot for status(),
459 461 # to make sure we won't miss future size-preserving file content
460 462 # modifications that happen within the same timeslot.
461 463 self._lastnormaltime = mtime
462 464
463 465 def normallookup(self, f):
464 466 '''Mark a file normal, but possibly dirty.'''
465 467 if self._pl[1] != self._nodeconstants.nullid:
466 468 # if there is a merge going on and the file was either
467 469 # in state 'm' (-1) or coming from other parent (-2) before
468 470 # being removed, restore that state.
469 471 entry = self._map.get(f)
470 472 if entry is not None:
471 473 if entry[0] == b'r' and entry[2] in (-1, -2):
472 474 source = self._map.copymap.get(f)
473 475 if entry[2] == -1:
474 476 self.merge(f)
475 477 elif entry[2] == -2:
476 478 self.otherparent(f)
477 479 if source:
478 480 self.copy(source, f)
479 481 return
480 482 if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
481 483 return
482 484 self._addpath(f, b'n', 0, -1, -1)
483 485 self._map.copymap.pop(f, None)
484 486
485 487 def otherparent(self, f):
486 488 '''Mark as coming from the other parent, always dirty.'''
487 489 if self._pl[1] == self._nodeconstants.nullid:
488 490 raise error.Abort(
489 491 _(b"setting %r to other parent only allowed in merges") % f
490 492 )
491 493 if f in self and self[f] == b'n':
492 494 # merge-like
493 495 self._addpath(f, b'm', 0, -2, -1)
494 496 else:
495 497 # add-like
496 498 self._addpath(f, b'n', 0, -2, -1)
497 499 self._map.copymap.pop(f, None)
498 500
499 501 def add(self, f):
500 502 '''Mark a file added.'''
501 503 self._addpath(f, b'a', 0, -1, -1)
502 504 self._map.copymap.pop(f, None)
503 505
504 506 def remove(self, f):
505 507 '''Mark a file removed.'''
506 508 self._dirty = True
507 509 oldstate = self[f]
508 510 size = 0
509 511 if self._pl[1] != self._nodeconstants.nullid:
510 512 entry = self._map.get(f)
511 513 if entry is not None:
512 514 # backup the previous state
513 515 if entry[0] == b'm': # merge
514 516 size = -1
515 517 elif entry[0] == b'n' and entry[2] == -2: # other parent
516 518 size = -2
517 519 self._map.otherparentset.add(f)
518 520 self._updatedfiles.add(f)
519 521 self._map.removefile(f, oldstate, size)
520 522 if size == 0:
521 523 self._map.copymap.pop(f, None)
522 524
523 525 def merge(self, f):
524 526 '''Mark a file merged.'''
525 527 if self._pl[1] == self._nodeconstants.nullid:
526 528 return self.normallookup(f)
527 529 return self.otherparent(f)
528 530
529 531 def drop(self, f):
530 532 '''Drop a file from the dirstate'''
531 533 oldstate = self[f]
532 534 if self._map.dropfile(f, oldstate):
533 535 self._dirty = True
534 536 self._updatedfiles.add(f)
535 537 self._map.copymap.pop(f, None)
536 538
537 539 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
538 540 if exists is None:
539 541 exists = os.path.lexists(os.path.join(self._root, path))
540 542 if not exists:
541 543 # Maybe a path component exists
542 544 if not ignoremissing and b'/' in path:
543 545 d, f = path.rsplit(b'/', 1)
544 546 d = self._normalize(d, False, ignoremissing, None)
545 547 folded = d + b"/" + f
546 548 else:
547 549 # No path components, preserve original case
548 550 folded = path
549 551 else:
550 552 # recursively normalize leading directory components
551 553 # against dirstate
552 554 if b'/' in normed:
553 555 d, f = normed.rsplit(b'/', 1)
554 556 d = self._normalize(d, False, ignoremissing, True)
555 557 r = self._root + b"/" + d
556 558 folded = d + b"/" + util.fspath(f, r)
557 559 else:
558 560 folded = util.fspath(normed, self._root)
559 561 storemap[normed] = folded
560 562
561 563 return folded
562 564
563 565 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
564 566 normed = util.normcase(path)
565 567 folded = self._map.filefoldmap.get(normed, None)
566 568 if folded is None:
567 569 if isknown:
568 570 folded = path
569 571 else:
570 572 folded = self._discoverpath(
571 573 path, normed, ignoremissing, exists, self._map.filefoldmap
572 574 )
573 575 return folded
574 576
575 577 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
576 578 normed = util.normcase(path)
577 579 folded = self._map.filefoldmap.get(normed, None)
578 580 if folded is None:
579 581 folded = self._map.dirfoldmap.get(normed, None)
580 582 if folded is None:
581 583 if isknown:
582 584 folded = path
583 585 else:
584 586 # store discovered result in dirfoldmap so that future
585 587 # normalizefile calls don't start matching directories
586 588 folded = self._discoverpath(
587 589 path, normed, ignoremissing, exists, self._map.dirfoldmap
588 590 )
589 591 return folded
590 592
591 593 def normalize(self, path, isknown=False, ignoremissing=False):
592 594 """
593 595 normalize the case of a pathname when on a casefolding filesystem
594 596
595 597 isknown specifies whether the filename came from walking the
596 598 disk, to avoid extra filesystem access.
597 599
598 600         If ignoremissing is True, missing paths are returned
599 601 unchanged. Otherwise, we try harder to normalize possibly
600 602 existing path components.
601 603
602 604 The normalized case is determined based on the following precedence:
603 605
604 606 - version of name already stored in the dirstate
605 607 - version of name stored on disk
606 608 - version provided via command arguments
607 609 """
608 610
609 611 if self._checkcase:
610 612 return self._normalize(path, isknown, ignoremissing)
611 613 return path
612 614
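# Hedged sketch: on a case-folding filesystem, normalize() returns the spelling
# already known to the dirstate or on disk; the path literal is invented for
# illustration and `ds` is assumed to be a dirstate instance.
def example_normalize(ds):
    return ds.normalize(b'readme.TXT', isknown=False, ignoremissing=True)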
613 615 def clear(self):
614 616 self._map.clear()
615 617 self._lastnormaltime = 0
616 618 self._updatedfiles.clear()
617 619 self._dirty = True
618 620
619 621 def rebuild(self, parent, allfiles, changedfiles=None):
620 622 if changedfiles is None:
621 623 # Rebuild entire dirstate
622 624 to_lookup = allfiles
623 625 to_drop = []
624 626 lastnormaltime = self._lastnormaltime
625 627 self.clear()
626 628 self._lastnormaltime = lastnormaltime
627 629 elif len(changedfiles) < 10:
628 630 # Avoid turning allfiles into a set, which can be expensive if it's
629 631 # large.
630 632 to_lookup = []
631 633 to_drop = []
632 634 for f in changedfiles:
633 635 if f in allfiles:
634 636 to_lookup.append(f)
635 637 else:
636 638 to_drop.append(f)
637 639 else:
638 640 changedfilesset = set(changedfiles)
639 641 to_lookup = changedfilesset & set(allfiles)
640 642 to_drop = changedfilesset - to_lookup
641 643
642 644 if self._origpl is None:
643 645 self._origpl = self._pl
644 646 self._map.setparents(parent, self._nodeconstants.nullid)
645 647
646 648 for f in to_lookup:
647 649 self.normallookup(f)
648 650 for f in to_drop:
649 651 self.drop(f)
650 652
651 653 self._dirty = True
652 654
653 655 def identity(self):
654 656 """Return identity of dirstate itself to detect changing in storage
655 657
656 658 If identity of previous dirstate is equal to this, writing
657 659 changes based on the former dirstate out can keep consistency.
658 660 """
659 661 return self._map.identity
660 662
661 663 def write(self, tr):
662 664 if not self._dirty:
663 665 return
664 666
665 667 filename = self._filename
666 668 if tr:
667 669 # 'dirstate.write()' is not only for writing in-memory
668 670             # changes out, but also for dropping ambiguous timestamps.
669 671             # delayed writing re-raises the "ambiguous timestamp issue".
670 672 # See also the wiki page below for detail:
671 673 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
672 674
673 675 # emulate dropping timestamp in 'parsers.pack_dirstate'
674 676 now = _getfsnow(self._opener)
675 677 self._map.clearambiguoustimes(self._updatedfiles, now)
676 678
677 679 # emulate that all 'dirstate.normal' results are written out
678 680 self._lastnormaltime = 0
679 681 self._updatedfiles.clear()
680 682
681 683 # delay writing in-memory changes out
682 684 tr.addfilegenerator(
683 685 b'dirstate',
684 686 (self._filename,),
685 687 self._writedirstate,
686 688 location=b'plain',
687 689 )
688 690 return
689 691
690 692 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
691 693 self._writedirstate(st)
692 694
693 695 def addparentchangecallback(self, category, callback):
694 696 """add a callback to be called when the wd parents are changed
695 697
696 698 Callback will be called with the following arguments:
697 699 dirstate, (oldp1, oldp2), (newp1, newp2)
698 700
699 701 Category is a unique identifier to allow overwriting an old callback
700 702 with a newer callback.
701 703 """
702 704 self._plchangecallbacks[category] = callback
703 705
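# A small sketch of the callback contract documented above; the category name
# is an arbitrary assumption.
def example_watch_parents(ds, ui):
    def on_parents_changed(dirstate, oldparents, newparents):
        ui.debug(b'dirstate parents changed\n')

    ds.addparentchangecallback(b'example-watcher', on_parents_changed)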
704 706 def _writedirstate(self, st):
705 707 # notify callbacks about parents change
706 708 if self._origpl is not None and self._origpl != self._pl:
707 709 for c, callback in sorted(
708 710 pycompat.iteritems(self._plchangecallbacks)
709 711 ):
710 712 callback(self, self._origpl, self._pl)
711 713 self._origpl = None
712 714 # use the modification time of the newly created temporary file as the
713 715 # filesystem's notion of 'now'
714 716 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
715 717
716 718         # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
717 719         # the timestamp of each entry in the dirstate, because of 'now > mtime'
718 720 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
719 721 if delaywrite > 0:
720 722 # do we have any files to delay for?
721 723 for f, e in pycompat.iteritems(self._map):
722 724 if e[0] == b'n' and e[3] == now:
723 725 import time # to avoid useless import
724 726
725 727 # rather than sleep n seconds, sleep until the next
726 728 # multiple of n seconds
727 729 clock = time.time()
728 730 start = int(clock) - (int(clock) % delaywrite)
729 731 end = start + delaywrite
730 732 time.sleep(end - clock)
731 733 now = end # trust our estimate that the end is near now
732 734 break
733 735
734 736 self._map.write(st, now)
735 737 self._lastnormaltime = 0
736 738 self._dirty = False
737 739
738 740 def _dirignore(self, f):
739 741 if self._ignore(f):
740 742 return True
741 743 for p in pathutil.finddirs(f):
742 744 if self._ignore(p):
743 745 return True
744 746 return False
745 747
746 748 def _ignorefiles(self):
747 749 files = []
748 750 if os.path.exists(self._join(b'.hgignore')):
749 751 files.append(self._join(b'.hgignore'))
750 752 for name, path in self._ui.configitems(b"ui"):
751 753 if name == b'ignore' or name.startswith(b'ignore.'):
752 754 # we need to use os.path.join here rather than self._join
753 755 # because path is arbitrary and user-specified
754 756 files.append(os.path.join(self._rootdir, util.expandpath(path)))
755 757 return files
756 758
757 759 def _ignorefileandline(self, f):
758 760 files = collections.deque(self._ignorefiles())
759 761 visited = set()
760 762 while files:
761 763 i = files.popleft()
762 764 patterns = matchmod.readpatternfile(
763 765 i, self._ui.warn, sourceinfo=True
764 766 )
765 767 for pattern, lineno, line in patterns:
766 768 kind, p = matchmod._patsplit(pattern, b'glob')
767 769 if kind == b"subinclude":
768 770 if p not in visited:
769 771 files.append(p)
770 772 continue
771 773 m = matchmod.match(
772 774 self._root, b'', [], [pattern], warn=self._ui.warn
773 775 )
774 776 if m(f):
775 777 return (i, lineno, line)
776 778 visited.add(i)
777 779 return (None, -1, b"")
778 780
779 781 def _walkexplicit(self, match, subrepos):
780 782 """Get stat data about the files explicitly specified by match.
781 783
782 784 Return a triple (results, dirsfound, dirsnotfound).
783 785 - results is a mapping from filename to stat result. It also contains
784 786 listings mapping subrepos and .hg to None.
785 787 - dirsfound is a list of files found to be directories.
786 788 - dirsnotfound is a list of files that the dirstate thinks are
787 789 directories and that were not found."""
788 790
789 791 def badtype(mode):
790 792 kind = _(b'unknown')
791 793 if stat.S_ISCHR(mode):
792 794 kind = _(b'character device')
793 795 elif stat.S_ISBLK(mode):
794 796 kind = _(b'block device')
795 797 elif stat.S_ISFIFO(mode):
796 798 kind = _(b'fifo')
797 799 elif stat.S_ISSOCK(mode):
798 800 kind = _(b'socket')
799 801 elif stat.S_ISDIR(mode):
800 802 kind = _(b'directory')
801 803 return _(b'unsupported file type (type is %s)') % kind
802 804
803 805 badfn = match.bad
804 806 dmap = self._map
805 807 lstat = os.lstat
806 808 getkind = stat.S_IFMT
807 809 dirkind = stat.S_IFDIR
808 810 regkind = stat.S_IFREG
809 811 lnkkind = stat.S_IFLNK
810 812 join = self._join
811 813 dirsfound = []
812 814 foundadd = dirsfound.append
813 815 dirsnotfound = []
814 816 notfoundadd = dirsnotfound.append
815 817
816 818 if not match.isexact() and self._checkcase:
817 819 normalize = self._normalize
818 820 else:
819 821 normalize = None
820 822
821 823 files = sorted(match.files())
822 824 subrepos.sort()
823 825 i, j = 0, 0
824 826 while i < len(files) and j < len(subrepos):
825 827 subpath = subrepos[j] + b"/"
826 828 if files[i] < subpath:
827 829 i += 1
828 830 continue
829 831 while i < len(files) and files[i].startswith(subpath):
830 832 del files[i]
831 833 j += 1
832 834
833 835 if not files or b'' in files:
834 836 files = [b'']
835 837 # constructing the foldmap is expensive, so don't do it for the
836 838 # common case where files is ['']
837 839 normalize = None
838 840 results = dict.fromkeys(subrepos)
839 841 results[b'.hg'] = None
840 842
841 843 for ff in files:
842 844 if normalize:
843 845 nf = normalize(ff, False, True)
844 846 else:
845 847 nf = ff
846 848 if nf in results:
847 849 continue
848 850
849 851 try:
850 852 st = lstat(join(nf))
851 853 kind = getkind(st.st_mode)
852 854 if kind == dirkind:
853 855 if nf in dmap:
854 856 # file replaced by dir on disk but still in dirstate
855 857 results[nf] = None
856 858 foundadd((nf, ff))
857 859 elif kind == regkind or kind == lnkkind:
858 860 results[nf] = st
859 861 else:
860 862 badfn(ff, badtype(kind))
861 863 if nf in dmap:
862 864 results[nf] = None
863 865 except OSError as inst: # nf not found on disk - it is dirstate only
864 866 if nf in dmap: # does it exactly match a missing file?
865 867 results[nf] = None
866 868 else: # does it match a missing directory?
867 869 if self._map.hasdir(nf):
868 870 notfoundadd(nf)
869 871 else:
870 872 badfn(ff, encoding.strtolocal(inst.strerror))
871 873
872 874 # match.files() may contain explicitly-specified paths that shouldn't
873 875 # be taken; drop them from the list of files found. dirsfound/notfound
874 876 # aren't filtered here because they will be tested later.
875 877 if match.anypats():
876 878 for f in list(results):
877 879 if f == b'.hg' or f in subrepos:
878 880 # keep sentinel to disable further out-of-repo walks
879 881 continue
880 882 if not match(f):
881 883 del results[f]
882 884
883 885 # Case insensitive filesystems cannot rely on lstat() failing to detect
884 886 # a case-only rename. Prune the stat object for any file that does not
885 887 # match the case in the filesystem, if there are multiple files that
886 888 # normalize to the same path.
887 889 if match.isexact() and self._checkcase:
888 890 normed = {}
889 891
890 892 for f, st in pycompat.iteritems(results):
891 893 if st is None:
892 894 continue
893 895
894 896 nc = util.normcase(f)
895 897 paths = normed.get(nc)
896 898
897 899 if paths is None:
898 900 paths = set()
899 901 normed[nc] = paths
900 902
901 903 paths.add(f)
902 904
903 905 for norm, paths in pycompat.iteritems(normed):
904 906 if len(paths) > 1:
905 907 for path in paths:
906 908 folded = self._discoverpath(
907 909 path, norm, True, None, self._map.dirfoldmap
908 910 )
909 911 if path != folded:
910 912 results[path] = None
911 913
912 914 return results, dirsfound, dirsnotfound
913 915
914 916 def walk(self, match, subrepos, unknown, ignored, full=True):
915 917 """
916 918 Walk recursively through the directory tree, finding all files
917 919 matched by match.
918 920
919 921 If full is False, maybe skip some known-clean files.
920 922
921 923 Return a dict mapping filename to stat-like object (either
922 924 mercurial.osutil.stat instance or return value of os.stat()).
923 925
924 926 """
925 927 # full is a flag that extensions that hook into walk can use -- this
926 928 # implementation doesn't use it at all. This satisfies the contract
927 929 # because we only guarantee a "maybe".
928 930
929 931 if ignored:
930 932 ignore = util.never
931 933 dirignore = util.never
932 934 elif unknown:
933 935 ignore = self._ignore
934 936 dirignore = self._dirignore
935 937 else:
936 938 # if not unknown and not ignored, drop dir recursion and step 2
937 939 ignore = util.always
938 940 dirignore = util.always
939 941
940 942 matchfn = match.matchfn
941 943 matchalways = match.always()
942 944 matchtdir = match.traversedir
943 945 dmap = self._map
944 946 listdir = util.listdir
945 947 lstat = os.lstat
946 948 dirkind = stat.S_IFDIR
947 949 regkind = stat.S_IFREG
948 950 lnkkind = stat.S_IFLNK
949 951 join = self._join
950 952
951 953 exact = skipstep3 = False
952 954 if match.isexact(): # match.exact
953 955 exact = True
954 956 dirignore = util.always # skip step 2
955 957 elif match.prefix(): # match.match, no patterns
956 958 skipstep3 = True
957 959
958 960 if not exact and self._checkcase:
959 961 normalize = self._normalize
960 962 normalizefile = self._normalizefile
961 963 skipstep3 = False
962 964 else:
963 965 normalize = self._normalize
964 966 normalizefile = None
965 967
966 968 # step 1: find all explicit files
967 969 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
968 970 if matchtdir:
969 971 for d in work:
970 972 matchtdir(d[0])
971 973 for d in dirsnotfound:
972 974 matchtdir(d)
973 975
974 976 skipstep3 = skipstep3 and not (work or dirsnotfound)
975 977 work = [d for d in work if not dirignore(d[0])]
976 978
977 979 # step 2: visit subdirectories
978 980 def traverse(work, alreadynormed):
979 981 wadd = work.append
980 982 while work:
981 983 tracing.counter('dirstate.walk work', len(work))
982 984 nd = work.pop()
983 985 visitentries = match.visitchildrenset(nd)
984 986 if not visitentries:
985 987 continue
986 988 if visitentries == b'this' or visitentries == b'all':
987 989 visitentries = None
988 990 skip = None
989 991 if nd != b'':
990 992 skip = b'.hg'
991 993 try:
992 994 with tracing.log('dirstate.walk.traverse listdir %s', nd):
993 995 entries = listdir(join(nd), stat=True, skip=skip)
994 996 except OSError as inst:
995 997 if inst.errno in (errno.EACCES, errno.ENOENT):
996 998 match.bad(
997 999 self.pathto(nd), encoding.strtolocal(inst.strerror)
998 1000 )
999 1001 continue
1000 1002 raise
1001 1003 for f, kind, st in entries:
1002 1004 # Some matchers may return files in the visitentries set,
1003 1005 # instead of 'this', if the matcher explicitly mentions them
1004 1006 # and is not an exactmatcher. This is acceptable; we do not
1005 1007 # make any hard assumptions about file-or-directory below
1006 1008 # based on the presence of `f` in visitentries. If
1007 1009 # visitchildrenset returned a set, we can always skip the
1008 1010 # entries *not* in the set it provided regardless of whether
1009 1011 # they're actually a file or a directory.
1010 1012 if visitentries and f not in visitentries:
1011 1013 continue
1012 1014 if normalizefile:
1013 1015 # even though f might be a directory, we're only
1014 1016 # interested in comparing it to files currently in the
1015 1017 # dmap -- therefore normalizefile is enough
1016 1018 nf = normalizefile(
1017 1019 nd and (nd + b"/" + f) or f, True, True
1018 1020 )
1019 1021 else:
1020 1022 nf = nd and (nd + b"/" + f) or f
1021 1023 if nf not in results:
1022 1024 if kind == dirkind:
1023 1025 if not ignore(nf):
1024 1026 if matchtdir:
1025 1027 matchtdir(nf)
1026 1028 wadd(nf)
1027 1029 if nf in dmap and (matchalways or matchfn(nf)):
1028 1030 results[nf] = None
1029 1031 elif kind == regkind or kind == lnkkind:
1030 1032 if nf in dmap:
1031 1033 if matchalways or matchfn(nf):
1032 1034 results[nf] = st
1033 1035 elif (matchalways or matchfn(nf)) and not ignore(
1034 1036 nf
1035 1037 ):
1036 1038 # unknown file -- normalize if necessary
1037 1039 if not alreadynormed:
1038 1040 nf = normalize(nf, False, True)
1039 1041 results[nf] = st
1040 1042 elif nf in dmap and (matchalways or matchfn(nf)):
1041 1043 results[nf] = None
1042 1044
1043 1045 for nd, d in work:
1044 1046 # alreadynormed means that processwork doesn't have to do any
1045 1047 # expensive directory normalization
1046 1048 alreadynormed = not normalize or nd == d
1047 1049 traverse([d], alreadynormed)
1048 1050
1049 1051 for s in subrepos:
1050 1052 del results[s]
1051 1053 del results[b'.hg']
1052 1054
1053 1055 # step 3: visit remaining files from dmap
1054 1056 if not skipstep3 and not exact:
1055 1057 # If a dmap file is not in results yet, it was either
1056 1058             # a) not matching matchfn, b) ignored, c) missing, or d) under a
1057 1059 # symlink directory.
1058 1060 if not results and matchalways:
1059 1061 visit = [f for f in dmap]
1060 1062 else:
1061 1063 visit = [f for f in dmap if f not in results and matchfn(f)]
1062 1064 visit.sort()
1063 1065
1064 1066 if unknown:
1065 1067 # unknown == True means we walked all dirs under the roots
1066 1068                 # that weren't ignored, and everything that matched was stat'ed
1067 1069 # and is already in results.
1068 1070 # The rest must thus be ignored or under a symlink.
1069 1071 audit_path = pathutil.pathauditor(self._root, cached=True)
1070 1072
1071 1073 for nf in iter(visit):
1072 1074 # If a stat for the same file was already added with a
1073 1075 # different case, don't add one for this, since that would
1074 1076 # make it appear as if the file exists under both names
1075 1077 # on disk.
1076 1078 if (
1077 1079 normalizefile
1078 1080 and normalizefile(nf, True, True) in results
1079 1081 ):
1080 1082 results[nf] = None
1081 1083 # Report ignored items in the dmap as long as they are not
1082 1084 # under a symlink directory.
1083 1085 elif audit_path.check(nf):
1084 1086 try:
1085 1087 results[nf] = lstat(join(nf))
1086 1088 # file was just ignored, no links, and exists
1087 1089 except OSError:
1088 1090 # file doesn't exist
1089 1091 results[nf] = None
1090 1092 else:
1091 1093 # It's either missing or under a symlink directory
1092 1094 # which we in this case report as missing
1093 1095 results[nf] = None
1094 1096 else:
1095 1097 # We may not have walked the full directory tree above,
1096 1098 # so stat and check everything we missed.
1097 1099 iv = iter(visit)
1098 1100 for st in util.statfiles([join(i) for i in visit]):
1099 1101 results[next(iv)] = st
1100 1102 return results
1101 1103
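# Hedged sketch of driving walk() as documented in its docstring; the matcher
# and flag values are assumptions chosen for illustration.
def example_walk(ds):
    results = ds.walk(matchmod.always(), [], unknown=True, ignored=False)
    # results maps filename -> stat-like object, or None for dirstate-only
    # entries such as missing files
    return sorted(results)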
1102 1104 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1103 1105 # Force Rayon (Rust parallelism library) to respect the number of
1104 1106 # workers. This is a temporary workaround until Rust code knows
1105 1107 # how to read the config file.
1106 1108 numcpus = self._ui.configint(b"worker", b"numcpus")
1107 1109 if numcpus is not None:
1108 1110 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1109 1111
1110 1112 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1111 1113 if not workers_enabled:
1112 1114 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1113 1115
1114 1116 (
1115 1117 lookup,
1116 1118 modified,
1117 1119 added,
1118 1120 removed,
1119 1121 deleted,
1120 1122 clean,
1121 1123 ignored,
1122 1124 unknown,
1123 1125 warnings,
1124 1126 bad,
1125 1127 traversed,
1126 1128 ) = rustmod.status(
1127 1129 self._map._rustmap,
1128 1130 matcher,
1129 1131 self._rootdir,
1130 1132 self._ignorefiles(),
1131 1133 self._checkexec,
1132 1134 self._lastnormaltime,
1133 1135 bool(list_clean),
1134 1136 bool(list_ignored),
1135 1137 bool(list_unknown),
1136 1138 bool(matcher.traversedir),
1137 1139 )
1138 1140
1139 1141 if matcher.traversedir:
1140 1142 for dir in traversed:
1141 1143 matcher.traversedir(dir)
1142 1144
1143 1145 if self._ui.warn:
1144 1146 for item in warnings:
1145 1147 if isinstance(item, tuple):
1146 1148 file_path, syntax = item
1147 1149 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1148 1150 file_path,
1149 1151 syntax,
1150 1152 )
1151 1153 self._ui.warn(msg)
1152 1154 else:
1153 1155 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1154 1156 self._ui.warn(
1155 1157 msg
1156 1158 % (
1157 1159 pathutil.canonpath(
1158 1160 self._rootdir, self._rootdir, item
1159 1161 ),
1160 1162 b"No such file or directory",
1161 1163 )
1162 1164 )
1163 1165
1164 1166 for (fn, message) in bad:
1165 1167 matcher.bad(fn, encoding.strtolocal(message))
1166 1168
1167 1169 status = scmutil.status(
1168 1170 modified=modified,
1169 1171 added=added,
1170 1172 removed=removed,
1171 1173 deleted=deleted,
1172 1174 unknown=unknown,
1173 1175 ignored=ignored,
1174 1176 clean=clean,
1175 1177 )
1176 1178 return (lookup, status)
1177 1179
1178 1180 def status(self, match, subrepos, ignored, clean, unknown):
1179 1181 """Determine the status of the working copy relative to the
1180 1182 dirstate and return a pair of (unsure, status), where status is of type
1181 1183 scmutil.status and:
1182 1184
1183 1185 unsure:
1184 1186 files that might have been modified since the dirstate was
1185 1187 written, but need to be read to be sure (size is the same
1186 1188 but mtime differs)
1187 1189 status.modified:
1188 1190 files that have definitely been modified since the dirstate
1189 1191 was written (different size or mode)
1190 1192 status.clean:
1191 1193 files that have definitely not been modified since the
1192 1194 dirstate was written
1193 1195 """
1194 1196 listignored, listclean, listunknown = ignored, clean, unknown
1195 1197 lookup, modified, added, unknown, ignored = [], [], [], [], []
1196 1198 removed, deleted, clean = [], [], []
1197 1199
1198 1200 dmap = self._map
1199 1201 dmap.preload()
1200 1202
1201 1203 use_rust = True
1202 1204
1203 1205 allowed_matchers = (
1204 1206 matchmod.alwaysmatcher,
1205 1207 matchmod.exactmatcher,
1206 1208 matchmod.includematcher,
1207 1209 )
1208 1210
1209 1211 if rustmod is None:
1210 1212 use_rust = False
1211 1213 elif self._checkcase:
1212 1214 # Case-insensitive filesystems are not handled yet
1213 1215 use_rust = False
1214 1216 elif subrepos:
1215 1217 use_rust = False
1216 1218 elif sparse.enabled:
1217 1219 use_rust = False
1218 1220 elif not isinstance(match, allowed_matchers):
1219 1221 # Some matchers have yet to be implemented
1220 1222 use_rust = False
1221 1223
1222 1224 if use_rust:
1223 1225 try:
1224 1226 return self._rust_status(
1225 1227 match, listclean, listignored, listunknown
1226 1228 )
1227 1229 except rustmod.FallbackError:
1228 1230 pass
1229 1231
1230 1232 def noop(f):
1231 1233 pass
1232 1234
1233 1235 dcontains = dmap.__contains__
1234 1236 dget = dmap.__getitem__
1235 1237 ladd = lookup.append # aka "unsure"
1236 1238 madd = modified.append
1237 1239 aadd = added.append
1238 1240 uadd = unknown.append if listunknown else noop
1239 1241 iadd = ignored.append if listignored else noop
1240 1242 radd = removed.append
1241 1243 dadd = deleted.append
1242 1244 cadd = clean.append if listclean else noop
1243 1245 mexact = match.exact
1244 1246 dirignore = self._dirignore
1245 1247 checkexec = self._checkexec
1246 1248 copymap = self._map.copymap
1247 1249 lastnormaltime = self._lastnormaltime
1248 1250
1249 1251 # We need to do full walks when either
1250 1252 # - we're listing all clean files, or
1251 1253 # - match.traversedir does something, because match.traversedir should
1252 1254 # be called for every dir in the working dir
1253 1255 full = listclean or match.traversedir is not None
1254 1256 for fn, st in pycompat.iteritems(
1255 1257 self.walk(match, subrepos, listunknown, listignored, full=full)
1256 1258 ):
1257 1259 if not dcontains(fn):
1258 1260 if (listignored or mexact(fn)) and dirignore(fn):
1259 1261 if listignored:
1260 1262 iadd(fn)
1261 1263 else:
1262 1264 uadd(fn)
1263 1265 continue
1264 1266
1265 1267 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1266 1268 # written like that for performance reasons. dmap[fn] is not a
1267 1269 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1268 1270 # opcode has fast paths when the value to be unpacked is a tuple or
1269 1271 # a list, but falls back to creating a full-fledged iterator in
1270 1272 # general. That is much slower than simply accessing and storing the
1271 1273 # tuple members one by one.
1272 1274 t = dget(fn)
1273 1275 state = t[0]
1274 1276 mode = t[1]
1275 1277 size = t[2]
1276 1278 time = t[3]
1277 1279
1278 1280 if not st and state in b"nma":
1279 1281 dadd(fn)
1280 1282 elif state == b'n':
1281 1283 if (
1282 1284 size >= 0
1283 1285 and (
1284 1286 (size != st.st_size and size != st.st_size & _rangemask)
1285 1287 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1286 1288 )
1287 1289 or size == -2 # other parent
1288 1290 or fn in copymap
1289 1291 ):
1290 1292 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1291 1293 # issue6456: Size returned may be longer due to
1292 1294 # encryption on EXT-4 fscrypt, undecided.
1293 1295 ladd(fn)
1294 1296 else:
1295 1297 madd(fn)
1296 1298 elif (
1297 1299 time != st[stat.ST_MTIME]
1298 1300 and time != st[stat.ST_MTIME] & _rangemask
1299 1301 ):
1300 1302 ladd(fn)
1301 1303 elif st[stat.ST_MTIME] == lastnormaltime:
1302 1304 # fn may have just been marked as normal and it may have
1303 1305 # changed in the same second without changing its size.
1304 1306 # This can happen if we quickly do multiple commits.
1305 1307 # Force lookup, so we don't miss such a racy file change.
1306 1308 ladd(fn)
1307 1309 elif listclean:
1308 1310 cadd(fn)
1309 1311 elif state == b'm':
1310 1312 madd(fn)
1311 1313 elif state == b'a':
1312 1314 aadd(fn)
1313 1315 elif state == b'r':
1314 1316 radd(fn)
1315 1317 status = scmutil.status(
1316 1318 modified, added, removed, deleted, unknown, ignored, clean
1317 1319 )
1318 1320 return (lookup, status)
1319 1321
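# Minimal sketch of consuming the (unsure, status) pair described above; the
# always-matcher and empty subrepo list are illustrative choices.
def example_status(ds):
    unsure, st = ds.status(
        matchmod.always(), [], ignored=False, clean=False, unknown=True
    )
    # `unsure` entries need a content read to decide; st.modified is definite
    return sorted(unsure), sorted(st.modified)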
1320 1322 def matches(self, match):
1321 1323 """
1322 1324 return files in the dirstate (in whatever state) filtered by match
1323 1325 """
1324 1326 dmap = self._map
1325 1327 if rustmod is not None:
1326 1328 dmap = self._map._rustmap
1327 1329
1328 1330 if match.always():
1329 1331 return dmap.keys()
1330 1332 files = match.files()
1331 1333 if match.isexact():
1332 1334 # fast path -- filter the other way around, since typically files is
1333 1335 # much smaller than dmap
1334 1336 return [f for f in files if f in dmap]
1335 1337 if match.prefix() and all(fn in dmap for fn in files):
1336 1338 # fast path -- all the values are known to be files, so just return
1337 1339 # that
1338 1340 return list(files)
1339 1341 return [f for f in dmap if match(f)]
1340 1342
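# Hedged sketch: matches() filters every dirstate entry through a matcher,
# regardless of state; the root and pattern below are invented for the example.
def example_python_files(ds):
    m = matchmod.match(b'/repo', b'', [b'glob:**.py'])
    return ds.matches(m)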
1341 1343 def _actualfilename(self, tr):
1342 1344 if tr:
1343 1345 return self._pendingfilename
1344 1346 else:
1345 1347 return self._filename
1346 1348
1347 1349 def savebackup(self, tr, backupname):
1348 1350 '''Save current dirstate into backup file'''
1349 1351 filename = self._actualfilename(tr)
1350 1352 assert backupname != filename
1351 1353
1352 1354         # use '_writedirstate' instead of 'write' to make sure changes are
1353 1355         # written out, because the latter skips writing while a transaction is
1354 1356         # running. The output file is used to create a backup of the dirstate
1355 1357         # at this point.
1355 1357 if self._dirty or not self._opener.exists(filename):
1356 1358 self._writedirstate(
1357 1359 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1358 1360 )
1359 1361
1360 1362 if tr:
1361 1363 # ensure that subsequent tr.writepending returns True for
1362 1364 # changes written out above, even if dirstate is never
1363 1365 # changed after this
1364 1366 tr.addfilegenerator(
1365 1367 b'dirstate',
1366 1368 (self._filename,),
1367 1369 self._writedirstate,
1368 1370 location=b'plain',
1369 1371 )
1370 1372
1371 1373 # ensure that pending file written above is unlinked at
1372 1374 # failure, even if tr.writepending isn't invoked until the
1373 1375 # end of this transaction
1374 1376 tr.registertmp(filename, location=b'plain')
1375 1377
1376 1378 self._opener.tryunlink(backupname)
1377 1379 # hardlink backup is okay because _writedirstate is always called
1378 1380 # with an "atomictemp=True" file.
1379 1381 util.copyfile(
1380 1382 self._opener.join(filename),
1381 1383 self._opener.join(backupname),
1382 1384 hardlink=True,
1383 1385 )
1384 1386
1385 1387 def restorebackup(self, tr, backupname):
1386 1388 '''Restore dirstate by backup file'''
1387 1389 # this "invalidate()" prevents "wlock.release()" from writing
1388 1390 # changes of dirstate out after restoring from backup file
1389 1391 self.invalidate()
1390 1392 filename = self._actualfilename(tr)
1391 1393 o = self._opener
1392 1394 if util.samefile(o.join(backupname), o.join(filename)):
1393 1395 o.unlink(backupname)
1394 1396 else:
1395 1397 o.rename(backupname, filename, checkambig=True)
1396 1398
1397 1399 def clearbackup(self, tr, backupname):
1398 1400 '''Clear backup file'''
1399 1401 self._opener.unlink(backupname)
1400 1402
1401 1403
1402 1404 class dirstatemap(object):
1403 1405 """Map encapsulating the dirstate's contents.
1404 1406
1405 1407 The dirstate contains the following state:
1406 1408
1407 1409 - `identity` is the identity of the dirstate file, which can be used to
1408 1410 detect when changes have occurred to the dirstate file.
1409 1411
1410 1412 - `parents` is a pair containing the parents of the working copy. The
1411 1413 parents are updated by calling `setparents`.
1412 1414
1413 1415 - the state map maps filenames to tuples of (state, mode, size, mtime),
1414 1416 where state is a single character representing 'normal', 'added',
1415 1417 'removed', or 'merged'. It is read by treating the dirstate as a
1416 1418 dict. File state is updated by calling the `addfile`, `removefile` and
1417 1419 `dropfile` methods.
1418 1420
1419 1421 - `copymap` maps destination filenames to their source filename.
1420 1422
1421 1423 The dirstate also provides the following views onto the state:
1422 1424
1423 1425 - `nonnormalset` is a set of the filenames that have state other
1424 1426 than 'normal', or are normal but have an mtime of -1 ('normallookup').
1425 1427
1426 1428 - `otherparentset` is a set of the filenames that are marked as coming
1427 1429 from the second parent when the dirstate is currently being merged.
1428 1430
1429 1431 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
1430 1432 form that they appear as in the dirstate.
1431 1433
1432 1434 - `dirfoldmap` is a dict mapping normalized directory names to the
1433 1435 denormalized form that they appear as in the dirstate.
1434 1436 """
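# Illustrative sketch (not part of this change): how the structure documented
# above can be consulted, given an already-populated instance `dmap` of this
# class. The helper name and the summary it builds are hypothetical.
def _summarize_dirstatemap(dmap):
    # values are (state, mode, size, mtime) tuples keyed by filename
    removed = [f for f, e in dmap.items() if e[0] == b'r']
    # entries coming from the second parent are flagged with size == -2
    from_p2 = [f for f, e in dmap.items() if e[0] == b'n' and e[2] == -2]
    # copymap maps destination filenames back to their source filename
    copies = dict(dmap.copymap)
    return removed, from_p2, copies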
1435 1437
1436 1438 def __init__(self, ui, opener, root, nodeconstants):
1437 1439 self._ui = ui
1438 1440 self._opener = opener
1439 1441 self._root = root
1440 1442 self._filename = b'dirstate'
1441 1443 self._nodelen = 20
1442 1444 self._nodeconstants = nodeconstants
1443 1445
1444 1446 self._parents = None
1445 1447 self._dirtyparents = False
1446 1448
1447 1449 # for consistent view between _pl() and _read() invocations
1448 1450 self._pendingmode = None
1449 1451
1450 1452 @propertycache
1451 1453 def _map(self):
1452 1454 self._map = {}
1453 1455 self.read()
1454 1456 return self._map
1455 1457
1456 1458 @propertycache
1457 1459 def copymap(self):
1458 1460 self.copymap = {}
1459 1461 self._map
1460 1462 return self.copymap
1461 1463
1462 1464 def clear(self):
1463 1465 self._map.clear()
1464 1466 self.copymap.clear()
1465 1467 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
1466 1468 util.clearcachedproperty(self, b"_dirs")
1467 1469 util.clearcachedproperty(self, b"_alldirs")
1468 1470 util.clearcachedproperty(self, b"filefoldmap")
1469 1471 util.clearcachedproperty(self, b"dirfoldmap")
1470 1472 util.clearcachedproperty(self, b"nonnormalset")
1471 1473 util.clearcachedproperty(self, b"otherparentset")
1472 1474
1473 1475 def items(self):
1474 1476 return pycompat.iteritems(self._map)
1475 1477
1476 1478 # forward for python2,3 compat
1477 1479 iteritems = items
1478 1480
1479 1481 def __len__(self):
1480 1482 return len(self._map)
1481 1483
1482 1484 def __iter__(self):
1483 1485 return iter(self._map)
1484 1486
1485 1487 def get(self, key, default=None):
1486 1488 return self._map.get(key, default)
1487 1489
1488 1490 def __contains__(self, key):
1489 1491 return key in self._map
1490 1492
1491 1493 def __getitem__(self, key):
1492 1494 return self._map[key]
1493 1495
1494 1496 def keys(self):
1495 1497 return self._map.keys()
1496 1498
1497 1499 def preload(self):
1498 1500 """Loads the underlying data, if it's not already loaded"""
1499 1501 self._map
1500 1502
1501 1503 def addfile(self, f, oldstate, state, mode, size, mtime):
1502 1504 """Add a tracked file to the dirstate."""
1503 1505 if oldstate in b"?r" and "_dirs" in self.__dict__:
1504 1506 self._dirs.addpath(f)
1505 1507 if oldstate == b"?" and "_alldirs" in self.__dict__:
1506 1508 self._alldirs.addpath(f)
1507 1509 self._map[f] = dirstatetuple(state, mode, size, mtime)
1508 1510 if state != b'n' or mtime == -1:
1509 1511 self.nonnormalset.add(f)
1510 1512 if size == -2:
1511 1513 self.otherparentset.add(f)
1512 1514
1513 1515 def removefile(self, f, oldstate, size):
1514 1516 """
1515 1517 Mark a file as removed in the dirstate.
1516 1518
1517 1519 The `size` parameter is used to store sentinel values that indicate
1518 1520 the file's previous state. In the future, we should refactor this
1519 1521 to be more explicit about what that state is.
1520 1522 """
1521 1523 if oldstate not in b"?r" and "_dirs" in self.__dict__:
1522 1524 self._dirs.delpath(f)
1523 1525 if oldstate == b"?" and "_alldirs" in self.__dict__:
1524 1526 self._alldirs.addpath(f)
1525 1527 if "filefoldmap" in self.__dict__:
1526 1528 normed = util.normcase(f)
1527 1529 self.filefoldmap.pop(normed, None)
1528 1530 self._map[f] = dirstatetuple(b'r', 0, size, 0)
1529 1531 self.nonnormalset.add(f)
1530 1532
1531 1533 def dropfile(self, f, oldstate):
1532 1534 """
1533 1535 Remove a file from the dirstate. Returns True if the file was
1534 1536 previously recorded.
1535 1537 """
1536 1538 exists = self._map.pop(f, None) is not None
1537 1539 if exists:
1538 1540 if oldstate != b"r" and "_dirs" in self.__dict__:
1539 1541 self._dirs.delpath(f)
1540 1542 if "_alldirs" in self.__dict__:
1541 1543 self._alldirs.delpath(f)
1542 1544 if "filefoldmap" in self.__dict__:
1543 1545 normed = util.normcase(f)
1544 1546 self.filefoldmap.pop(normed, None)
1545 1547 self.nonnormalset.discard(f)
1546 1548 return exists
1547 1549
1548 1550 def clearambiguoustimes(self, files, now):
1549 1551 for f in files:
1550 1552 e = self.get(f)
1551 1553 if e is not None and e[0] == b'n' and e[3] == now:
1552 1554 self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
1553 1555 self.nonnormalset.add(f)
1554 1556
1555 1557 def nonnormalentries(self):
1556 1558 '''Compute the nonnormal dirstate entries from the dmap'''
1557 1559 try:
1558 1560 return parsers.nonnormalotherparententries(self._map)
1559 1561 except AttributeError:
1560 1562 nonnorm = set()
1561 1563 otherparent = set()
1562 1564 for fname, e in pycompat.iteritems(self._map):
1563 1565 if e[0] != b'n' or e[3] == -1:
1564 1566 nonnorm.add(fname)
1565 1567 if e[0] == b'n' and e[2] == -2:
1566 1568 otherparent.add(fname)
1567 1569 return nonnorm, otherparent
1568 1570
1569 1571 @propertycache
1570 1572 def filefoldmap(self):
1571 1573 """Returns a dictionary mapping normalized case paths to their
1572 1574 non-normalized versions.
1573 1575 """
1574 1576 try:
1575 1577 makefilefoldmap = parsers.make_file_foldmap
1576 1578 except AttributeError:
1577 1579 pass
1578 1580 else:
1579 1581 return makefilefoldmap(
1580 1582 self._map, util.normcasespec, util.normcasefallback
1581 1583 )
1582 1584
1583 1585 f = {}
1584 1586 normcase = util.normcase
1585 1587 for name, s in pycompat.iteritems(self._map):
1586 1588 if s[0] != b'r':
1587 1589 f[normcase(name)] = name
1588 1590 f[b'.'] = b'.' # prevents useless util.fspath() invocation
1589 1591 return f
1590 1592
1591 1593 def hastrackeddir(self, d):
1592 1594 """
1593 1595 Returns True if the dirstate contains a tracked (not removed) file
1594 1596 in this directory.
1595 1597 """
1596 1598 return d in self._dirs
1597 1599
1598 1600 def hasdir(self, d):
1599 1601 """
1600 1602 Returns True if the dirstate contains a file (tracked or removed)
1601 1603 in this directory.
1602 1604 """
1603 1605 return d in self._alldirs
1604 1606
1605 1607 @propertycache
1606 1608 def _dirs(self):
1607 1609 return pathutil.dirs(self._map, b'r')
1608 1610
1609 1611 @propertycache
1610 1612 def _alldirs(self):
1611 1613 return pathutil.dirs(self._map)
1612 1614
1613 1615 def _opendirstatefile(self):
1614 1616 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1615 1617 if self._pendingmode is not None and self._pendingmode != mode:
1616 1618 fp.close()
1617 1619 raise error.Abort(
1618 1620 _(b'working directory state may be changed parallelly')
1619 1621 )
1620 1622 self._pendingmode = mode
1621 1623 return fp
1622 1624
1623 1625 def parents(self):
1624 1626 if not self._parents:
1625 1627 try:
1626 1628 fp = self._opendirstatefile()
1627 1629 st = fp.read(2 * self._nodelen)
1628 1630 fp.close()
1629 1631 except IOError as err:
1630 1632 if err.errno != errno.ENOENT:
1631 1633 raise
1632 1634 # File doesn't exist, so the current state is empty
1633 1635 st = b''
1634 1636
1635 1637 l = len(st)
1636 1638 if l == self._nodelen * 2:
1637 1639 self._parents = (
1638 1640 st[: self._nodelen],
1639 1641 st[self._nodelen : 2 * self._nodelen],
1640 1642 )
1641 1643 elif l == 0:
1642 1644 self._parents = (
1643 1645 self._nodeconstants.nullid,
1644 1646 self._nodeconstants.nullid,
1645 1647 )
1646 1648 else:
1647 1649 raise error.Abort(
1648 1650 _(b'working directory state appears damaged!')
1649 1651 )
1650 1652
1651 1653 return self._parents
1652 1654
1653 1655 def setparents(self, p1, p2):
1654 1656 self._parents = (p1, p2)
1655 1657 self._dirtyparents = True
1656 1658
1657 1659 def read(self):
1658 1660 # ignore HG_PENDING because identity is used only for writing
1659 1661 self.identity = util.filestat.frompath(
1660 1662 self._opener.join(self._filename)
1661 1663 )
1662 1664
1663 1665 try:
1664 1666 fp = self._opendirstatefile()
1665 1667 try:
1666 1668 st = fp.read()
1667 1669 finally:
1668 1670 fp.close()
1669 1671 except IOError as err:
1670 1672 if err.errno != errno.ENOENT:
1671 1673 raise
1672 1674 return
1673 1675 if not st:
1674 1676 return
1675 1677
1676 1678 if util.safehasattr(parsers, b'dict_new_presized'):
1677 1679 # Make an estimate of the number of files in the dirstate based on
1678 1680 # its size. This trades wasting some memory for avoiding costly
1679 1681 # resizes. Each entry has a prefix of 17 bytes followed by one or
1680 1682 # two path names. Studies of various large-scale real-world repositories
1681 1683 # found 54 bytes to be a reasonable upper limit for the average path name.
1682 1684 # Copy entries are ignored for the sake of this estimate.
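# (Illustrative figures, not part of this change: with ~17 bytes of prefix
# plus ~54 bytes of path data, i.e. ~71 bytes per entry, a 7,100,000-byte
# dirstate file would be presized for roughly 100,000 entries.)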
1683 1685 self._map = parsers.dict_new_presized(len(st) // 71)
1684 1686
1685 1687 # Python's garbage collector triggers a GC each time a certain number
1686 1688 # of container objects (the number being defined by
1687 1689 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
1688 1690 # for each file in the dirstate. The C version then immediately marks
1689 1691 # them as not to be tracked by the collector. However, this has no
1690 1692 # effect on when GCs are triggered, only on what objects the GC looks
1691 1693 # into. This means that O(number of files) GCs are unavoidable.
1692 1694 # Depending on when in the process's lifetime the dirstate is parsed,
1693 1695 # this can get very expensive. As a workaround, disable GC while
1694 1696 # parsing the dirstate.
1695 1697 #
1696 1698 # (we cannot decorate the function directly since it is in a C module)
1697 1699 parse_dirstate = util.nogc(parsers.parse_dirstate)
1698 1700 p = parse_dirstate(self._map, self.copymap, st)
1699 1701 if not self._dirtyparents:
1700 1702 self.setparents(*p)
1701 1703
1702 1704 # Avoid excess attribute lookups by fast pathing certain checks
1703 1705 self.__contains__ = self._map.__contains__
1704 1706 self.__getitem__ = self._map.__getitem__
1705 1707 self.get = self._map.get
1706 1708
1707 1709 def write(self, st, now):
1708 1710 st.write(
1709 1711 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
1710 1712 )
1711 1713 st.close()
1712 1714 self._dirtyparents = False
1713 1715 self.nonnormalset, self.otherparentset = self.nonnormalentries()
1714 1716
1715 1717 @propertycache
1716 1718 def nonnormalset(self):
1717 1719 nonnorm, otherparents = self.nonnormalentries()
1718 1720 self.otherparentset = otherparents
1719 1721 return nonnorm
1720 1722
1721 1723 @propertycache
1722 1724 def otherparentset(self):
1723 1725 nonnorm, otherparents = self.nonnormalentries()
1724 1726 self.nonnormalset = nonnorm
1725 1727 return otherparents
1726 1728
1727 1729 def non_normal_or_other_parent_paths(self):
1728 1730 return self.nonnormalset.union(self.otherparentset)
1729 1731
1730 1732 @propertycache
1731 1733 def identity(self):
1732 1734 self._map
1733 1735 return self.identity
1734 1736
1735 1737 @propertycache
1736 1738 def dirfoldmap(self):
1737 1739 f = {}
1738 1740 normcase = util.normcase
1739 1741 for name in self._dirs:
1740 1742 f[normcase(name)] = name
1741 1743 return f
1742 1744
1743 1745
1744 1746 if rustmod is not None:
1745 1747
1746 1748 class dirstatemap(object):
1747 1749 def __init__(self, ui, opener, root, nodeconstants):
1748 1750 self._nodeconstants = nodeconstants
1749 1751 self._ui = ui
1750 1752 self._opener = opener
1751 1753 self._root = root
1752 1754 self._filename = b'dirstate'
1753 1755 self._nodelen = 20
1754 1756 self._parents = None
1755 1757 self._dirtyparents = False
1756 1758
1757 1759 # for consistent view between _pl() and _read() invocations
1758 1760 self._pendingmode = None
1759 1761
1760 1762 def addfile(self, *args, **kwargs):
1761 1763 return self._rustmap.addfile(*args, **kwargs)
1762 1764
1763 1765 def removefile(self, *args, **kwargs):
1764 1766 return self._rustmap.removefile(*args, **kwargs)
1765 1767
1766 1768 def dropfile(self, *args, **kwargs):
1767 1769 return self._rustmap.dropfile(*args, **kwargs)
1768 1770
1769 1771 def clearambiguoustimes(self, *args, **kwargs):
1770 1772 return self._rustmap.clearambiguoustimes(*args, **kwargs)
1771 1773
1772 1774 def nonnormalentries(self):
1773 1775 return self._rustmap.nonnormalentries()
1774 1776
1775 1777 def get(self, *args, **kwargs):
1776 1778 return self._rustmap.get(*args, **kwargs)
1777 1779
1778 1780 @property
1779 1781 def copymap(self):
1780 1782 return self._rustmap.copymap()
1781 1783
1782 1784 def preload(self):
1783 1785 self._rustmap
1784 1786
1785 1787 def clear(self):
1786 1788 self._rustmap.clear()
1787 1789 self.setparents(
1788 1790 self._nodeconstants.nullid, self._nodeconstants.nullid
1789 1791 )
1790 1792 util.clearcachedproperty(self, b"_dirs")
1791 1793 util.clearcachedproperty(self, b"_alldirs")
1792 1794 util.clearcachedproperty(self, b"dirfoldmap")
1793 1795
1794 1796 def items(self):
1795 1797 return self._rustmap.items()
1796 1798
1797 1799 def keys(self):
1798 1800 return iter(self._rustmap)
1799 1801
1800 1802 def __contains__(self, key):
1801 1803 return key in self._rustmap
1802 1804
1803 1805 def __getitem__(self, item):
1804 1806 return self._rustmap[item]
1805 1807
1806 1808 def __len__(self):
1807 1809 return len(self._rustmap)
1808 1810
1809 1811 def __iter__(self):
1810 1812 return iter(self._rustmap)
1811 1813
1812 1814 # forward for python2,3 compat
1813 1815 iteritems = items
1814 1816
1815 1817 def _opendirstatefile(self):
1816 1818 fp, mode = txnutil.trypending(
1817 1819 self._root, self._opener, self._filename
1818 1820 )
1819 1821 if self._pendingmode is not None and self._pendingmode != mode:
1820 1822 fp.close()
1821 1823 raise error.Abort(
1822 1824 _(b'working directory state may be changed parallelly')
1823 1825 )
1824 1826 self._pendingmode = mode
1825 1827 return fp
1826 1828
1827 1829 def setparents(self, p1, p2):
1828 1830 self._parents = (p1, p2)
1829 1831 self._dirtyparents = True
1830 1832
1831 1833 def parents(self):
1832 1834 if not self._parents:
1833 1835 try:
1834 1836 fp = self._opendirstatefile()
1835 1837 st = fp.read(40)
1836 1838 fp.close()
1837 1839 except IOError as err:
1838 1840 if err.errno != errno.ENOENT:
1839 1841 raise
1840 1842 # File doesn't exist, so the current state is empty
1841 1843 st = b''
1842 1844
1843 1845 l = len(st)
1844 1846 if l == self._nodelen * 2:
1845 1847 self._parents = (
1846 1848 st[: self._nodelen],
1847 1849 st[self._nodelen : 2 * self._nodelen],
1848 1850 )
1849 1851 elif l == 0:
1850 1852 self._parents = (
1851 1853 self._nodeconstants.nullid,
1852 1854 self._nodeconstants.nullid,
1853 1855 )
1854 1856 else:
1855 1857 raise error.Abort(
1856 1858 _(b'working directory state appears damaged!')
1857 1859 )
1858 1860
1859 1861 return self._parents
1860 1862
1861 1863 @propertycache
1862 1864 def _rustmap(self):
1863 1865 """
1864 1866 Fills the Dirstatemap when called.
1865 1867 """
1866 1868 # ignore HG_PENDING because identity is used only for writing
1867 1869 self.identity = util.filestat.frompath(
1868 1870 self._opener.join(self._filename)
1869 1871 )
1870 1872
1871 1873 try:
1872 1874 fp = self._opendirstatefile()
1873 1875 try:
1874 1876 st = fp.read()
1875 1877 finally:
1876 1878 fp.close()
1877 1879 except IOError as err:
1878 1880 if err.errno != errno.ENOENT:
1879 1881 raise
1880 1882 st = b''
1881 1883
1882 1884 use_dirstate_tree = self._ui.configbool(
1883 1885 b"experimental",
1884 1886 b"dirstate-tree.in-memory",
1885 1887 False,
1886 1888 )
1887 1889 self._rustmap, parents = rustmod.DirstateMap.new(
1888 1890 use_dirstate_tree, st
1889 1891 )
1890 1892
1891 1893 if parents and not self._dirtyparents:
1892 1894 self.setparents(*parents)
1893 1895
1894 1896 self.__contains__ = self._rustmap.__contains__
1895 1897 self.__getitem__ = self._rustmap.__getitem__
1896 1898 self.get = self._rustmap.get
1897 1899 return self._rustmap
1898 1900
1899 1901 def write(self, st, now):
1900 1902 parents = self.parents()
1901 1903 st.write(self._rustmap.write(parents[0], parents[1], now))
1902 1904 st.close()
1903 1905 self._dirtyparents = False
1904 1906
1905 1907 @propertycache
1906 1908 def filefoldmap(self):
1907 1909 """Returns a dictionary mapping normalized case paths to their
1908 1910 non-normalized versions.
1909 1911 """
1910 1912 return self._rustmap.filefoldmapasdict()
1911 1913
1912 1914 def hastrackeddir(self, d):
1913 1915 self._dirs # Trigger Python's propertycache
1914 1916 return self._rustmap.hastrackeddir(d)
1915 1917
1916 1918 def hasdir(self, d):
1917 1919 self._dirs # Trigger Python's propertycache
1918 1920 return self._rustmap.hasdir(d)
1919 1921
1920 1922 @propertycache
1921 1923 def _dirs(self):
1922 1924 return self._rustmap.getdirs()
1923 1925
1924 1926 @propertycache
1925 1927 def _alldirs(self):
1926 1928 return self._rustmap.getalldirs()
1927 1929
1928 1930 @propertycache
1929 1931 def identity(self):
1930 1932 self._rustmap
1931 1933 return self.identity
1932 1934
1933 1935 @property
1934 1936 def nonnormalset(self):
1935 1937 nonnorm = self._rustmap.non_normal_entries()
1936 1938 return nonnorm
1937 1939
1938 1940 @propertycache
1939 1941 def otherparentset(self):
1940 1942 otherparents = self._rustmap.other_parent_entries()
1941 1943 return otherparents
1942 1944
1943 1945 def non_normal_or_other_parent_paths(self):
1944 1946 return self._rustmap.non_normal_or_other_parent_paths()
1945 1947
1946 1948 @propertycache
1947 1949 def dirfoldmap(self):
1948 1950 f = {}
1949 1951 normcase = util.normcase
1950 1952 for name in self._dirs:
1951 1953 f[normcase(name)] = name
1952 1954 return f
@@ -1,3780 +1,3795 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullrev,
23 23 sha1nodeconstants,
24 24 short,
25 25 )
26 26 from .pycompat import (
27 27 delattr,
28 28 getattr,
29 29 )
30 30 from . import (
31 31 bookmarks,
32 32 branchmap,
33 33 bundle2,
34 34 bundlecaches,
35 35 changegroup,
36 36 color,
37 37 commit,
38 38 context,
39 39 dirstate,
40 40 dirstateguard,
41 41 discovery,
42 42 encoding,
43 43 error,
44 44 exchange,
45 45 extensions,
46 46 filelog,
47 47 hook,
48 48 lock as lockmod,
49 49 match as matchmod,
50 50 mergestate as mergestatemod,
51 51 mergeutil,
52 52 namespaces,
53 53 narrowspec,
54 54 obsolete,
55 55 pathutil,
56 56 phases,
57 57 pushkey,
58 58 pycompat,
59 59 rcutil,
60 60 repoview,
61 61 requirements as requirementsmod,
62 62 revlog,
63 63 revset,
64 64 revsetlang,
65 65 scmutil,
66 66 sparse,
67 67 store as storemod,
68 68 subrepoutil,
69 69 tags as tagsmod,
70 70 transaction,
71 71 txnutil,
72 72 util,
73 73 vfs as vfsmod,
74 74 wireprototypes,
75 75 )
76 76
77 77 from .interfaces import (
78 78 repository,
79 79 util as interfaceutil,
80 80 )
81 81
82 82 from .utils import (
83 83 hashutil,
84 84 procutil,
85 85 stringutil,
86 86 urlutil,
87 87 )
88 88
89 89 from .revlogutils import (
90 90 concurrency_checker as revlogchecker,
91 91 constants as revlogconst,
92 92 sidedata as sidedatamod,
93 93 )
94 94
95 95 release = lockmod.release
96 96 urlerr = util.urlerr
97 97 urlreq = util.urlreq
98 98
99 99 # set of (path, vfs-location) tuples. vfs-location is:
100 100 # - 'plain' for vfs relative paths
101 101 # - '' for svfs relative paths
102 102 _cachedfiles = set()
103 103
104 104
105 105 class _basefilecache(scmutil.filecache):
106 106 """All filecache usage on repo are done for logic that should be unfiltered"""
107 107
108 108 def __get__(self, repo, type=None):
109 109 if repo is None:
110 110 return self
111 111 # proxy to unfiltered __dict__ since filtered repo has no entry
112 112 unfi = repo.unfiltered()
113 113 try:
114 114 return unfi.__dict__[self.sname]
115 115 except KeyError:
116 116 pass
117 117 return super(_basefilecache, self).__get__(unfi, type)
118 118
119 119 def set(self, repo, value):
120 120 return super(_basefilecache, self).set(repo.unfiltered(), value)
121 121
122 122
123 123 class repofilecache(_basefilecache):
124 124 """filecache for files in .hg but outside of .hg/store"""
125 125
126 126 def __init__(self, *paths):
127 127 super(repofilecache, self).__init__(*paths)
128 128 for path in paths:
129 129 _cachedfiles.add((path, b'plain'))
130 130
131 131 def join(self, obj, fname):
132 132 return obj.vfs.join(fname)
133 133
134 134
135 135 class storecache(_basefilecache):
136 136 """filecache for files in the store"""
137 137
138 138 def __init__(self, *paths):
139 139 super(storecache, self).__init__(*paths)
140 140 for path in paths:
141 141 _cachedfiles.add((path, b''))
142 142
143 143 def join(self, obj, fname):
144 144 return obj.sjoin(fname)
145 145
146 146
147 147 class mixedrepostorecache(_basefilecache):
148 148 """filecache for a mix files in .hg/store and outside"""
149 149
150 150 def __init__(self, *pathsandlocations):
151 151 # scmutil.filecache only uses the path for passing back into our
152 152 # join(), so we can safely pass a list of paths and locations
153 153 super(mixedrepostorecache, self).__init__(*pathsandlocations)
154 154 _cachedfiles.update(pathsandlocations)
155 155
156 156 def join(self, obj, fnameandlocation):
157 157 fname, location = fnameandlocation
158 158 if location == b'plain':
159 159 return obj.vfs.join(fname)
160 160 else:
161 161 if location != b'':
162 162 raise error.ProgrammingError(
163 163 b'unexpected location: %s' % location
164 164 )
165 165 return obj.sjoin(fname)
166 166
167 167
168 168 def isfilecached(repo, name):
169 169 """check if a repo has already cached "name" filecache-ed property
170 170
171 171 This returns (cachedobj-or-None, iscached) tuple.
172 172 """
173 173 cacheentry = repo.unfiltered()._filecache.get(name, None)
174 174 if not cacheentry:
175 175 return None, False
176 176 return cacheentry.obj, True
177 177
178 178
179 179 class unfilteredpropertycache(util.propertycache):
180 180 """propertycache that applies to the unfiltered repo only"""
181 181
182 182 def __get__(self, repo, type=None):
183 183 unfi = repo.unfiltered()
184 184 if unfi is repo:
185 185 return super(unfilteredpropertycache, self).__get__(unfi)
186 186 return getattr(unfi, self.name)
187 187
188 188
189 189 class filteredpropertycache(util.propertycache):
190 190 """propertycache that must take filtering into account"""
191 191
192 192 def cachevalue(self, obj, value):
193 193 object.__setattr__(obj, self.name, value)
194 194
195 195
196 196 def hasunfilteredcache(repo, name):
197 197 """check if a repo has an unfilteredpropertycache value for <name>"""
198 198 return name in vars(repo.unfiltered())
199 199
200 200
201 201 def unfilteredmethod(orig):
202 202 """decorate method that always need to be run on unfiltered version"""
203 203
204 204 @functools.wraps(orig)
205 205 def wrapper(repo, *args, **kwargs):
206 206 return orig(repo.unfiltered(), *args, **kwargs)
207 207
208 208 return wrapper
209 209
210 210
211 211 moderncaps = {
212 212 b'lookup',
213 213 b'branchmap',
214 214 b'pushkey',
215 215 b'known',
216 216 b'getbundle',
217 217 b'unbundle',
218 218 }
219 219 legacycaps = moderncaps.union({b'changegroupsubset'})
220 220
221 221
222 222 @interfaceutil.implementer(repository.ipeercommandexecutor)
223 223 class localcommandexecutor(object):
224 224 def __init__(self, peer):
225 225 self._peer = peer
226 226 self._sent = False
227 227 self._closed = False
228 228
229 229 def __enter__(self):
230 230 return self
231 231
232 232 def __exit__(self, exctype, excvalue, exctb):
233 233 self.close()
234 234
235 235 def callcommand(self, command, args):
236 236 if self._sent:
237 237 raise error.ProgrammingError(
238 238 b'callcommand() cannot be used after sendcommands()'
239 239 )
240 240
241 241 if self._closed:
242 242 raise error.ProgrammingError(
243 243 b'callcommand() cannot be used after close()'
244 244 )
245 245
246 246 # We don't need to support anything fancy. Just call the named
247 247 # method on the peer and return a resolved future.
248 248 fn = getattr(self._peer, pycompat.sysstr(command))
249 249
250 250 f = pycompat.futures.Future()
251 251
252 252 try:
253 253 result = fn(**pycompat.strkwargs(args))
254 254 except Exception:
255 255 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
256 256 else:
257 257 f.set_result(result)
258 258
259 259 return f
260 260
261 261 def sendcommands(self):
262 262 self._sent = True
263 263
264 264 def close(self):
265 265 self._closed = True
266 266
267 267
268 268 @interfaceutil.implementer(repository.ipeercommands)
269 269 class localpeer(repository.peer):
270 270 '''peer for a local repo; reflects only the most recent API'''
271 271
272 272 def __init__(self, repo, caps=None):
273 273 super(localpeer, self).__init__()
274 274
275 275 if caps is None:
276 276 caps = moderncaps.copy()
277 277 self._repo = repo.filtered(b'served')
278 278 self.ui = repo.ui
279 279
280 280 if repo._wanted_sidedata:
281 281 formatted = bundle2.format_remote_wanted_sidedata(repo)
282 282 caps.add(b'exp-wanted-sidedata=' + formatted)
283 283
284 284 self._caps = repo._restrictcapabilities(caps)
285 285
286 286 # Begin of _basepeer interface.
287 287
288 288 def url(self):
289 289 return self._repo.url()
290 290
291 291 def local(self):
292 292 return self._repo
293 293
294 294 def peer(self):
295 295 return self
296 296
297 297 def canpush(self):
298 298 return True
299 299
300 300 def close(self):
301 301 self._repo.close()
302 302
303 303 # End of _basepeer interface.
304 304
305 305 # Begin of _basewirecommands interface.
306 306
307 307 def branchmap(self):
308 308 return self._repo.branchmap()
309 309
310 310 def capabilities(self):
311 311 return self._caps
312 312
313 313 def clonebundles(self):
314 314 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
315 315
316 316 def debugwireargs(self, one, two, three=None, four=None, five=None):
317 317 """Used to test argument passing over the wire"""
318 318 return b"%s %s %s %s %s" % (
319 319 one,
320 320 two,
321 321 pycompat.bytestr(three),
322 322 pycompat.bytestr(four),
323 323 pycompat.bytestr(five),
324 324 )
325 325
326 326 def getbundle(
327 327 self,
328 328 source,
329 329 heads=None,
330 330 common=None,
331 331 bundlecaps=None,
332 332 remote_sidedata=None,
333 333 **kwargs
334 334 ):
335 335 chunks = exchange.getbundlechunks(
336 336 self._repo,
337 337 source,
338 338 heads=heads,
339 339 common=common,
340 340 bundlecaps=bundlecaps,
341 341 remote_sidedata=remote_sidedata,
342 342 **kwargs
343 343 )[1]
344 344 cb = util.chunkbuffer(chunks)
345 345
346 346 if exchange.bundle2requested(bundlecaps):
347 347 # When requesting a bundle2, getbundle returns a stream to make the
348 348 # wire level function happier. We need to build a proper object
349 349 # from it in local peer.
350 350 return bundle2.getunbundler(self.ui, cb)
351 351 else:
352 352 return changegroup.getunbundler(b'01', cb, None)
353 353
354 354 def heads(self):
355 355 return self._repo.heads()
356 356
357 357 def known(self, nodes):
358 358 return self._repo.known(nodes)
359 359
360 360 def listkeys(self, namespace):
361 361 return self._repo.listkeys(namespace)
362 362
363 363 def lookup(self, key):
364 364 return self._repo.lookup(key)
365 365
366 366 def pushkey(self, namespace, key, old, new):
367 367 return self._repo.pushkey(namespace, key, old, new)
368 368
369 369 def stream_out(self):
370 370 raise error.Abort(_(b'cannot perform stream clone against local peer'))
371 371
372 372 def unbundle(self, bundle, heads, url):
373 373 """apply a bundle on a repo
374 374
375 375 This function handles the repo locking itself."""
376 376 try:
377 377 try:
378 378 bundle = exchange.readbundle(self.ui, bundle, None)
379 379 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
380 380 if util.safehasattr(ret, b'getchunks'):
381 381 # This is a bundle20 object, turn it into an unbundler.
382 382 # This little dance should be dropped eventually when the
383 383 # API is finally improved.
384 384 stream = util.chunkbuffer(ret.getchunks())
385 385 ret = bundle2.getunbundler(self.ui, stream)
386 386 return ret
387 387 except Exception as exc:
388 388 # If the exception contains output salvaged from a bundle2
389 389 # reply, we need to make sure it is printed before continuing
390 390 # to fail. So we build a bundle2 with such output and consume
391 391 # it directly.
392 392 #
393 393 # This is not very elegant but allows a "simple" solution for
394 394 # issue4594
395 395 output = getattr(exc, '_bundle2salvagedoutput', ())
396 396 if output:
397 397 bundler = bundle2.bundle20(self._repo.ui)
398 398 for out in output:
399 399 bundler.addpart(out)
400 400 stream = util.chunkbuffer(bundler.getchunks())
401 401 b = bundle2.getunbundler(self.ui, stream)
402 402 bundle2.processbundle(self._repo, b)
403 403 raise
404 404 except error.PushRaced as exc:
405 405 raise error.ResponseError(
406 406 _(b'push failed:'), stringutil.forcebytestr(exc)
407 407 )
408 408
409 409 # End of _basewirecommands interface.
410 410
411 411 # Begin of peer interface.
412 412
413 413 def commandexecutor(self):
414 414 return localcommandexecutor(self)
415 415
416 416 # End of peer interface.
417 417
418 418
419 419 @interfaceutil.implementer(repository.ipeerlegacycommands)
420 420 class locallegacypeer(localpeer):
421 421 """peer extension which implements legacy methods too; used for tests with
422 422 restricted capabilities"""
423 423
424 424 def __init__(self, repo):
425 425 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
426 426
427 427 # Begin of baselegacywirecommands interface.
428 428
429 429 def between(self, pairs):
430 430 return self._repo.between(pairs)
431 431
432 432 def branches(self, nodes):
433 433 return self._repo.branches(nodes)
434 434
435 435 def changegroup(self, nodes, source):
436 436 outgoing = discovery.outgoing(
437 437 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
438 438 )
439 439 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
440 440
441 441 def changegroupsubset(self, bases, heads, source):
442 442 outgoing = discovery.outgoing(
443 443 self._repo, missingroots=bases, ancestorsof=heads
444 444 )
445 445 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
446 446
447 447 # End of baselegacywirecommands interface.
448 448
449 449
450 450 # Functions receiving (ui, features) that extensions can register to impact
451 451 # the ability to load repositories with custom requirements. Only
452 452 # functions defined in loaded extensions are called.
453 453 #
454 454 # The function receives a set of requirement strings that the repository
455 455 # is capable of opening. Functions will typically add elements to the
456 456 # set to reflect that the extension knows how to handle those requirements.
457 457 featuresetupfuncs = set()
458 458
459 459
460 460 def _getsharedvfs(hgvfs, requirements):
461 461 """returns the vfs object pointing to root of shared source
462 462 repo for a shared repository
463 463
464 464 hgvfs is vfs pointing at .hg/ of current repo (shared one)
465 465 requirements is a set of requirements of current repo (shared one)
466 466 """
467 467 # The ``shared`` or ``relshared`` requirements indicate the
468 468 # store lives in the path contained in the ``.hg/sharedpath`` file.
469 469 # This is an absolute path for ``shared`` and relative to
470 470 # ``.hg/`` for ``relshared``.
471 471 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
472 472 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
473 473 sharedpath = util.normpath(hgvfs.join(sharedpath))
474 474
475 475 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
476 476
477 477 if not sharedvfs.exists():
478 478 raise error.RepoError(
479 479 _(b'.hg/sharedpath points to nonexistent directory %s')
480 480 % sharedvfs.base
481 481 )
482 482 return sharedvfs
483 483
484 484
485 485 def _readrequires(vfs, allowmissing):
486 486 """reads the require file present at root of this vfs
487 487 and return a set of requirements
488 488
489 489 If allowmissing is True, we suppress ENOENT if raised"""
490 490 # requires file contains a newline-delimited list of
491 491 # features/capabilities the opener (us) must have in order to use
492 492 # the repository. This file was introduced in Mercurial 0.9.2,
493 493 # which means very old repositories may not have one. We assume
494 494 # a missing file translates to no requirements.
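# (Illustrative example, not part of this change: a typical modern file
# simply lists one requirement per line, e.g. "generaldelta", "revlogv1",
# "sparserevlog", "store"; the exact set depends on the repository.)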
495 495 try:
496 496 requirements = set(vfs.read(b'requires').splitlines())
497 497 except IOError as e:
498 498 if not (allowmissing and e.errno == errno.ENOENT):
499 499 raise
500 500 requirements = set()
501 501 return requirements
502 502
503 503
504 504 def makelocalrepository(baseui, path, intents=None):
505 505 """Create a local repository object.
506 506
507 507 Given arguments needed to construct a local repository, this function
508 508 performs various early repository loading functionality (such as
509 509 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
510 510 the repository can be opened, derives a type suitable for representing
511 511 that repository, and returns an instance of it.
512 512
513 513 The returned object conforms to the ``repository.completelocalrepository``
514 514 interface.
515 515
516 516 The repository type is derived by calling a series of factory functions
517 517 for each aspect/interface of the final repository. These are defined by
518 518 ``REPO_INTERFACES``.
519 519
520 520 Each factory function is called to produce a type implementing a specific
521 521 interface. The cumulative list of returned types will be combined into a
522 522 new type and that type will be instantiated to represent the local
523 523 repository.
524 524
525 525 The factory functions each receive various state that may be consulted
526 526 as part of deriving a type.
527 527
528 528 Extensions should wrap these factory functions to customize repository type
529 529 creation. Note that an extension's wrapped function may be called even if
530 530 that extension is not loaded for the repo being constructed. Extensions
531 531 should check if their ``__name__`` appears in the
532 532 ``extensionmodulenames`` set passed to the factory function and no-op if
533 533 not.
534 534 """
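# Illustrative sketch (not part of this change) of the type-combination step
# the docstring above describes; the two part classes are hypothetical, while
# the real bases come from the ``REPO_INTERFACES`` factory functions.
class _mainpart(object):
    def close(self):
        pass


class _filestoragepart(object):
    def file(self, path):
        return b'file storage for ' + path


# mirrors the later `cls = type(name, tuple(bases), {})` call; type() accepts
# names that are not valid Python identifiers, which is used below to embed
# extra information in the derived class name
_derived = type('derivedrepo:example', (_mainpart, _filestoragepart), {})
_repo_like = _derived()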
535 535 ui = baseui.copy()
536 536 # Prevent copying repo configuration.
537 537 ui.copy = baseui.copy
538 538
539 539 # Working directory VFS rooted at repository root.
540 540 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
541 541
542 542 # Main VFS for .hg/ directory.
543 543 hgpath = wdirvfs.join(b'.hg')
544 544 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
545 545 # Whether this repository is a shared one or not
546 546 shared = False
547 547 # If this repository is shared, this is the vfs pointing to the shared repo
548 548 sharedvfs = None
549 549
550 550 # The .hg/ path should exist and should be a directory. All other
551 551 # cases are errors.
552 552 if not hgvfs.isdir():
553 553 try:
554 554 hgvfs.stat()
555 555 except OSError as e:
556 556 if e.errno != errno.ENOENT:
557 557 raise
558 558 except ValueError as e:
559 559 # Can be raised on Python 3.8 when path is invalid.
560 560 raise error.Abort(
561 561 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
562 562 )
563 563
564 564 raise error.RepoError(_(b'repository %s not found') % path)
565 565
566 566 requirements = _readrequires(hgvfs, True)
567 567 shared = (
568 568 requirementsmod.SHARED_REQUIREMENT in requirements
569 569 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
570 570 )
571 571 storevfs = None
572 572 if shared:
573 573 # This is a shared repo
574 574 sharedvfs = _getsharedvfs(hgvfs, requirements)
575 575 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
576 576 else:
577 577 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
578 578
579 579 # if .hg/requires contains the sharesafe requirement, it means
580 580 # there exists a `.hg/store/requires` too and we should read it
581 581 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
582 582 # is present. We never write SHARESAFE_REQUIREMENT for a repo if the store
583 583 # requirement is not present; refer to checkrequirementscompat() for that
584 584 #
585 585 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
586 586 # repository was shared the old way. We check the share source .hg/requires
587 587 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
588 588 # to be reshared
589 589 hint = _(b"see `hg help config.format.use-share-safe` for more information")
590 590 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
591 591
592 592 if (
593 593 shared
594 594 and requirementsmod.SHARESAFE_REQUIREMENT
595 595 not in _readrequires(sharedvfs, True)
596 596 ):
597 597 mismatch_warn = ui.configbool(
598 598 b'share', b'safe-mismatch.source-not-safe.warn'
599 599 )
600 600 mismatch_config = ui.config(
601 601 b'share', b'safe-mismatch.source-not-safe'
602 602 )
603 603 if mismatch_config in (
604 604 b'downgrade-allow',
605 605 b'allow',
606 606 b'downgrade-abort',
607 607 ):
608 608 # prevent cyclic import localrepo -> upgrade -> localrepo
609 609 from . import upgrade
610 610
611 611 upgrade.downgrade_share_to_non_safe(
612 612 ui,
613 613 hgvfs,
614 614 sharedvfs,
615 615 requirements,
616 616 mismatch_config,
617 617 mismatch_warn,
618 618 )
619 619 elif mismatch_config == b'abort':
620 620 raise error.Abort(
621 621 _(b"share source does not support share-safe requirement"),
622 622 hint=hint,
623 623 )
624 624 else:
625 625 raise error.Abort(
626 626 _(
627 627 b"share-safe mismatch with source.\nUnrecognized"
628 628 b" value '%s' of `share.safe-mismatch.source-not-safe`"
629 629 b" set."
630 630 )
631 631 % mismatch_config,
632 632 hint=hint,
633 633 )
634 634 else:
635 635 requirements |= _readrequires(storevfs, False)
636 636 elif shared:
637 637 sourcerequires = _readrequires(sharedvfs, False)
638 638 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
639 639 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
640 640 mismatch_warn = ui.configbool(
641 641 b'share', b'safe-mismatch.source-safe.warn'
642 642 )
643 643 if mismatch_config in (
644 644 b'upgrade-allow',
645 645 b'allow',
646 646 b'upgrade-abort',
647 647 ):
648 648 # prevent cyclic import localrepo -> upgrade -> localrepo
649 649 from . import upgrade
650 650
651 651 upgrade.upgrade_share_to_safe(
652 652 ui,
653 653 hgvfs,
654 654 storevfs,
655 655 requirements,
656 656 mismatch_config,
657 657 mismatch_warn,
658 658 )
659 659 elif mismatch_config == b'abort':
660 660 raise error.Abort(
661 661 _(
662 662 b'version mismatch: source uses share-safe'
663 663 b' functionality while the current share does not'
664 664 ),
665 665 hint=hint,
666 666 )
667 667 else:
668 668 raise error.Abort(
669 669 _(
670 670 b"share-safe mismatch with source.\nUnrecognized"
671 671 b" value '%s' of `share.safe-mismatch.source-safe` set."
672 672 )
673 673 % mismatch_config,
674 674 hint=hint,
675 675 )
676 676
677 677 # The .hg/hgrc file may load extensions or contain config options
678 678 # that influence repository construction. Attempt to load it and
679 679 # process any new extensions that it may have pulled in.
680 680 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
681 681 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
682 682 extensions.loadall(ui)
683 683 extensions.populateui(ui)
684 684
685 685 # Set of module names of extensions loaded for this repository.
686 686 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
687 687
688 688 supportedrequirements = gathersupportedrequirements(ui)
689 689
690 690 # We first validate the requirements are known.
691 691 ensurerequirementsrecognized(requirements, supportedrequirements)
692 692
693 693 # Then we validate that the known set is reasonable to use together.
694 694 ensurerequirementscompatible(ui, requirements)
695 695
696 696 # TODO there are unhandled edge cases related to opening repositories with
697 697 # shared storage. If storage is shared, we should also test for requirements
698 698 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
699 699 # that repo, as that repo may load extensions needed to open it. This is a
700 700 # bit complicated because we don't want the other hgrc to overwrite settings
701 701 # in this hgrc.
702 702 #
703 703 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
704 704 # file when sharing repos. But if a requirement is added after the share is
705 705 # performed, thereby introducing a new requirement for the opener, we
706 706 # will not see that and could encounter a run-time error interacting with
707 707 # that shared store since it has an unknown-to-us requirement.
708 708
709 709 # At this point, we know we should be capable of opening the repository.
710 710 # Now get on with doing that.
711 711
712 712 features = set()
713 713
714 714 # The "store" part of the repository holds versioned data. How it is
715 715 # accessed is determined by various requirements. If `shared` or
716 716 # `relshared` requirements are present, this indicates current repository
717 717 # is a share and store exists in path mentioned in `.hg/sharedpath`
718 718 if shared:
719 719 storebasepath = sharedvfs.base
720 720 cachepath = sharedvfs.join(b'cache')
721 721 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
722 722 else:
723 723 storebasepath = hgvfs.base
724 724 cachepath = hgvfs.join(b'cache')
725 725 wcachepath = hgvfs.join(b'wcache')
726 726
727 727 # The store has changed over time and the exact layout is dictated by
728 728 # requirements. The store interface abstracts differences across all
729 729 # of them.
730 730 store = makestore(
731 731 requirements,
732 732 storebasepath,
733 733 lambda base: vfsmod.vfs(base, cacheaudited=True),
734 734 )
735 735 hgvfs.createmode = store.createmode
736 736
737 737 storevfs = store.vfs
738 738 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
739 739
740 740 if (
741 741 requirementsmod.REVLOGV2_REQUIREMENT in requirements
742 742 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
743 743 ):
744 744 features.add(repository.REPO_FEATURE_SIDE_DATA)
745 745 # the revlogv2 docket introduced race condition that we need to fix
746 746 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
747 747
748 748 # The cache vfs is used to manage cache files.
749 749 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
750 750 cachevfs.createmode = store.createmode
751 751 # The cache vfs is used to manage cache files related to the working copy
752 752 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
753 753 wcachevfs.createmode = store.createmode
754 754
755 755 # Now resolve the type for the repository object. We do this by repeatedly
756 756 # calling a factory function to produces types for specific aspects of the
757 757 # repo's operation. The aggregate returned types are used as base classes
758 758 # for a dynamically-derived type, which will represent our new repository.
759 759
760 760 bases = []
761 761 extrastate = {}
762 762
763 763 for iface, fn in REPO_INTERFACES:
764 764 # We pass all potentially useful state to give extensions tons of
765 765 # flexibility.
766 766 typ = fn()(
767 767 ui=ui,
768 768 intents=intents,
769 769 requirements=requirements,
770 770 features=features,
771 771 wdirvfs=wdirvfs,
772 772 hgvfs=hgvfs,
773 773 store=store,
774 774 storevfs=storevfs,
775 775 storeoptions=storevfs.options,
776 776 cachevfs=cachevfs,
777 777 wcachevfs=wcachevfs,
778 778 extensionmodulenames=extensionmodulenames,
779 779 extrastate=extrastate,
780 780 baseclasses=bases,
781 781 )
782 782
783 783 if not isinstance(typ, type):
784 784 raise error.ProgrammingError(
785 785 b'unable to construct type for %s' % iface
786 786 )
787 787
788 788 bases.append(typ)
789 789
790 790 # type() allows you to use characters in type names that wouldn't be
791 791 # recognized as Python symbols in source code. We abuse that to add
792 792 # rich information about our constructed repo.
793 793 name = pycompat.sysstr(
794 794 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
795 795 )
796 796
797 797 cls = type(name, tuple(bases), {})
798 798
799 799 return cls(
800 800 baseui=baseui,
801 801 ui=ui,
802 802 origroot=path,
803 803 wdirvfs=wdirvfs,
804 804 hgvfs=hgvfs,
805 805 requirements=requirements,
806 806 supportedrequirements=supportedrequirements,
807 807 sharedpath=storebasepath,
808 808 store=store,
809 809 cachevfs=cachevfs,
810 810 wcachevfs=wcachevfs,
811 811 features=features,
812 812 intents=intents,
813 813 )
814 814
815 815
816 816 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
817 817 """Load hgrc files/content into a ui instance.
818 818
819 819 This is called during repository opening to load any additional
820 820 config files or settings relevant to the current repository.
821 821
822 822 Returns a bool indicating whether any additional configs were loaded.
823 823
824 824 Extensions should monkeypatch this function to modify how per-repo
825 825 configs are loaded. For example, an extension may wish to pull in
826 826 configs from alternate files or sources.
827 827
828 828 sharedvfs is vfs object pointing to source repo if the current one is a
829 829 shared one
830 830 """
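# Illustrative sketch (not part of this change) of the monkeypatching the
# docstring above suggests, assuming a small extension module; the extra
# config file name b'hgrc-extra' and the wrapper are hypothetical, and the
# usual ``extensions.wrapfunction`` helper is assumed to be available.
from mercurial import extensions, localrepo


def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
    try:
        # hypothetical extra per-repo config file
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass
    return ret


def uisetup(ui):
    extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)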
831 831 if not rcutil.use_repo_hgrc():
832 832 return False
833 833
834 834 ret = False
835 835 # first load config from the shared source if we have to
836 836 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
837 837 try:
838 838 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
839 839 ret = True
840 840 except IOError:
841 841 pass
842 842
843 843 try:
844 844 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
845 845 ret = True
846 846 except IOError:
847 847 pass
848 848
849 849 try:
850 850 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
851 851 ret = True
852 852 except IOError:
853 853 pass
854 854
855 855 return ret
856 856
857 857
858 858 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
859 859 """Perform additional actions after .hg/hgrc is loaded.
860 860
861 861 This function is called during repository loading immediately after
862 862 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
863 863
864 864 The function can be used to validate configs, automatically add
865 865 options (including extensions) based on requirements, etc.
866 866 """
867 867
868 868 # Map of requirements to list of extensions to load automatically when
869 869 # requirement is present.
870 870 autoextensions = {
871 871 b'git': [b'git'],
872 872 b'largefiles': [b'largefiles'],
873 873 b'lfs': [b'lfs'],
874 874 }
875 875
876 876 for requirement, names in sorted(autoextensions.items()):
877 877 if requirement not in requirements:
878 878 continue
879 879
880 880 for name in names:
881 881 if not ui.hasconfig(b'extensions', name):
882 882 ui.setconfig(b'extensions', name, b'', source=b'autoload')
883 883
884 884
885 885 def gathersupportedrequirements(ui):
886 886 """Determine the complete set of recognized requirements."""
887 887 # Start with all requirements supported by this file.
888 888 supported = set(localrepository._basesupported)
889 889
890 if dirstate.SUPPORTS_DIRSTATE_V2:
891 supported.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
892
890 893 # Execute ``featuresetupfuncs`` entries if they belong to an extension
891 894 # relevant to this ui instance.
892 895 modules = {m.__name__ for n, m in extensions.extensions(ui)}
893 896
894 897 for fn in featuresetupfuncs:
895 898 if fn.__module__ in modules:
896 899 fn(ui, supported)
897 900
898 901 # Add derived requirements from registered compression engines.
899 902 for name in util.compengines:
900 903 engine = util.compengines[name]
901 904 if engine.available() and engine.revlogheader():
902 905 supported.add(b'exp-compression-%s' % name)
903 906 if engine.name() == b'zstd':
904 907 supported.add(b'revlog-compression-zstd')
905 908
906 909 return supported
907 910
908 911
909 912 def ensurerequirementsrecognized(requirements, supported):
910 913 """Validate that a set of local requirements is recognized.
911 914
912 915 Receives a set of requirements. Raises an ``error.RepoError`` if there
913 916 exists any requirement in that set that currently loaded code doesn't
914 917 recognize.
915 918
916 919 Returns a set of supported requirements.
917 920 """
918 921 missing = set()
919 922
920 923 for requirement in requirements:
921 924 if requirement in supported:
922 925 continue
923 926
924 927 if not requirement or not requirement[0:1].isalnum():
925 928 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
926 929
927 930 missing.add(requirement)
928 931
929 932 if missing:
930 933 raise error.RequirementError(
931 934 _(b'repository requires features unknown to this Mercurial: %s')
932 935 % b' '.join(sorted(missing)),
933 936 hint=_(
934 937 b'see https://mercurial-scm.org/wiki/MissingRequirement '
935 938 b'for more information'
936 939 ),
937 940 )
938 941
939 942
940 943 def ensurerequirementscompatible(ui, requirements):
941 944 """Validates that a set of recognized requirements is mutually compatible.
942 945
943 946 Some requirements may not be compatible with others or require
944 947 config options that aren't enabled. This function is called during
945 948 repository opening to ensure that the set of requirements needed
946 949 to open a repository is sane and compatible with config options.
947 950
948 951 Extensions can monkeypatch this function to perform additional
949 952 checking.
950 953
951 954 ``error.RepoError`` should be raised on failure.
952 955 """
953 956 if (
954 957 requirementsmod.SPARSE_REQUIREMENT in requirements
955 958 and not sparse.enabled
956 959 ):
957 960 raise error.RepoError(
958 961 _(
959 962 b'repository is using sparse feature but '
960 963 b'sparse is not enabled; enable the '
961 964 b'"sparse" extensions to access'
962 965 )
963 966 )
964 967
965 968
966 969 def makestore(requirements, path, vfstype):
967 970 """Construct a storage object for a repository."""
968 971 if requirementsmod.STORE_REQUIREMENT in requirements:
969 972 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
970 973 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
971 974 return storemod.fncachestore(path, vfstype, dotencode)
972 975
973 976 return storemod.encodedstore(path, vfstype)
974 977
975 978 return storemod.basicstore(path, vfstype)
976 979
977 980
978 981 def resolvestorevfsoptions(ui, requirements, features):
979 982 """Resolve the options to pass to the store vfs opener.
980 983
981 984 The returned dict is used to influence behavior of the storage layer.
982 985 """
983 986 options = {}
984 987
985 988 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
986 989 options[b'treemanifest'] = True
987 990
988 991 # experimental config: format.manifestcachesize
989 992 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
990 993 if manifestcachesize is not None:
991 994 options[b'manifestcachesize'] = manifestcachesize
992 995
993 996 # In the absence of another requirement superseding a revlog-related
994 997 # requirement, we have to assume the repo is using revlog version 0.
995 998 # This revlog format is super old and we don't bother trying to parse
996 999 # opener options for it because those options wouldn't do anything
997 1000 # meaningful on such old repos.
998 1001 if (
999 1002 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1000 1003 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1001 1004 ):
1002 1005 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1003 1006 else: # explicitly mark repo as using revlogv0
1004 1007 options[b'revlogv0'] = True
1005 1008
1006 1009 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1007 1010 options[b'copies-storage'] = b'changeset-sidedata'
1008 1011 else:
1009 1012 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1010 1013 copiesextramode = (b'changeset-only', b'compatibility')
1011 1014 if writecopiesto in copiesextramode:
1012 1015 options[b'copies-storage'] = b'extra'
1013 1016
1014 1017 return options
1015 1018
1016 1019
1017 1020 def resolverevlogstorevfsoptions(ui, requirements, features):
1018 1021 """Resolve opener options specific to revlogs."""
1019 1022
1020 1023 options = {}
1021 1024 options[b'flagprocessors'] = {}
1022 1025
1023 1026 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1024 1027 options[b'revlogv1'] = True
1025 1028 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1026 1029 options[b'revlogv2'] = True
1027 1030 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1028 1031 options[b'changelogv2'] = True
1029 1032
1030 1033 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1031 1034 options[b'generaldelta'] = True
1032 1035
1033 1036 # experimental config: format.chunkcachesize
1034 1037 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1035 1038 if chunkcachesize is not None:
1036 1039 options[b'chunkcachesize'] = chunkcachesize
1037 1040
1038 1041 deltabothparents = ui.configbool(
1039 1042 b'storage', b'revlog.optimize-delta-parent-choice'
1040 1043 )
1041 1044 options[b'deltabothparents'] = deltabothparents
1042 1045
1043 1046 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1044 1047 lazydeltabase = False
1045 1048 if lazydelta:
1046 1049 lazydeltabase = ui.configbool(
1047 1050 b'storage', b'revlog.reuse-external-delta-parent'
1048 1051 )
1049 1052 if lazydeltabase is None:
1050 1053 lazydeltabase = not scmutil.gddeltaconfig(ui)
1051 1054 options[b'lazydelta'] = lazydelta
1052 1055 options[b'lazydeltabase'] = lazydeltabase
1053 1056
1054 1057 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1055 1058 if 0 <= chainspan:
1056 1059 options[b'maxdeltachainspan'] = chainspan
1057 1060
1058 1061 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1059 1062 if mmapindexthreshold is not None:
1060 1063 options[b'mmapindexthreshold'] = mmapindexthreshold
1061 1064
1062 1065 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1063 1066 srdensitythres = float(
1064 1067 ui.config(b'experimental', b'sparse-read.density-threshold')
1065 1068 )
1066 1069 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1067 1070 options[b'with-sparse-read'] = withsparseread
1068 1071 options[b'sparse-read-density-threshold'] = srdensitythres
1069 1072 options[b'sparse-read-min-gap-size'] = srmingapsize
1070 1073
1071 1074 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1072 1075 options[b'sparse-revlog'] = sparserevlog
1073 1076 if sparserevlog:
1074 1077 options[b'generaldelta'] = True
1075 1078
1076 1079 maxchainlen = None
1077 1080 if sparserevlog:
1078 1081 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1079 1082 # experimental config: format.maxchainlen
1080 1083 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1081 1084 if maxchainlen is not None:
1082 1085 options[b'maxchainlen'] = maxchainlen
1083 1086
1084 1087 for r in requirements:
1085 1088 # we allow multiple compression engine requirements to co-exist because,
1086 1089 # strictly speaking, revlog seems to support mixed compression styles.
1087 1090 #
1088 1091 # The compression used for new entries will be "the last one"
1089 1092 prefix = r.startswith
1090 1093 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1091 1094 options[b'compengine'] = r.split(b'-', 2)[2]
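# for illustration (not part of this change): b'revlog-compression-zstd'
# split this way yields b'zstd', which then becomes the active compengine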
1092 1095
1093 1096 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1094 1097 if options[b'zlib.level'] is not None:
1095 1098 if not (0 <= options[b'zlib.level'] <= 9):
1096 1099 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1097 1100 raise error.Abort(msg % options[b'zlib.level'])
1098 1101 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1099 1102 if options[b'zstd.level'] is not None:
1100 1103 if not (0 <= options[b'zstd.level'] <= 22):
1101 1104 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1102 1105 raise error.Abort(msg % options[b'zstd.level'])
1103 1106
1104 1107 if requirementsmod.NARROW_REQUIREMENT in requirements:
1105 1108 options[b'enableellipsis'] = True
1106 1109
1107 1110 if ui.configbool(b'experimental', b'rust.index'):
1108 1111 options[b'rust.index'] = True
1109 1112 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1110 1113 slow_path = ui.config(
1111 1114 b'storage', b'revlog.persistent-nodemap.slow-path'
1112 1115 )
1113 1116 if slow_path not in (b'allow', b'warn', b'abort'):
1114 1117 default = ui.config_default(
1115 1118 b'storage', b'revlog.persistent-nodemap.slow-path'
1116 1119 )
1117 1120 msg = _(
1118 1121 b'unknown value for config '
1119 1122 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1120 1123 )
1121 1124 ui.warn(msg % slow_path)
1122 1125 if not ui.quiet:
1123 1126 ui.warn(_(b'falling back to default value: %s\n') % default)
1124 1127 slow_path = default
1125 1128
1126 1129 msg = _(
1127 1130 b"accessing `persistent-nodemap` repository without associated "
1128 1131 b"fast implementation."
1129 1132 )
1130 1133 hint = _(
1131 1134 b"check `hg help config.format.use-persistent-nodemap` "
1132 1135 b"for details"
1133 1136 )
1134 1137 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1135 1138 if slow_path == b'warn':
1136 1139 msg = b"warning: " + msg + b'\n'
1137 1140 ui.warn(msg)
1138 1141 if not ui.quiet:
1139 1142 hint = b'(' + hint + b')\n'
1140 1143 ui.warn(hint)
1141 1144 if slow_path == b'abort':
1142 1145 raise error.Abort(msg, hint=hint)
1143 1146 options[b'persistent-nodemap'] = True
1144 1147 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1145 1148 options[b'persistent-nodemap.mmap'] = True
1146 1149 if ui.configbool(b'devel', b'persistent-nodemap'):
1147 1150 options[b'devel-force-nodemap'] = True
1148 1151
1149 1152 return options
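# Illustrative configuration sketch (the option names appear in the checks
# above; the chosen values are examples only):
#   [storage]
#   revlog.persistent-nodemap.slow-path = warn   # also accepts: allow, abort
#   revlog.persistent-nodemap.mmap = yes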
1150 1153
1151 1154
1152 1155 def makemain(**kwargs):
1153 1156 """Produce a type conforming to ``ilocalrepositorymain``."""
1154 1157 return localrepository
1155 1158
1156 1159
1157 1160 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1158 1161 class revlogfilestorage(object):
1159 1162 """File storage when using revlogs."""
1160 1163
1161 1164 def file(self, path):
1162 1165 if path.startswith(b'/'):
1163 1166 path = path[1:]
1164 1167
1165 1168 return filelog.filelog(self.svfs, path)
1166 1169
1167 1170
1168 1171 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1169 1172 class revlognarrowfilestorage(object):
1170 1173 """File storage when using revlogs and narrow files."""
1171 1174
1172 1175 def file(self, path):
1173 1176 if path.startswith(b'/'):
1174 1177 path = path[1:]
1175 1178
1176 1179 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1177 1180
1178 1181
1179 1182 def makefilestorage(requirements, features, **kwargs):
1180 1183 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1181 1184 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1182 1185 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1183 1186
1184 1187 if requirementsmod.NARROW_REQUIREMENT in requirements:
1185 1188 return revlognarrowfilestorage
1186 1189 else:
1187 1190 return revlogfilestorage
1188 1191
1189 1192
1190 1193 # List of repository interfaces and factory functions for them. Each
1191 1194 # will be called in order during ``makelocalrepository()`` to iteratively
1192 1195 # derive the final type for a local repository instance. We capture the
1193 1196 # function as a lambda so we don't hold a reference and the module-level
1194 1197 # functions can be wrapped.
1195 1198 REPO_INTERFACES = [
1196 1199 (repository.ilocalrepositorymain, lambda: makemain),
1197 1200 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1198 1201 ]
1199 1202
1200 1203
1201 1204 @interfaceutil.implementer(repository.ilocalrepositorymain)
1202 1205 class localrepository(object):
1203 1206 """Main class for representing local repositories.
1204 1207
1205 1208 All local repositories are instances of this class.
1206 1209
1207 1210 Constructed on its own, instances of this class are not usable as
1208 1211 repository objects. To obtain a usable repository object, call
1209 1212 ``hg.repository()``, ``localrepo.instance()``, or
1210 1213 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1211 1214 ``instance()`` adds support for creating new repositories.
1212 1215 ``hg.repository()`` adds more extension integration, including calling
1213 1216 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1214 1217 used.
1215 1218 """
1216 1219
1217 1220 # obsolete experimental requirements:
1218 1221 # - manifestv2: An experimental new manifest format that allowed
1219 1222 # for stem compression of long paths. Experiment ended up not
1220 1223 # being successful (repository sizes went up due to worse delta
1221 1224 # chains), and the code was deleted in 4.6.
1222 1225 supportedformats = {
1223 1226 requirementsmod.REVLOGV1_REQUIREMENT,
1224 1227 requirementsmod.GENERALDELTA_REQUIREMENT,
1225 1228 requirementsmod.TREEMANIFEST_REQUIREMENT,
1226 1229 requirementsmod.COPIESSDC_REQUIREMENT,
1227 1230 requirementsmod.REVLOGV2_REQUIREMENT,
1228 1231 requirementsmod.CHANGELOGV2_REQUIREMENT,
1229 1232 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1230 1233 requirementsmod.NODEMAP_REQUIREMENT,
1231 1234 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1232 1235 requirementsmod.SHARESAFE_REQUIREMENT,
1233 1236 }
1234 1237 _basesupported = supportedformats | {
1235 1238 requirementsmod.STORE_REQUIREMENT,
1236 1239 requirementsmod.FNCACHE_REQUIREMENT,
1237 1240 requirementsmod.SHARED_REQUIREMENT,
1238 1241 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1239 1242 requirementsmod.DOTENCODE_REQUIREMENT,
1240 1243 requirementsmod.SPARSE_REQUIREMENT,
1241 1244 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1242 1245 }
1243 1246
1244 1247 # list of prefixes for files which can be written without 'wlock'
1245 1248 # Extensions should extend this list when needed
1246 1249 _wlockfreeprefix = {
1247 1250 # We might consider requiring 'wlock' for the next
1248 1251 # two, but pretty much all the existing code assumes
1249 1252 # wlock is not needed so we keep them excluded for
1250 1253 # now.
1251 1254 b'hgrc',
1252 1255 b'requires',
1253 1256 # XXX cache is a complicated business; someone
1254 1257 # should investigate this in depth at some point
1255 1258 b'cache/',
1256 1259 # XXX shouldn't the dirstate be covered by the wlock?
1257 1260 b'dirstate',
1258 1261 # XXX bisect was still a bit too messy at the time
1259 1262 # this changeset was introduced. Someone should fix
1260 1263 # the remaining bit and drop this line
1261 1264 b'bisect.state',
1262 1265 }
1263 1266
1264 1267 def __init__(
1265 1268 self,
1266 1269 baseui,
1267 1270 ui,
1268 1271 origroot,
1269 1272 wdirvfs,
1270 1273 hgvfs,
1271 1274 requirements,
1272 1275 supportedrequirements,
1273 1276 sharedpath,
1274 1277 store,
1275 1278 cachevfs,
1276 1279 wcachevfs,
1277 1280 features,
1278 1281 intents=None,
1279 1282 ):
1280 1283 """Create a new local repository instance.
1281 1284
1282 1285 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1283 1286 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1284 1287 object.
1285 1288
1286 1289 Arguments:
1287 1290
1288 1291 baseui
1289 1292 ``ui.ui`` instance that ``ui`` argument was based off of.
1290 1293
1291 1294 ui
1292 1295 ``ui.ui`` instance for use by the repository.
1293 1296
1294 1297 origroot
1295 1298 ``bytes`` path to working directory root of this repository.
1296 1299
1297 1300 wdirvfs
1298 1301 ``vfs.vfs`` rooted at the working directory.
1299 1302
1300 1303 hgvfs
1301 1304 ``vfs.vfs`` rooted at .hg/
1302 1305
1303 1306 requirements
1304 1307 ``set`` of bytestrings representing repository opening requirements.
1305 1308
1306 1309 supportedrequirements
1307 1310 ``set`` of bytestrings representing repository requirements that we
1308 1311 know how to open. May be a superset of ``requirements``.
1309 1312
1310 1313 sharedpath
1311 1314 ``bytes`` Defining path to storage base directory. Points to a
1312 1315 ``.hg/`` directory somewhere.
1313 1316
1314 1317 store
1315 1318 ``store.basicstore`` (or derived) instance providing access to
1316 1319 versioned storage.
1317 1320
1318 1321 cachevfs
1319 1322 ``vfs.vfs`` used for cache files.
1320 1323
1321 1324 wcachevfs
1322 1325 ``vfs.vfs`` used for cache files related to the working copy.
1323 1326
1324 1327 features
1325 1328 ``set`` of bytestrings defining features/capabilities of this
1326 1329 instance.
1327 1330
1328 1331 intents
1329 1332 ``set`` of system strings indicating what this repo will be used
1330 1333 for.
1331 1334 """
1332 1335 self.baseui = baseui
1333 1336 self.ui = ui
1334 1337 self.origroot = origroot
1335 1338 # vfs rooted at working directory.
1336 1339 self.wvfs = wdirvfs
1337 1340 self.root = wdirvfs.base
1338 1341 # vfs rooted at .hg/. Used to access most non-store paths.
1339 1342 self.vfs = hgvfs
1340 1343 self.path = hgvfs.base
1341 1344 self.requirements = requirements
1342 1345 self.nodeconstants = sha1nodeconstants
1343 1346 self.nullid = self.nodeconstants.nullid
1344 1347 self.supported = supportedrequirements
1345 1348 self.sharedpath = sharedpath
1346 1349 self.store = store
1347 1350 self.cachevfs = cachevfs
1348 1351 self.wcachevfs = wcachevfs
1349 1352 self.features = features
1350 1353
1351 1354 self.filtername = None
1352 1355
1353 1356 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1354 1357 b'devel', b'check-locks'
1355 1358 ):
1356 1359 self.vfs.audit = self._getvfsward(self.vfs.audit)
1357 1360 # A list of callbacks to shape the phase if no data were found.
1358 1361 # Callbacks are in the form: func(repo, roots) --> processed root.
1359 1362 # This list is to be filled by extensions during repo setup
1360 1363 self._phasedefaults = []
1361 1364
1362 1365 color.setup(self.ui)
1363 1366
1364 1367 self.spath = self.store.path
1365 1368 self.svfs = self.store.vfs
1366 1369 self.sjoin = self.store.join
1367 1370 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1368 1371 b'devel', b'check-locks'
1369 1372 ):
1370 1373 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1371 1374 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1372 1375 else: # standard vfs
1373 1376 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1374 1377
1375 1378 self._dirstatevalidatewarned = False
1376 1379
1377 1380 self._branchcaches = branchmap.BranchMapCache()
1378 1381 self._revbranchcache = None
1379 1382 self._filterpats = {}
1380 1383 self._datafilters = {}
1381 1384 self._transref = self._lockref = self._wlockref = None
1382 1385
1383 1386 # A cache for various files under .hg/ that tracks file changes,
1384 1387 # (used by the filecache decorator)
1385 1388 #
1386 1389 # Maps a property name to its util.filecacheentry
1387 1390 self._filecache = {}
1388 1391
1389 1392 # hold sets of revision to be filtered
1390 1393 # should be cleared when something might have changed the filter value:
1391 1394 # - new changesets,
1392 1395 # - phase change,
1393 1396 # - new obsolescence marker,
1394 1397 # - working directory parent change,
1395 1398 # - bookmark changes
1396 1399 self.filteredrevcache = {}
1397 1400
1398 1401 # post-dirstate-status hooks
1399 1402 self._postdsstatus = []
1400 1403
1401 1404 # generic mapping between names and nodes
1402 1405 self.names = namespaces.namespaces()
1403 1406
1404 1407 # Key to signature value.
1405 1408 self._sparsesignaturecache = {}
1406 1409 # Signature to cached matcher instance.
1407 1410 self._sparsematchercache = {}
1408 1411
1409 1412 self._extrafilterid = repoview.extrafilter(ui)
1410 1413
1411 1414 self.filecopiesmode = None
1412 1415 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1413 1416 self.filecopiesmode = b'changeset-sidedata'
1414 1417
1415 1418 self._wanted_sidedata = set()
1416 1419 self._sidedata_computers = {}
1417 1420 sidedatamod.set_sidedata_spec_for_repo(self)
1418 1421
1419 1422 def _getvfsward(self, origfunc):
1420 1423 """build a ward for self.vfs"""
1421 1424 rref = weakref.ref(self)
1422 1425
1423 1426 def checkvfs(path, mode=None):
1424 1427 ret = origfunc(path, mode=mode)
1425 1428 repo = rref()
1426 1429 if (
1427 1430 repo is None
1428 1431 or not util.safehasattr(repo, b'_wlockref')
1429 1432 or not util.safehasattr(repo, b'_lockref')
1430 1433 ):
1431 1434 return
1432 1435 if mode in (None, b'r', b'rb'):
1433 1436 return
1434 1437 if path.startswith(repo.path):
1435 1438 # truncate name relative to the repository (.hg)
1436 1439 path = path[len(repo.path) + 1 :]
1437 1440 if path.startswith(b'cache/'):
1438 1441 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1439 1442 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1440 1443 # path prefixes covered by 'lock'
1441 1444 vfs_path_prefixes = (
1442 1445 b'journal.',
1443 1446 b'undo.',
1444 1447 b'strip-backup/',
1445 1448 b'cache/',
1446 1449 )
1447 1450 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1448 1451 if repo._currentlock(repo._lockref) is None:
1449 1452 repo.ui.develwarn(
1450 1453 b'write with no lock: "%s"' % path,
1451 1454 stacklevel=3,
1452 1455 config=b'check-locks',
1453 1456 )
1454 1457 elif repo._currentlock(repo._wlockref) is None:
1455 1458 # rest of vfs files are covered by 'wlock'
1456 1459 #
1457 1460 # exclude special files
1458 1461 for prefix in self._wlockfreeprefix:
1459 1462 if path.startswith(prefix):
1460 1463 return
1461 1464 repo.ui.develwarn(
1462 1465 b'write with no wlock: "%s"' % path,
1463 1466 stacklevel=3,
1464 1467 config=b'check-locks',
1465 1468 )
1466 1469 return ret
1467 1470
1468 1471 return checkvfs
1469 1472
1470 1473 def _getsvfsward(self, origfunc):
1471 1474 """build a ward for self.svfs"""
1472 1475 rref = weakref.ref(self)
1473 1476
1474 1477 def checksvfs(path, mode=None):
1475 1478 ret = origfunc(path, mode=mode)
1476 1479 repo = rref()
1477 1480 if repo is None or not util.safehasattr(repo, b'_lockref'):
1478 1481 return
1479 1482 if mode in (None, b'r', b'rb'):
1480 1483 return
1481 1484 if path.startswith(repo.sharedpath):
1482 1485 # truncate name relative to the repository (.hg)
1483 1486 path = path[len(repo.sharedpath) + 1 :]
1484 1487 if repo._currentlock(repo._lockref) is None:
1485 1488 repo.ui.develwarn(
1486 1489 b'write with no lock: "%s"' % path, stacklevel=4
1487 1490 )
1488 1491 return ret
1489 1492
1490 1493 return checksvfs
1491 1494
1492 1495 def close(self):
1493 1496 self._writecaches()
1494 1497
1495 1498 def _writecaches(self):
1496 1499 if self._revbranchcache:
1497 1500 self._revbranchcache.write()
1498 1501
1499 1502 def _restrictcapabilities(self, caps):
1500 1503 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1501 1504 caps = set(caps)
1502 1505 capsblob = bundle2.encodecaps(
1503 1506 bundle2.getrepocaps(self, role=b'client')
1504 1507 )
1505 1508 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1506 1509 if self.ui.configbool(b'experimental', b'narrow'):
1507 1510 caps.add(wireprototypes.NARROWCAP)
1508 1511 return caps
1509 1512
1510 1513 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1511 1514 # self -> auditor -> self._checknested -> self
1512 1515
1513 1516 @property
1514 1517 def auditor(self):
1515 1518 # This is only used by context.workingctx.match in order to
1516 1519 # detect files in subrepos.
1517 1520 return pathutil.pathauditor(self.root, callback=self._checknested)
1518 1521
1519 1522 @property
1520 1523 def nofsauditor(self):
1521 1524 # This is only used by context.basectx.match in order to detect
1522 1525 # files in subrepos.
1523 1526 return pathutil.pathauditor(
1524 1527 self.root, callback=self._checknested, realfs=False, cached=True
1525 1528 )
1526 1529
1527 1530 def _checknested(self, path):
1528 1531 """Determine if path is a legal nested repository."""
1529 1532 if not path.startswith(self.root):
1530 1533 return False
1531 1534 subpath = path[len(self.root) + 1 :]
1532 1535 normsubpath = util.pconvert(subpath)
1533 1536
1534 1537 # XXX: Checking against the current working copy is wrong in
1535 1538 # the sense that it can reject things like
1536 1539 #
1537 1540 # $ hg cat -r 10 sub/x.txt
1538 1541 #
1539 1542 # if sub/ is no longer a subrepository in the working copy
1540 1543 # parent revision.
1541 1544 #
1542 1545 # However, it can of course also allow things that would have
1543 1546 # been rejected before, such as the above cat command if sub/
1544 1547 # is a subrepository now, but was a normal directory before.
1545 1548 # The old path auditor would have rejected by mistake since it
1546 1549 # panics when it sees sub/.hg/.
1547 1550 #
1548 1551 # All in all, checking against the working copy seems sensible
1549 1552 # since we want to prevent access to nested repositories on
1550 1553 # the filesystem *now*.
1551 1554 ctx = self[None]
1552 1555 parts = util.splitpath(subpath)
1553 1556 while parts:
1554 1557 prefix = b'/'.join(parts)
1555 1558 if prefix in ctx.substate:
1556 1559 if prefix == normsubpath:
1557 1560 return True
1558 1561 else:
1559 1562 sub = ctx.sub(prefix)
1560 1563 return sub.checknested(subpath[len(prefix) + 1 :])
1561 1564 else:
1562 1565 parts.pop()
1563 1566 return False
1564 1567
1565 1568 def peer(self):
1566 1569 return localpeer(self) # not cached to avoid reference cycle
1567 1570
1568 1571 def unfiltered(self):
1569 1572 """Return unfiltered version of the repository
1570 1573
1571 1574 Intended to be overwritten by filtered repo."""
1572 1575 return self
1573 1576
1574 1577 def filtered(self, name, visibilityexceptions=None):
1575 1578 """Return a filtered version of a repository
1576 1579
1577 1580 The `name` parameter is the identifier of the requested view. This
1578 1581 will return a repoview object set "exactly" to the specified view.
1579 1582
1580 1583 This function does not apply recursive filtering to a repository. For
1581 1584 example calling `repo.filtered("served")` will return a repoview using
1582 1585 the "served" view, regardless of the initial view used by `repo`.
1583 1586
1584 1587 In other words, there is always only one level of `repoview` "filtering".
1585 1588 """
1586 1589 if self._extrafilterid is not None and b'%' not in name:
1587 1590 name = name + b'%' + self._extrafilterid
1588 1591
1589 1592 cls = repoview.newtype(self.unfiltered().__class__)
1590 1593 return cls(self, name, visibilityexceptions)
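# Illustrative usage sketch (assumption: the names below are the standard
# repoview filter names; only filtered() itself is defined here):
#   repo.filtered(b'visible')  # hide hidden (e.g. obsolete) changesets
#   repo.filtered(b'served')   # additionally hide secret changesets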
1591 1594
1592 1595 @mixedrepostorecache(
1593 1596 (b'bookmarks', b'plain'),
1594 1597 (b'bookmarks.current', b'plain'),
1595 1598 (b'bookmarks', b''),
1596 1599 (b'00changelog.i', b''),
1597 1600 )
1598 1601 def _bookmarks(self):
1599 1602 # Since the multiple files involved in the transaction cannot be
1600 1603 # written atomically (with current repository format), there is a race
1601 1604 # condition here.
1602 1605 #
1603 1606 # 1) changelog content A is read
1604 1607 # 2) outside transaction update changelog to content B
1605 1608 # 3) outside transaction update bookmark file referring to content B
1606 1609 # 4) bookmarks file content is read and filtered against changelog-A
1607 1610 #
1608 1611 # When this happens, bookmarks against nodes missing from A are dropped.
1609 1612 #
1610 1613 # Having this happen during a read is not great, but it becomes worse
1611 1614 # when it happens during a write because the bookmarks to the "unknown"
1612 1615 # nodes will be dropped for good. However, writes happen within locks.
1613 1616 # This locking makes it possible to have a race free consistent read.
1614 1617 # For this purpose, data read from disk before locking are
1615 1618 # "invalidated" right after the locks are taken. These invalidations are
1616 1619 # "light": the `filecache` mechanism keeps the data in memory and will
1617 1620 # reuse them if the underlying files did not change. Not parsing the
1618 1621 # same data multiple times helps performance.
1619 1622 #
1620 1623 # Unfortunately, in the case described above, the files tracked by the
1621 1624 # bookmarks file cache might not have changed, but the in-memory
1622 1625 # content is still "wrong" because we used an older changelog content
1623 1626 # to process the on-disk data. So after locking, the changelog would be
1624 1627 # refreshed but `_bookmarks` would be preserved.
1625 1628 # Adding `00changelog.i` to the list of tracked files is not
1626 1629 # enough, because at the time we build the content for `_bookmarks` in
1627 1630 # (4), the changelog file has already diverged from the content used
1628 1631 # for loading `changelog` in (1)
1629 1632 #
1630 1633 # To prevent the issue, we force the changelog to be explicitly
1631 1634 # reloaded while computing `_bookmarks`. The data race can still happen
1632 1635 # without the lock (with a narrower window), but it would no longer go
1633 1636 # undetected during the lock time refresh.
1634 1637 #
1635 1638 # The new schedule is as follows:
1636 1639 #
1637 1640 # 1) filecache logic detect that `_bookmarks` needs to be computed
1638 1641 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1639 1642 # 3) We force `changelog` filecache to be tested
1640 1643 # 4) cachestat for `changelog` are captured (for changelog)
1641 1644 # 5) `_bookmarks` is computed and cached
1642 1645 #
1643 1646 # The step in (3) ensures we have a changelog at least as recent as the
1644 1647 # cache stat computed in (1). As a result, at locking time:
1645 1648 # * if the changelog did not change since (1) -> we can reuse the data
1646 1649 # * otherwise -> the bookmarks get refreshed.
1647 1650 self._refreshchangelog()
1648 1651 return bookmarks.bmstore(self)
1649 1652
1650 1653 def _refreshchangelog(self):
1651 1654 """make sure the in memory changelog match the on-disk one"""
1652 1655 if 'changelog' in vars(self) and self.currenttransaction() is None:
1653 1656 del self.changelog
1654 1657
1655 1658 @property
1656 1659 def _activebookmark(self):
1657 1660 return self._bookmarks.active
1658 1661
1659 1662 # _phasesets depend on changelog. what we need is to call
1660 1663 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1661 1664 # can't be easily expressed in filecache mechanism.
1662 1665 @storecache(b'phaseroots', b'00changelog.i')
1663 1666 def _phasecache(self):
1664 1667 return phases.phasecache(self, self._phasedefaults)
1665 1668
1666 1669 @storecache(b'obsstore')
1667 1670 def obsstore(self):
1668 1671 return obsolete.makestore(self.ui, self)
1669 1672
1670 1673 @storecache(b'00changelog.i')
1671 1674 def changelog(self):
1672 1675 # load dirstate before changelog to avoid a race; see issue6303
1673 1676 self.dirstate.prefetch_parents()
1674 1677 return self.store.changelog(
1675 1678 txnutil.mayhavepending(self.root),
1676 1679 concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
1677 1680 )
1678 1681
1679 1682 @storecache(b'00manifest.i')
1680 1683 def manifestlog(self):
1681 1684 return self.store.manifestlog(self, self._storenarrowmatch)
1682 1685
1683 1686 @repofilecache(b'dirstate')
1684 1687 def dirstate(self):
1685 1688 return self._makedirstate()
1686 1689
1687 1690 def _makedirstate(self):
1688 1691 """Extension point for wrapping the dirstate per-repo."""
1689 1692 sparsematchfn = lambda: sparse.matcher(self)
1690 1693
1691 1694 return dirstate.dirstate(
1692 1695 self.vfs,
1693 1696 self.ui,
1694 1697 self.root,
1695 1698 self._dirstatevalidate,
1696 1699 sparsematchfn,
1697 1700 self.nodeconstants,
1698 1701 )
1699 1702
1700 1703 def _dirstatevalidate(self, node):
1701 1704 try:
1702 1705 self.changelog.rev(node)
1703 1706 return node
1704 1707 except error.LookupError:
1705 1708 if not self._dirstatevalidatewarned:
1706 1709 self._dirstatevalidatewarned = True
1707 1710 self.ui.warn(
1708 1711 _(b"warning: ignoring unknown working parent %s!\n")
1709 1712 % short(node)
1710 1713 )
1711 1714 return self.nullid
1712 1715
1713 1716 @storecache(narrowspec.FILENAME)
1714 1717 def narrowpats(self):
1715 1718 """matcher patterns for this repository's narrowspec
1716 1719
1717 1720 A tuple of (includes, excludes).
1718 1721 """
1719 1722 return narrowspec.load(self)
1720 1723
1721 1724 @storecache(narrowspec.FILENAME)
1722 1725 def _storenarrowmatch(self):
1723 1726 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1724 1727 return matchmod.always()
1725 1728 include, exclude = self.narrowpats
1726 1729 return narrowspec.match(self.root, include=include, exclude=exclude)
1727 1730
1728 1731 @storecache(narrowspec.FILENAME)
1729 1732 def _narrowmatch(self):
1730 1733 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1731 1734 return matchmod.always()
1732 1735 narrowspec.checkworkingcopynarrowspec(self)
1733 1736 include, exclude = self.narrowpats
1734 1737 return narrowspec.match(self.root, include=include, exclude=exclude)
1735 1738
1736 1739 def narrowmatch(self, match=None, includeexact=False):
1737 1740 """matcher corresponding the the repo's narrowspec
1738 1741
1739 1742 If `match` is given, then that will be intersected with the narrow
1740 1743 matcher.
1741 1744
1742 1745 If `includeexact` is True, then any exact matches from `match` will
1743 1746 be included even if they're outside the narrowspec.
1744 1747 """
1745 1748 if match:
1746 1749 if includeexact and not self._narrowmatch.always():
1747 1750 # do not exclude explicitly-specified paths so that they can
1748 1751 # be warned later on
1749 1752 em = matchmod.exact(match.files())
1750 1753 nm = matchmod.unionmatcher([self._narrowmatch, em])
1751 1754 return matchmod.intersectmatchers(match, nm)
1752 1755 return matchmod.intersectmatchers(match, self._narrowmatch)
1753 1756 return self._narrowmatch
1754 1757
1755 1758 def setnarrowpats(self, newincludes, newexcludes):
1756 1759 narrowspec.save(self, newincludes, newexcludes)
1757 1760 self.invalidate(clearfilecache=True)
1758 1761
1759 1762 @unfilteredpropertycache
1760 1763 def _quick_access_changeid_null(self):
1761 1764 return {
1762 1765 b'null': (nullrev, self.nodeconstants.nullid),
1763 1766 nullrev: (nullrev, self.nodeconstants.nullid),
1764 1767 self.nullid: (nullrev, self.nullid),
1765 1768 }
1766 1769
1767 1770 @unfilteredpropertycache
1768 1771 def _quick_access_changeid_wc(self):
1769 1772 # also fast-path access to the working copy parents;
1770 1773 # however, only do it for filters that ensure the wc is visible.
1771 1774 quick = self._quick_access_changeid_null.copy()
1772 1775 cl = self.unfiltered().changelog
1773 1776 for node in self.dirstate.parents():
1774 1777 if node == self.nullid:
1775 1778 continue
1776 1779 rev = cl.index.get_rev(node)
1777 1780 if rev is None:
1778 1781 # unknown working copy parent case:
1779 1782 #
1780 1783 # skip the fast path and let higher code deal with it
1781 1784 continue
1782 1785 pair = (rev, node)
1783 1786 quick[rev] = pair
1784 1787 quick[node] = pair
1785 1788 # also add the parents of the parents
1786 1789 for r in cl.parentrevs(rev):
1787 1790 if r == nullrev:
1788 1791 continue
1789 1792 n = cl.node(r)
1790 1793 pair = (r, n)
1791 1794 quick[r] = pair
1792 1795 quick[n] = pair
1793 1796 p1node = self.dirstate.p1()
1794 1797 if p1node != self.nullid:
1795 1798 quick[b'.'] = quick[p1node]
1796 1799 return quick
1797 1800
1798 1801 @unfilteredmethod
1799 1802 def _quick_access_changeid_invalidate(self):
1800 1803 if '_quick_access_changeid_wc' in vars(self):
1801 1804 del self.__dict__['_quick_access_changeid_wc']
1802 1805
1803 1806 @property
1804 1807 def _quick_access_changeid(self):
1805 1808 """an helper dictionnary for __getitem__ calls
1806 1809
1807 1810 This contains a list of symbol we can recognise right away without
1808 1811 further processing.
1809 1812 """
1810 1813 if self.filtername in repoview.filter_has_wc:
1811 1814 return self._quick_access_changeid_wc
1812 1815 return self._quick_access_changeid_null
1813 1816
1814 1817 def __getitem__(self, changeid):
1815 1818 # dealing with special cases
1816 1819 if changeid is None:
1817 1820 return context.workingctx(self)
1818 1821 if isinstance(changeid, context.basectx):
1819 1822 return changeid
1820 1823
1821 1824 # dealing with multiple revisions
1822 1825 if isinstance(changeid, slice):
1823 1826 # wdirrev isn't contiguous so the slice shouldn't include it
1824 1827 return [
1825 1828 self[i]
1826 1829 for i in pycompat.xrange(*changeid.indices(len(self)))
1827 1830 if i not in self.changelog.filteredrevs
1828 1831 ]
1829 1832
1830 1833 # dealing with some special values
1831 1834 quick_access = self._quick_access_changeid.get(changeid)
1832 1835 if quick_access is not None:
1833 1836 rev, node = quick_access
1834 1837 return context.changectx(self, rev, node, maybe_filtered=False)
1835 1838 if changeid == b'tip':
1836 1839 node = self.changelog.tip()
1837 1840 rev = self.changelog.rev(node)
1838 1841 return context.changectx(self, rev, node)
1839 1842
1840 1843 # dealing with arbitrary values
1841 1844 try:
1842 1845 if isinstance(changeid, int):
1843 1846 node = self.changelog.node(changeid)
1844 1847 rev = changeid
1845 1848 elif changeid == b'.':
1846 1849 # this is a hack to delay/avoid loading obsmarkers
1847 1850 # when we know that '.' won't be hidden
1848 1851 node = self.dirstate.p1()
1849 1852 rev = self.unfiltered().changelog.rev(node)
1850 1853 elif len(changeid) == self.nodeconstants.nodelen:
1851 1854 try:
1852 1855 node = changeid
1853 1856 rev = self.changelog.rev(changeid)
1854 1857 except error.FilteredLookupError:
1855 1858 changeid = hex(changeid) # for the error message
1856 1859 raise
1857 1860 except LookupError:
1858 1861 # check if it might have come from damaged dirstate
1859 1862 #
1860 1863 # XXX we could avoid the unfiltered if we had a recognizable
1861 1864 # exception for filtered changeset access
1862 1865 if (
1863 1866 self.local()
1864 1867 and changeid in self.unfiltered().dirstate.parents()
1865 1868 ):
1866 1869 msg = _(b"working directory has unknown parent '%s'!")
1867 1870 raise error.Abort(msg % short(changeid))
1868 1871 changeid = hex(changeid) # for the error message
1869 1872 raise
1870 1873
1871 1874 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1872 1875 node = bin(changeid)
1873 1876 rev = self.changelog.rev(node)
1874 1877 else:
1875 1878 raise error.ProgrammingError(
1876 1879 b"unsupported changeid '%s' of type %s"
1877 1880 % (changeid, pycompat.bytestr(type(changeid)))
1878 1881 )
1879 1882
1880 1883 return context.changectx(self, rev, node)
1881 1884
1882 1885 except (error.FilteredIndexError, error.FilteredLookupError):
1883 1886 raise error.FilteredRepoLookupError(
1884 1887 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1885 1888 )
1886 1889 except (IndexError, LookupError):
1887 1890 raise error.RepoLookupError(
1888 1891 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1889 1892 )
1890 1893 except error.WdirUnsupported:
1891 1894 return context.workingctx(self)
1892 1895
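# Illustrative usage sketch (not part of the original source; these lookups
# exercise the branches of __getitem__ above):
#   repo[None]    # working directory context
#   repo[b'tip']  # symbolic name handled explicitly above
#   repo[0]       # integer revision number
#   repo[b'.']    # first parent of the working directory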
1893 1896 def __contains__(self, changeid):
1894 1897 """True if the given changeid exists"""
1895 1898 try:
1896 1899 self[changeid]
1897 1900 return True
1898 1901 except error.RepoLookupError:
1899 1902 return False
1900 1903
1901 1904 def __nonzero__(self):
1902 1905 return True
1903 1906
1904 1907 __bool__ = __nonzero__
1905 1908
1906 1909 def __len__(self):
1907 1910 # no need to pay the cost of repoview.changelog
1908 1911 unfi = self.unfiltered()
1909 1912 return len(unfi.changelog)
1910 1913
1911 1914 def __iter__(self):
1912 1915 return iter(self.changelog)
1913 1916
1914 1917 def revs(self, expr, *args):
1915 1918 """Find revisions matching a revset.
1916 1919
1917 1920 The revset is specified as a string ``expr`` that may contain
1918 1921 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1919 1922
1920 1923 Revset aliases from the configuration are not expanded. To expand
1921 1924 user aliases, consider calling ``scmutil.revrange()`` or
1922 1925 ``repo.anyrevs([expr], user=True)``.
1923 1926
1924 1927 Returns a smartset.abstractsmartset, which is a list-like interface
1925 1928 that contains integer revisions.
1926 1929 """
1927 1930 tree = revsetlang.spectree(expr, *args)
1928 1931 return revset.makematcher(tree)(self)
1929 1932
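# Illustrative usage sketch (assumption: %ld and %ln are the
# revsetlang.formatspec escapes for lists of revision numbers and of binary
# nodes; only revs() itself is defined here):
#   repo.revs(b'heads(%ld)', [1, 2, 3])
#   repo.revs(b'%ln and public()', list_of_binary_nodes)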
1930 1933 def set(self, expr, *args):
1931 1934 """Find revisions matching a revset and emit changectx instances.
1932 1935
1933 1936 This is a convenience wrapper around ``revs()`` that iterates the
1934 1937 result and is a generator of changectx instances.
1935 1938
1936 1939 Revset aliases from the configuration are not expanded. To expand
1937 1940 user aliases, consider calling ``scmutil.revrange()``.
1938 1941 """
1939 1942 for r in self.revs(expr, *args):
1940 1943 yield self[r]
1941 1944
1942 1945 def anyrevs(self, specs, user=False, localalias=None):
1943 1946 """Find revisions matching one of the given revsets.
1944 1947
1945 1948 Revset aliases from the configuration are not expanded by default. To
1946 1949 expand user aliases, specify ``user=True``. To provide some local
1947 1950 definitions overriding user aliases, set ``localalias`` to
1948 1951 ``{name: definitionstring}``.
1949 1952 """
1950 1953 if specs == [b'null']:
1951 1954 return revset.baseset([nullrev])
1952 1955 if specs == [b'.']:
1953 1956 quick_data = self._quick_access_changeid.get(b'.')
1954 1957 if quick_data is not None:
1955 1958 return revset.baseset([quick_data[0]])
1956 1959 if user:
1957 1960 m = revset.matchany(
1958 1961 self.ui,
1959 1962 specs,
1960 1963 lookup=revset.lookupfn(self),
1961 1964 localalias=localalias,
1962 1965 )
1963 1966 else:
1964 1967 m = revset.matchany(None, specs, localalias=localalias)
1965 1968 return m(self)
1966 1969
1967 1970 def url(self):
1968 1971 return b'file:' + self.root
1969 1972
1970 1973 def hook(self, name, throw=False, **args):
1971 1974 """Call a hook, passing this repo instance.
1972 1975
1973 1976 This is a convenience method to aid invoking hooks. Extensions likely
1974 1977 won't call this unless they have registered a custom hook or are
1975 1978 replacing code that is expected to call a hook.
1976 1979 """
1977 1980 return hook.hook(self.ui, self, name, throw, **args)
1978 1981
1979 1982 @filteredpropertycache
1980 1983 def _tagscache(self):
1981 1984 """Returns a tagscache object that contains various tags related
1982 1985 caches."""
1983 1986
1984 1987 # This simplifies its cache management by having one decorated
1985 1988 # function (this one) and the rest simply fetch things from it.
1986 1989 class tagscache(object):
1987 1990 def __init__(self):
1988 1991 # These two define the set of tags for this repository. tags
1989 1992 # maps tag name to node; tagtypes maps tag name to 'global' or
1990 1993 # 'local'. (Global tags are defined by .hgtags across all
1991 1994 # heads, and local tags are defined in .hg/localtags.)
1992 1995 # They constitute the in-memory cache of tags.
1993 1996 self.tags = self.tagtypes = None
1994 1997
1995 1998 self.nodetagscache = self.tagslist = None
1996 1999
1997 2000 cache = tagscache()
1998 2001 cache.tags, cache.tagtypes = self._findtags()
1999 2002
2000 2003 return cache
2001 2004
2002 2005 def tags(self):
2003 2006 '''return a mapping of tag to node'''
2004 2007 t = {}
2005 2008 if self.changelog.filteredrevs:
2006 2009 tags, tt = self._findtags()
2007 2010 else:
2008 2011 tags = self._tagscache.tags
2009 2012 rev = self.changelog.rev
2010 2013 for k, v in pycompat.iteritems(tags):
2011 2014 try:
2012 2015 # ignore tags to unknown nodes
2013 2016 rev(v)
2014 2017 t[k] = v
2015 2018 except (error.LookupError, ValueError):
2016 2019 pass
2017 2020 return t
2018 2021
2019 2022 def _findtags(self):
2020 2023 """Do the hard work of finding tags. Return a pair of dicts
2021 2024 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2022 2025 maps tag name to a string like \'global\' or \'local\'.
2023 2026 Subclasses or extensions are free to add their own tags, but
2024 2027 should be aware that the returned dicts will be retained for the
2025 2028 duration of the localrepo object."""
2026 2029
2027 2030 # XXX what tagtype should subclasses/extensions use? Currently
2028 2031 # mq and bookmarks add tags, but do not set the tagtype at all.
2029 2032 # Should each extension invent its own tag type? Should there
2030 2033 # be one tagtype for all such "virtual" tags? Or is the status
2031 2034 # quo fine?
2032 2035
2033 2036 # map tag name to (node, hist)
2034 2037 alltags = tagsmod.findglobaltags(self.ui, self)
2035 2038 # map tag name to tag type
2036 2039 tagtypes = {tag: b'global' for tag in alltags}
2037 2040
2038 2041 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2039 2042
2040 2043 # Build the return dicts. Have to re-encode tag names because
2041 2044 # the tags module always uses UTF-8 (in order not to lose info
2042 2045 # writing to the cache), but the rest of Mercurial wants them in
2043 2046 # local encoding.
2044 2047 tags = {}
2045 2048 for (name, (node, hist)) in pycompat.iteritems(alltags):
2046 2049 if node != self.nullid:
2047 2050 tags[encoding.tolocal(name)] = node
2048 2051 tags[b'tip'] = self.changelog.tip()
2049 2052 tagtypes = {
2050 2053 encoding.tolocal(name): value
2051 2054 for (name, value) in pycompat.iteritems(tagtypes)
2052 2055 }
2053 2056 return (tags, tagtypes)
2054 2057
2055 2058 def tagtype(self, tagname):
2056 2059 """
2057 2060 return the type of the given tag. result can be:
2058 2061
2059 2062 'local' : a local tag
2060 2063 'global' : a global tag
2061 2064 None : tag does not exist
2062 2065 """
2063 2066
2064 2067 return self._tagscache.tagtypes.get(tagname)
2065 2068
2066 2069 def tagslist(self):
2067 2070 '''return a list of tags ordered by revision'''
2068 2071 if not self._tagscache.tagslist:
2069 2072 l = []
2070 2073 for t, n in pycompat.iteritems(self.tags()):
2071 2074 l.append((self.changelog.rev(n), t, n))
2072 2075 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2073 2076
2074 2077 return self._tagscache.tagslist
2075 2078
2076 2079 def nodetags(self, node):
2077 2080 '''return the tags associated with a node'''
2078 2081 if not self._tagscache.nodetagscache:
2079 2082 nodetagscache = {}
2080 2083 for t, n in pycompat.iteritems(self._tagscache.tags):
2081 2084 nodetagscache.setdefault(n, []).append(t)
2082 2085 for tags in pycompat.itervalues(nodetagscache):
2083 2086 tags.sort()
2084 2087 self._tagscache.nodetagscache = nodetagscache
2085 2088 return self._tagscache.nodetagscache.get(node, [])
2086 2089
2087 2090 def nodebookmarks(self, node):
2088 2091 """return the list of bookmarks pointing to the specified node"""
2089 2092 return self._bookmarks.names(node)
2090 2093
2091 2094 def branchmap(self):
2092 2095 """returns a dictionary {branch: [branchheads]} with branchheads
2093 2096 ordered by increasing revision number"""
2094 2097 return self._branchcaches[self]
2095 2098
2096 2099 @unfilteredmethod
2097 2100 def revbranchcache(self):
2098 2101 if not self._revbranchcache:
2099 2102 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2100 2103 return self._revbranchcache
2101 2104
2102 2105 def register_changeset(self, rev, changelogrevision):
2103 2106 self.revbranchcache().setdata(rev, changelogrevision)
2104 2107
2105 2108 def branchtip(self, branch, ignoremissing=False):
2106 2109 """return the tip node for a given branch
2107 2110
2108 2111 If ignoremissing is True, then this method will not raise an error.
2109 2112 This is helpful for callers that only expect None for a missing branch
2110 2113 (e.g. namespace).
2111 2114
2112 2115 """
2113 2116 try:
2114 2117 return self.branchmap().branchtip(branch)
2115 2118 except KeyError:
2116 2119 if not ignoremissing:
2117 2120 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2118 2121 else:
2119 2122 pass
2120 2123
2121 2124 def lookup(self, key):
2122 2125 node = scmutil.revsymbol(self, key).node()
2123 2126 if node is None:
2124 2127 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2125 2128 return node
2126 2129
2127 2130 def lookupbranch(self, key):
2128 2131 if self.branchmap().hasbranch(key):
2129 2132 return key
2130 2133
2131 2134 return scmutil.revsymbol(self, key).branch()
2132 2135
2133 2136 def known(self, nodes):
2134 2137 cl = self.changelog
2135 2138 get_rev = cl.index.get_rev
2136 2139 filtered = cl.filteredrevs
2137 2140 result = []
2138 2141 for n in nodes:
2139 2142 r = get_rev(n)
2140 2143 resp = not (r is None or r in filtered)
2141 2144 result.append(resp)
2142 2145 return result
2143 2146
2144 2147 def local(self):
2145 2148 return self
2146 2149
2147 2150 def publishing(self):
2148 2151 # it's safe (and desirable) to trust the publish flag unconditionally
2149 2152 # so that we don't finalize changes shared between users via ssh or nfs
2150 2153 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2151 2154
2152 2155 def cancopy(self):
2153 2156 # so statichttprepo's override of local() works
2154 2157 if not self.local():
2155 2158 return False
2156 2159 if not self.publishing():
2157 2160 return True
2158 2161 # if publishing we can't copy if there is filtered content
2159 2162 return not self.filtered(b'visible').changelog.filteredrevs
2160 2163
2161 2164 def shared(self):
2162 2165 '''the type of shared repository (None if not shared)'''
2163 2166 if self.sharedpath != self.path:
2164 2167 return b'store'
2165 2168 return None
2166 2169
2167 2170 def wjoin(self, f, *insidef):
2168 2171 return self.vfs.reljoin(self.root, f, *insidef)
2169 2172
2170 2173 def setparents(self, p1, p2=None):
2171 2174 if p2 is None:
2172 2175 p2 = self.nullid
2173 2176 self[None].setparents(p1, p2)
2174 2177 self._quick_access_changeid_invalidate()
2175 2178
2176 2179 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2177 2180 """changeid must be a changeset revision, if specified.
2178 2181 fileid can be a file revision or node."""
2179 2182 return context.filectx(
2180 2183 self, path, changeid, fileid, changectx=changectx
2181 2184 )
2182 2185
2183 2186 def getcwd(self):
2184 2187 return self.dirstate.getcwd()
2185 2188
2186 2189 def pathto(self, f, cwd=None):
2187 2190 return self.dirstate.pathto(f, cwd)
2188 2191
2189 2192 def _loadfilter(self, filter):
2190 2193 if filter not in self._filterpats:
2191 2194 l = []
2192 2195 for pat, cmd in self.ui.configitems(filter):
2193 2196 if cmd == b'!':
2194 2197 continue
2195 2198 mf = matchmod.match(self.root, b'', [pat])
2196 2199 fn = None
2197 2200 params = cmd
2198 2201 for name, filterfn in pycompat.iteritems(self._datafilters):
2199 2202 if cmd.startswith(name):
2200 2203 fn = filterfn
2201 2204 params = cmd[len(name) :].lstrip()
2202 2205 break
2203 2206 if not fn:
2204 2207 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2205 2208 fn.__name__ = 'commandfilter'
2206 2209 # Wrap old filters not supporting keyword arguments
2207 2210 if not pycompat.getargspec(fn)[2]:
2208 2211 oldfn = fn
2209 2212 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2210 2213 fn.__name__ = 'compat-' + oldfn.__name__
2211 2214 l.append((mf, fn, params))
2212 2215 self._filterpats[filter] = l
2213 2216 return self._filterpats[filter]
2214 2217
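# Illustrative hgrc sketch for the [encode]/[decode] filters consumed by
# _loadfilter() above and _filter() below (hypothetical pattern and command;
# by default the command is run as a pipe through procutil.filter):
#   [encode]
#   *.txt = tr -d '\r'   # strip carriage returns when data enters the store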
2215 2218 def _filter(self, filterpats, filename, data):
2216 2219 for mf, fn, cmd in filterpats:
2217 2220 if mf(filename):
2218 2221 self.ui.debug(
2219 2222 b"filtering %s through %s\n"
2220 2223 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2221 2224 )
2222 2225 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2223 2226 break
2224 2227
2225 2228 return data
2226 2229
2227 2230 @unfilteredpropertycache
2228 2231 def _encodefilterpats(self):
2229 2232 return self._loadfilter(b'encode')
2230 2233
2231 2234 @unfilteredpropertycache
2232 2235 def _decodefilterpats(self):
2233 2236 return self._loadfilter(b'decode')
2234 2237
2235 2238 def adddatafilter(self, name, filter):
2236 2239 self._datafilters[name] = filter
2237 2240
2238 2241 def wread(self, filename):
2239 2242 if self.wvfs.islink(filename):
2240 2243 data = self.wvfs.readlink(filename)
2241 2244 else:
2242 2245 data = self.wvfs.read(filename)
2243 2246 return self._filter(self._encodefilterpats, filename, data)
2244 2247
2245 2248 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2246 2249 """write ``data`` into ``filename`` in the working directory
2247 2250
2248 2251 This returns the length of the written (maybe decoded) data.
2249 2252 """
2250 2253 data = self._filter(self._decodefilterpats, filename, data)
2251 2254 if b'l' in flags:
2252 2255 self.wvfs.symlink(data, filename)
2253 2256 else:
2254 2257 self.wvfs.write(
2255 2258 filename, data, backgroundclose=backgroundclose, **kwargs
2256 2259 )
2257 2260 if b'x' in flags:
2258 2261 self.wvfs.setflags(filename, False, True)
2259 2262 else:
2260 2263 self.wvfs.setflags(filename, False, False)
2261 2264 return len(data)
2262 2265
2263 2266 def wwritedata(self, filename, data):
2264 2267 return self._filter(self._decodefilterpats, filename, data)
2265 2268
2266 2269 def currenttransaction(self):
2267 2270 """return the current transaction or None if non exists"""
2268 2271 if self._transref:
2269 2272 tr = self._transref()
2270 2273 else:
2271 2274 tr = None
2272 2275
2273 2276 if tr and tr.running():
2274 2277 return tr
2275 2278 return None
2276 2279
2277 2280 def transaction(self, desc, report=None):
2278 2281 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2279 2282 b'devel', b'check-locks'
2280 2283 ):
2281 2284 if self._currentlock(self._lockref) is None:
2282 2285 raise error.ProgrammingError(b'transaction requires locking')
2283 2286 tr = self.currenttransaction()
2284 2287 if tr is not None:
2285 2288 return tr.nest(name=desc)
2286 2289
2287 2290 # abort here if the journal already exists
2288 2291 if self.svfs.exists(b"journal"):
2289 2292 raise error.RepoError(
2290 2293 _(b"abandoned transaction found"),
2291 2294 hint=_(b"run 'hg recover' to clean up transaction"),
2292 2295 )
2293 2296
2294 2297 idbase = b"%.40f#%f" % (random.random(), time.time())
2295 2298 ha = hex(hashutil.sha1(idbase).digest())
2296 2299 txnid = b'TXN:' + ha
2297 2300 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2298 2301
2299 2302 self._writejournal(desc)
2300 2303 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2301 2304 if report:
2302 2305 rp = report
2303 2306 else:
2304 2307 rp = self.ui.warn
2305 2308 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2306 2309 # we must avoid cyclic reference between repo and transaction.
2307 2310 reporef = weakref.ref(self)
2308 2311 # Code to track tag movement
2309 2312 #
2310 2313 # Since tags are all handled as file content, it is actually quite hard
2311 2314 # to track these movements from a code perspective. So we fall back to
2312 2315 # tracking at the repository level. One could envision tracking changes
2313 2316 # to the '.hgtags' file through changegroup application, but that fails to
2314 2317 # cope with cases where a transaction exposes new heads without a changegroup
2315 2318 # being involved (e.g. phase movement).
2316 2319 #
2317 2320 # For now, we gate the feature behind a flag since it likely comes
2318 2321 # with performance impacts. The current code runs more often than needed
2319 2322 # and does not use caches as much as it could. The current focus is on
2320 2323 # the behavior of the feature, so we disable it by default. The flag
2321 2324 # will be removed when we are happy with the performance impact.
2322 2325 #
2323 2326 # Once this feature is no longer experimental move the following
2324 2327 # documentation to the appropriate help section:
2325 2328 #
2326 2329 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2327 2330 # tags (new or changed or deleted tags). In addition the details of
2328 2331 # these changes are made available in a file at:
2329 2332 # ``REPOROOT/.hg/changes/tags.changes``.
2330 2333 # Make sure you check for HG_TAG_MOVED before reading that file as it
2331 2334 # might exist from a previous transaction even if no tags were touched
2332 2335 # in this one. Changes are recorded in a line-based format::
2333 2336 #
2334 2337 # <action> <hex-node> <tag-name>\n
2335 2338 #
2336 2339 # Actions are defined as follows:
2337 2340 # "-R": tag is removed,
2338 2341 # "+A": tag is added,
2339 2342 # "-M": tag is moved (old value),
2340 2343 # "+M": tag is moved (new value),
2341 2344 tracktags = lambda x: None
2342 2345 # experimental config: experimental.hook-track-tags
2343 2346 shouldtracktags = self.ui.configbool(
2344 2347 b'experimental', b'hook-track-tags'
2345 2348 )
2346 2349 if desc != b'strip' and shouldtracktags:
2347 2350 oldheads = self.changelog.headrevs()
2348 2351
2349 2352 def tracktags(tr2):
2350 2353 repo = reporef()
2351 2354 assert repo is not None # help pytype
2352 2355 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2353 2356 newheads = repo.changelog.headrevs()
2354 2357 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2355 2358 # notes: we compare lists here.
2356 2359 # As we do it only once, building a set would not be cheaper
2357 2360 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2358 2361 if changes:
2359 2362 tr2.hookargs[b'tag_moved'] = b'1'
2360 2363 with repo.vfs(
2361 2364 b'changes/tags.changes', b'w', atomictemp=True
2362 2365 ) as changesfile:
2363 2366 # note: we do not register the file with the transaction
2364 2367 # because we need it to still exist after the transaction
2365 2368 # is closed (for txnclose hooks)
2366 2369 tagsmod.writediff(changesfile, changes)
2367 2370
2368 2371 def validate(tr2):
2369 2372 """will run pre-closing hooks"""
2370 2373 # XXX the transaction API is a bit lacking here so we take a hacky
2371 2374 # path for now
2372 2375 #
2373 2376 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2374 2377 # dict is copied before these run. In addition, we need the data
2375 2378 # available to in-memory hooks too.
2376 2379 #
2377 2380 # Moreover, we also need to make sure this runs before txnclose
2378 2381 # hooks and there is no "pending" mechanism that would execute
2379 2382 # logic only if hooks are about to run.
2380 2383 #
2381 2384 # Fixing this limitation of the transaction is also needed to track
2382 2385 # other families of changes (bookmarks, phases, obsolescence).
2383 2386 #
2384 2387 # This will have to be fixed before we remove the experimental
2385 2388 # gating.
2386 2389 tracktags(tr2)
2387 2390 repo = reporef()
2388 2391 assert repo is not None # help pytype
2389 2392
2390 2393 singleheadopt = (b'experimental', b'single-head-per-branch')
2391 2394 singlehead = repo.ui.configbool(*singleheadopt)
2392 2395 if singlehead:
2393 2396 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2394 2397 accountclosed = singleheadsub.get(
2395 2398 b"account-closed-heads", False
2396 2399 )
2397 2400 if singleheadsub.get(b"public-changes-only", False):
2398 2401 filtername = b"immutable"
2399 2402 else:
2400 2403 filtername = b"visible"
2401 2404 scmutil.enforcesinglehead(
2402 2405 repo, tr2, desc, accountclosed, filtername
2403 2406 )
2404 2407 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2405 2408 for name, (old, new) in sorted(
2406 2409 tr.changes[b'bookmarks'].items()
2407 2410 ):
2408 2411 args = tr.hookargs.copy()
2409 2412 args.update(bookmarks.preparehookargs(name, old, new))
2410 2413 repo.hook(
2411 2414 b'pretxnclose-bookmark',
2412 2415 throw=True,
2413 2416 **pycompat.strkwargs(args)
2414 2417 )
2415 2418 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2416 2419 cl = repo.unfiltered().changelog
2417 2420 for revs, (old, new) in tr.changes[b'phases']:
2418 2421 for rev in revs:
2419 2422 args = tr.hookargs.copy()
2420 2423 node = hex(cl.node(rev))
2421 2424 args.update(phases.preparehookargs(node, old, new))
2422 2425 repo.hook(
2423 2426 b'pretxnclose-phase',
2424 2427 throw=True,
2425 2428 **pycompat.strkwargs(args)
2426 2429 )
2427 2430
2428 2431 repo.hook(
2429 2432 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2430 2433 )
2431 2434
2432 2435 def releasefn(tr, success):
2433 2436 repo = reporef()
2434 2437 if repo is None:
2435 2438 # If the repo has been GC'd (and this release function is being
2436 2439 # called from transaction.__del__), there's not much we can do,
2437 2440 # so just leave the unfinished transaction there and let the
2438 2441 # user run `hg recover`.
2439 2442 return
2440 2443 if success:
2441 2444 # this should be explicitly invoked here, because
2442 2445 # in-memory changes aren't written out at closing
2443 2446 # transaction, if tr.addfilegenerator (via
2444 2447 # dirstate.write or so) isn't invoked while
2445 2448 # transaction running
2446 2449 repo.dirstate.write(None)
2447 2450 else:
2448 2451 # discard all changes (including ones already written
2449 2452 # out) in this transaction
2450 2453 narrowspec.restorebackup(self, b'journal.narrowspec')
2451 2454 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2452 2455 repo.dirstate.restorebackup(None, b'journal.dirstate')
2453 2456
2454 2457 repo.invalidate(clearfilecache=True)
2455 2458
2456 2459 tr = transaction.transaction(
2457 2460 rp,
2458 2461 self.svfs,
2459 2462 vfsmap,
2460 2463 b"journal",
2461 2464 b"undo",
2462 2465 aftertrans(renames),
2463 2466 self.store.createmode,
2464 2467 validator=validate,
2465 2468 releasefn=releasefn,
2466 2469 checkambigfiles=_cachedfiles,
2467 2470 name=desc,
2468 2471 )
2469 2472 tr.changes[b'origrepolen'] = len(self)
2470 2473 tr.changes[b'obsmarkers'] = set()
2471 2474 tr.changes[b'phases'] = []
2472 2475 tr.changes[b'bookmarks'] = {}
2473 2476
2474 2477 tr.hookargs[b'txnid'] = txnid
2475 2478 tr.hookargs[b'txnname'] = desc
2476 2479 tr.hookargs[b'changes'] = tr.changes
2477 2480 # note: writing the fncache only during finalize means that the file is
2478 2481 # outdated when running hooks. As fncache is used for streaming clones,
2479 2482 # this is not expected to break anything that happens during the hooks.
2480 2483 tr.addfinalize(b'flush-fncache', self.store.write)
2481 2484
2482 2485 def txnclosehook(tr2):
2483 2486 """To be run if transaction is successful, will schedule a hook run"""
2484 2487 # Don't reference tr2 in hook() so we don't hold a reference.
2485 2488 # This reduces memory consumption when there are multiple
2486 2489 # transactions per lock. This can likely go away if issue5045
2487 2490 # fixes the function accumulation.
2488 2491 hookargs = tr2.hookargs
2489 2492
2490 2493 def hookfunc(unused_success):
2491 2494 repo = reporef()
2492 2495 assert repo is not None # help pytype
2493 2496
2494 2497 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2495 2498 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2496 2499 for name, (old, new) in bmchanges:
2497 2500 args = tr.hookargs.copy()
2498 2501 args.update(bookmarks.preparehookargs(name, old, new))
2499 2502 repo.hook(
2500 2503 b'txnclose-bookmark',
2501 2504 throw=False,
2502 2505 **pycompat.strkwargs(args)
2503 2506 )
2504 2507
2505 2508 if hook.hashook(repo.ui, b'txnclose-phase'):
2506 2509 cl = repo.unfiltered().changelog
2507 2510 phasemv = sorted(
2508 2511 tr.changes[b'phases'], key=lambda r: r[0][0]
2509 2512 )
2510 2513 for revs, (old, new) in phasemv:
2511 2514 for rev in revs:
2512 2515 args = tr.hookargs.copy()
2513 2516 node = hex(cl.node(rev))
2514 2517 args.update(phases.preparehookargs(node, old, new))
2515 2518 repo.hook(
2516 2519 b'txnclose-phase',
2517 2520 throw=False,
2518 2521 **pycompat.strkwargs(args)
2519 2522 )
2520 2523
2521 2524 repo.hook(
2522 2525 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2523 2526 )
2524 2527
2525 2528 repo = reporef()
2526 2529 assert repo is not None # help pytype
2527 2530 repo._afterlock(hookfunc)
2528 2531
2529 2532 tr.addfinalize(b'txnclose-hook', txnclosehook)
2530 2533 # Include a leading "-" to make it happen before the transaction summary
2531 2534 # reports registered via scmutil.registersummarycallback() whose names
2532 2535 # are 00-txnreport etc. That way, the caches will be warm when the
2533 2536 # callbacks run.
2534 2537 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2535 2538
2536 2539 def txnaborthook(tr2):
2537 2540 """To be run if transaction is aborted"""
2538 2541 repo = reporef()
2539 2542 assert repo is not None # help pytype
2540 2543 repo.hook(
2541 2544 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2542 2545 )
2543 2546
2544 2547 tr.addabort(b'txnabort-hook', txnaborthook)
2545 2548 # avoid eager cache invalidation. in-memory data should be identical
2546 2549 # to stored data if transaction has no error.
2547 2550 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2548 2551 self._transref = weakref.ref(tr)
2549 2552 scmutil.registersummarycallback(self, tr, desc)
2550 2553 return tr
2551 2554
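    # A minimal usage sketch (illustrative, not part of this changeset): the
    # transaction built above is normally used as a context manager while the
    # store lock is held, much like the commit() method below does:
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             ...  # write to the store; the finalize/close hooks and
    #                  # cache warming registered above run when it closes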
2552 2555 def _journalfiles(self):
2553 2556 return (
2554 2557 (self.svfs, b'journal'),
2555 2558 (self.svfs, b'journal.narrowspec'),
2556 2559 (self.vfs, b'journal.narrowspec.dirstate'),
2557 2560 (self.vfs, b'journal.dirstate'),
2558 2561 (self.vfs, b'journal.branch'),
2559 2562 (self.vfs, b'journal.desc'),
2560 2563 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2561 2564 (self.svfs, b'journal.phaseroots'),
2562 2565 )
2563 2566
2564 2567 def undofiles(self):
2565 2568 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2566 2569
2567 2570 @unfilteredmethod
2568 2571 def _writejournal(self, desc):
2569 2572 self.dirstate.savebackup(None, b'journal.dirstate')
2570 2573 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2571 2574 narrowspec.savebackup(self, b'journal.narrowspec')
2572 2575 self.vfs.write(
2573 2576 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2574 2577 )
2575 2578 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2576 2579 bookmarksvfs = bookmarks.bookmarksvfs(self)
2577 2580 bookmarksvfs.write(
2578 2581 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2579 2582 )
2580 2583 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2581 2584
2582 2585 def recover(self):
2583 2586 with self.lock():
2584 2587 if self.svfs.exists(b"journal"):
2585 2588 self.ui.status(_(b"rolling back interrupted transaction\n"))
2586 2589 vfsmap = {
2587 2590 b'': self.svfs,
2588 2591 b'plain': self.vfs,
2589 2592 }
2590 2593 transaction.rollback(
2591 2594 self.svfs,
2592 2595 vfsmap,
2593 2596 b"journal",
2594 2597 self.ui.warn,
2595 2598 checkambigfiles=_cachedfiles,
2596 2599 )
2597 2600 self.invalidate()
2598 2601 return True
2599 2602 else:
2600 2603 self.ui.warn(_(b"no interrupted transaction available\n"))
2601 2604 return False
2602 2605
2603 2606 def rollback(self, dryrun=False, force=False):
2604 2607 wlock = lock = dsguard = None
2605 2608 try:
2606 2609 wlock = self.wlock()
2607 2610 lock = self.lock()
2608 2611 if self.svfs.exists(b"undo"):
2609 2612 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2610 2613
2611 2614 return self._rollback(dryrun, force, dsguard)
2612 2615 else:
2613 2616 self.ui.warn(_(b"no rollback information available\n"))
2614 2617 return 1
2615 2618 finally:
2616 2619 release(dsguard, lock, wlock)
2617 2620
2618 2621 @unfilteredmethod # Until we get smarter cache management
2619 2622 def _rollback(self, dryrun, force, dsguard):
2620 2623 ui = self.ui
2621 2624 try:
2622 2625 args = self.vfs.read(b'undo.desc').splitlines()
2623 2626 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2624 2627 if len(args) >= 3:
2625 2628 detail = args[2]
2626 2629 oldtip = oldlen - 1
2627 2630
2628 2631 if detail and ui.verbose:
2629 2632 msg = _(
2630 2633 b'repository tip rolled back to revision %d'
2631 2634 b' (undo %s: %s)\n'
2632 2635 ) % (oldtip, desc, detail)
2633 2636 else:
2634 2637 msg = _(
2635 2638 b'repository tip rolled back to revision %d (undo %s)\n'
2636 2639 ) % (oldtip, desc)
2637 2640 except IOError:
2638 2641 msg = _(b'rolling back unknown transaction\n')
2639 2642 desc = None
2640 2643
2641 2644 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2642 2645 raise error.Abort(
2643 2646 _(
2644 2647 b'rollback of last commit while not checked out '
2645 2648 b'may lose data'
2646 2649 ),
2647 2650 hint=_(b'use -f to force'),
2648 2651 )
2649 2652
2650 2653 ui.status(msg)
2651 2654 if dryrun:
2652 2655 return 0
2653 2656
2654 2657 parents = self.dirstate.parents()
2655 2658 self.destroying()
2656 2659 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2657 2660 transaction.rollback(
2658 2661 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2659 2662 )
2660 2663 bookmarksvfs = bookmarks.bookmarksvfs(self)
2661 2664 if bookmarksvfs.exists(b'undo.bookmarks'):
2662 2665 bookmarksvfs.rename(
2663 2666 b'undo.bookmarks', b'bookmarks', checkambig=True
2664 2667 )
2665 2668 if self.svfs.exists(b'undo.phaseroots'):
2666 2669 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2667 2670 self.invalidate()
2668 2671
2669 2672 has_node = self.changelog.index.has_node
2670 2673 parentgone = any(not has_node(p) for p in parents)
2671 2674 if parentgone:
2672 2675 # prevent dirstateguard from overwriting already restored one
2673 2676 dsguard.close()
2674 2677
2675 2678 narrowspec.restorebackup(self, b'undo.narrowspec')
2676 2679 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2677 2680 self.dirstate.restorebackup(None, b'undo.dirstate')
2678 2681 try:
2679 2682 branch = self.vfs.read(b'undo.branch')
2680 2683 self.dirstate.setbranch(encoding.tolocal(branch))
2681 2684 except IOError:
2682 2685 ui.warn(
2683 2686 _(
2684 2687 b'named branch could not be reset: '
2685 2688 b'current branch is still \'%s\'\n'
2686 2689 )
2687 2690 % self.dirstate.branch()
2688 2691 )
2689 2692
2690 2693 parents = tuple([p.rev() for p in self[None].parents()])
2691 2694 if len(parents) > 1:
2692 2695 ui.status(
2693 2696 _(
2694 2697 b'working directory now based on '
2695 2698 b'revisions %d and %d\n'
2696 2699 )
2697 2700 % parents
2698 2701 )
2699 2702 else:
2700 2703 ui.status(
2701 2704 _(b'working directory now based on revision %d\n') % parents
2702 2705 )
2703 2706 mergestatemod.mergestate.clean(self)
2704 2707
2705 2708 # TODO: if we know which new heads may result from this rollback, pass
2706 2709 # them to destroy(), which will prevent the branchhead cache from being
2707 2710 # invalidated.
2708 2711 self.destroyed()
2709 2712 return 0
2710 2713
2711 2714 def _buildcacheupdater(self, newtransaction):
2712 2715 """called during transaction to build the callback updating cache
2713 2716
2714 2717 Lives on the repository to help extensions that might want to augment
2715 2718 this logic. For this purpose, the created transaction is passed to the
2716 2719 method.
2717 2720 """
2718 2721 # we must avoid cyclic reference between repo and transaction.
2719 2722 reporef = weakref.ref(self)
2720 2723
2721 2724 def updater(tr):
2722 2725 repo = reporef()
2723 2726 assert repo is not None # help pytype
2724 2727 repo.updatecaches(tr)
2725 2728
2726 2729 return updater
2727 2730
2728 2731 @unfilteredmethod
2729 2732 def updatecaches(self, tr=None, full=False):
2730 2733 """warm appropriate caches
2731 2734
2732 2735 If this function is called after a transaction closed, the transaction
2733 2736 will be available in the 'tr' argument. This can be used to selectively
2734 2737 update caches relevant to the changes in that transaction.
2735 2738
2736 2739 If 'full' is set, make sure all caches the function knows about have
2737 2740 up-to-date data, even the ones usually loaded more lazily.
2738 2741
2739 2742 The `full` argument can take a special "post-clone" value. In this case
2740 2743 the cache warming is done after a clone, and some of the slower caches
2741 2744 might be skipped, namely the `.fnodetags` one. This argument is 5.8
2742 2745 specific as we plan for a cleaner way to deal with this in 5.9.
2743 2746 """
2744 2747 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2745 2748 # During strip, many caches are invalid but
2746 2749 # later call to `destroyed` will refresh them.
2747 2750 return
2748 2751
2749 2752 if tr is None or tr.changes[b'origrepolen'] < len(self):
2750 2753 # accessing the 'served' branchmap should refresh all the others,
2751 2754 self.ui.debug(b'updating the branch cache\n')
2752 2755 self.filtered(b'served').branchmap()
2753 2756 self.filtered(b'served.hidden').branchmap()
2754 2757
2755 2758 if full:
2756 2759 unfi = self.unfiltered()
2757 2760
2758 2761 self.changelog.update_caches(transaction=tr)
2759 2762 self.manifestlog.update_caches(transaction=tr)
2760 2763
2761 2764 rbc = unfi.revbranchcache()
2762 2765 for r in unfi.changelog:
2763 2766 rbc.branchinfo(r)
2764 2767 rbc.write()
2765 2768
2766 2769 # ensure the working copy parents are in the manifestfulltextcache
2767 2770 for ctx in self[b'.'].parents():
2768 2771 ctx.manifest() # accessing the manifest is enough
2769 2772
2770 2773 if not full == b"post-clone":
2771 2774 # accessing fnode cache warms the cache
2772 2775 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2773 2776 # accessing tags warm the cache
2774 2777 self.tags()
2775 2778 self.filtered(b'served').tags()
2776 2779
2777 2780 # The `full` arg is documented as updating even the lazily-loaded
2778 2781 # caches immediately, so we're forcing a write to cause these caches
2779 2782 # to be warmed up even if they haven't explicitly been requested
2780 2783 # yet (if they've never been used by hg, they won't ever have been
2781 2784 # written, even if they're a subset of another kind of cache that
2782 2785 # *has* been used).
2783 2786 for filt in repoview.filtertable.keys():
2784 2787 filtered = self.filtered(filt)
2785 2788 filtered.branchmap().write(filtered)
2786 2789
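    # A minimal usage sketch (illustrative only): forcing every cache this
    # method knows about to be rebuilt, roughly what a debug command such as
    # 'hg debugupdatecaches' is expected to do:
    #
    #     with repo.wlock(), repo.lock():
    #         repo.updatecaches(full=True)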
2787 2790 def invalidatecaches(self):
2788 2791
2789 2792 if '_tagscache' in vars(self):
2790 2793 # can't use delattr on proxy
2791 2794 del self.__dict__['_tagscache']
2792 2795
2793 2796 self._branchcaches.clear()
2794 2797 self.invalidatevolatilesets()
2795 2798 self._sparsesignaturecache.clear()
2796 2799
2797 2800 def invalidatevolatilesets(self):
2798 2801 self.filteredrevcache.clear()
2799 2802 obsolete.clearobscaches(self)
2800 2803 self._quick_access_changeid_invalidate()
2801 2804
2802 2805 def invalidatedirstate(self):
2803 2806 """Invalidates the dirstate, causing the next call to dirstate
2804 2807 to check if it was modified since the last time it was read,
2805 2808 rereading it if it has been.
2806 2809
2807 2810 This is different from dirstate.invalidate() in that it doesn't always
2808 2811 reread the dirstate. Use dirstate.invalidate() if you want to
2809 2812 explicitly read the dirstate again (i.e. restoring it to a previous
2810 2813 known good state)."""
2811 2814 if hasunfilteredcache(self, 'dirstate'):
2812 2815 for k in self.dirstate._filecache:
2813 2816 try:
2814 2817 delattr(self.dirstate, k)
2815 2818 except AttributeError:
2816 2819 pass
2817 2820 delattr(self.unfiltered(), 'dirstate')
2818 2821
2819 2822 def invalidate(self, clearfilecache=False):
2820 2823 """Invalidates both store and non-store parts other than dirstate
2821 2824
2822 2825 If a transaction is running, invalidation of the store is omitted,
2823 2826 because discarding in-memory changes might cause inconsistency
2824 2827 (e.g. an incomplete fncache causes unintentional failure, but a
2825 2828 redundant one doesn't).
2826 2829 """
2827 2830 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2828 2831 for k in list(self._filecache.keys()):
2829 2832 # dirstate is invalidated separately in invalidatedirstate()
2830 2833 if k == b'dirstate':
2831 2834 continue
2832 2835 if (
2833 2836 k == b'changelog'
2834 2837 and self.currenttransaction()
2835 2838 and self.changelog._delayed
2836 2839 ):
2837 2840 # The changelog object may store unwritten revisions. We don't
2838 2841 # want to lose them.
2839 2842 # TODO: Solve the problem instead of working around it.
2840 2843 continue
2841 2844
2842 2845 if clearfilecache:
2843 2846 del self._filecache[k]
2844 2847 try:
2845 2848 delattr(unfiltered, k)
2846 2849 except AttributeError:
2847 2850 pass
2848 2851 self.invalidatecaches()
2849 2852 if not self.currenttransaction():
2850 2853 # TODO: Changing contents of store outside transaction
2851 2854 # causes inconsistency. We should make in-memory store
2852 2855 # changes detectable, and abort if changed.
2853 2856 self.store.invalidatecaches()
2854 2857
2855 2858 def invalidateall(self):
2856 2859 """Fully invalidates both store and non-store parts, causing the
2857 2860 subsequent operation to reread any outside changes."""
2858 2861 # extension should hook this to invalidate its caches
2859 2862 self.invalidate()
2860 2863 self.invalidatedirstate()
2861 2864
2862 2865 @unfilteredmethod
2863 2866 def _refreshfilecachestats(self, tr):
2864 2867 """Reload stats of cached files so that they are flagged as valid"""
2865 2868 for k, ce in self._filecache.items():
2866 2869 k = pycompat.sysstr(k)
2867 2870 if k == 'dirstate' or k not in self.__dict__:
2868 2871 continue
2869 2872 ce.refresh()
2870 2873
2871 2874 def _lock(
2872 2875 self,
2873 2876 vfs,
2874 2877 lockname,
2875 2878 wait,
2876 2879 releasefn,
2877 2880 acquirefn,
2878 2881 desc,
2879 2882 ):
2880 2883 timeout = 0
2881 2884 warntimeout = 0
2882 2885 if wait:
2883 2886 timeout = self.ui.configint(b"ui", b"timeout")
2884 2887 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2885 2888 # internal config: ui.signal-safe-lock
2886 2889 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2887 2890
2888 2891 l = lockmod.trylock(
2889 2892 self.ui,
2890 2893 vfs,
2891 2894 lockname,
2892 2895 timeout,
2893 2896 warntimeout,
2894 2897 releasefn=releasefn,
2895 2898 acquirefn=acquirefn,
2896 2899 desc=desc,
2897 2900 signalsafe=signalsafe,
2898 2901 )
2899 2902 return l
2900 2903
2901 2904 def _afterlock(self, callback):
2902 2905 """add a callback to be run when the repository is fully unlocked
2903 2906
2904 2907 The callback will be executed when the outermost lock is released
2905 2908 (with wlock being higher level than 'lock')."""
2906 2909 for ref in (self._wlockref, self._lockref):
2907 2910 l = ref and ref()
2908 2911 if l and l.held:
2909 2912 l.postrelease.append(callback)
2910 2913 break
2911 2914 else: # no lock has been found.
2912 2915 callback(True)
2913 2916
2914 2917 def lock(self, wait=True):
2915 2918 """Lock the repository store (.hg/store) and return a weak reference
2916 2919 to the lock. Use this before modifying the store (e.g. committing or
2917 2920 stripping). If you are opening a transaction, get a lock as well.
2918 2921
2919 2922 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2920 2923 'wlock' first to avoid a dead-lock hazard."""
2921 2924 l = self._currentlock(self._lockref)
2922 2925 if l is not None:
2923 2926 l.lock()
2924 2927 return l
2925 2928
2926 2929 l = self._lock(
2927 2930 vfs=self.svfs,
2928 2931 lockname=b"lock",
2929 2932 wait=wait,
2930 2933 releasefn=None,
2931 2934 acquirefn=self.invalidate,
2932 2935 desc=_(b'repository %s') % self.origroot,
2933 2936 )
2934 2937 self._lockref = weakref.ref(l)
2935 2938 return l
2936 2939
2937 2940 def wlock(self, wait=True):
2938 2941 """Lock the non-store parts of the repository (everything under
2939 2942 .hg except .hg/store) and return a weak reference to the lock.
2940 2943
2941 2944 Use this before modifying files in .hg.
2942 2945
2943 2946 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2944 2947 'wlock' first to avoid a dead-lock hazard."""
2945 2948 l = self._wlockref() if self._wlockref else None
2946 2949 if l is not None and l.held:
2947 2950 l.lock()
2948 2951 return l
2949 2952
2950 2953 # We do not need to check for non-waiting lock acquisition. Such
2951 2954 # acquisition would not cause dead-lock as they would just fail.
2952 2955 if wait and (
2953 2956 self.ui.configbool(b'devel', b'all-warnings')
2954 2957 or self.ui.configbool(b'devel', b'check-locks')
2955 2958 ):
2956 2959 if self._currentlock(self._lockref) is not None:
2957 2960 self.ui.develwarn(b'"wlock" acquired after "lock"')
2958 2961
2959 2962 def unlock():
2960 2963 if self.dirstate.pendingparentchange():
2961 2964 self.dirstate.invalidate()
2962 2965 else:
2963 2966 self.dirstate.write(None)
2964 2967
2965 2968 self._filecache[b'dirstate'].refresh()
2966 2969
2967 2970 l = self._lock(
2968 2971 self.vfs,
2969 2972 b"wlock",
2970 2973 wait,
2971 2974 unlock,
2972 2975 self.invalidatedirstate,
2973 2976 _(b'working directory of %s') % self.origroot,
2974 2977 )
2975 2978 self._wlockref = weakref.ref(l)
2976 2979 return l
2977 2980
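    # Lock-ordering sketch (illustrative only): per the docstrings above,
    # always take 'wlock' before 'lock' to avoid the deadlock hazard:
    #
    #     with repo.wlock():      # working copy lock first
    #         with repo.lock():   # then the store lock
    #             ...             # safe to touch both .hg and .hg/store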
2978 2981 def _currentlock(self, lockref):
2979 2982 """Returns the lock if it's held, or None if it's not."""
2980 2983 if lockref is None:
2981 2984 return None
2982 2985 l = lockref()
2983 2986 if l is None or not l.held:
2984 2987 return None
2985 2988 return l
2986 2989
2987 2990 def currentwlock(self):
2988 2991 """Returns the wlock if it's held, or None if it's not."""
2989 2992 return self._currentlock(self._wlockref)
2990 2993
2991 2994 def checkcommitpatterns(self, wctx, match, status, fail):
2992 2995 """check for commit arguments that aren't committable"""
2993 2996 if match.isexact() or match.prefix():
2994 2997 matched = set(status.modified + status.added + status.removed)
2995 2998
2996 2999 for f in match.files():
2997 3000 f = self.dirstate.normalize(f)
2998 3001 if f == b'.' or f in matched or f in wctx.substate:
2999 3002 continue
3000 3003 if f in status.deleted:
3001 3004 fail(f, _(b'file not found!'))
3002 3005 # Is it a directory that exists or used to exist?
3003 3006 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3004 3007 d = f + b'/'
3005 3008 for mf in matched:
3006 3009 if mf.startswith(d):
3007 3010 break
3008 3011 else:
3009 3012 fail(f, _(b"no match under directory!"))
3010 3013 elif f not in self.dirstate:
3011 3014 fail(f, _(b"file not tracked!"))
3012 3015
3013 3016 @unfilteredmethod
3014 3017 def commit(
3015 3018 self,
3016 3019 text=b"",
3017 3020 user=None,
3018 3021 date=None,
3019 3022 match=None,
3020 3023 force=False,
3021 3024 editor=None,
3022 3025 extra=None,
3023 3026 ):
3024 3027 """Add a new revision to current repository.
3025 3028
3026 3029 Revision information is gathered from the working directory,
3027 3030 match can be used to filter the committed files. If editor is
3028 3031 supplied, it is called to get a commit message.
3029 3032 """
3030 3033 if extra is None:
3031 3034 extra = {}
3032 3035
3033 3036 def fail(f, msg):
3034 3037 raise error.InputError(b'%s: %s' % (f, msg))
3035 3038
3036 3039 if not match:
3037 3040 match = matchmod.always()
3038 3041
3039 3042 if not force:
3040 3043 match.bad = fail
3041 3044
3042 3045 # lock() for recent changelog (see issue4368)
3043 3046 with self.wlock(), self.lock():
3044 3047 wctx = self[None]
3045 3048 merge = len(wctx.parents()) > 1
3046 3049
3047 3050 if not force and merge and not match.always():
3048 3051 raise error.Abort(
3049 3052 _(
3050 3053 b'cannot partially commit a merge '
3051 3054 b'(do not specify files or patterns)'
3052 3055 )
3053 3056 )
3054 3057
3055 3058 status = self.status(match=match, clean=force)
3056 3059 if force:
3057 3060 status.modified.extend(
3058 3061 status.clean
3059 3062 ) # mq may commit clean files
3060 3063
3061 3064 # check subrepos
3062 3065 subs, commitsubs, newstate = subrepoutil.precommit(
3063 3066 self.ui, wctx, status, match, force=force
3064 3067 )
3065 3068
3066 3069 # make sure all explicit patterns are matched
3067 3070 if not force:
3068 3071 self.checkcommitpatterns(wctx, match, status, fail)
3069 3072
3070 3073 cctx = context.workingcommitctx(
3071 3074 self, status, text, user, date, extra
3072 3075 )
3073 3076
3074 3077 ms = mergestatemod.mergestate.read(self)
3075 3078 mergeutil.checkunresolved(ms)
3076 3079
3077 3080 # internal config: ui.allowemptycommit
3078 3081 if cctx.isempty() and not self.ui.configbool(
3079 3082 b'ui', b'allowemptycommit'
3080 3083 ):
3081 3084 self.ui.debug(b'nothing to commit, clearing merge state\n')
3082 3085 ms.reset()
3083 3086 return None
3084 3087
3085 3088 if merge and cctx.deleted():
3086 3089 raise error.Abort(_(b"cannot commit merge with missing files"))
3087 3090
3088 3091 if editor:
3089 3092 cctx._text = editor(self, cctx, subs)
3090 3093 edited = text != cctx._text
3091 3094
3092 3095 # Save commit message in case this transaction gets rolled back
3093 3096 # (e.g. by a pretxncommit hook). Leave the content alone on
3094 3097 # the assumption that the user will use the same editor again.
3095 3098 msgfn = self.savecommitmessage(cctx._text)
3096 3099
3097 3100 # commit subs and write new state
3098 3101 if subs:
3099 3102 uipathfn = scmutil.getuipathfn(self)
3100 3103 for s in sorted(commitsubs):
3101 3104 sub = wctx.sub(s)
3102 3105 self.ui.status(
3103 3106 _(b'committing subrepository %s\n')
3104 3107 % uipathfn(subrepoutil.subrelpath(sub))
3105 3108 )
3106 3109 sr = sub.commit(cctx._text, user, date)
3107 3110 newstate[s] = (newstate[s][0], sr)
3108 3111 subrepoutil.writestate(self, newstate)
3109 3112
3110 3113 p1, p2 = self.dirstate.parents()
3111 3114 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3112 3115 try:
3113 3116 self.hook(
3114 3117 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3115 3118 )
3116 3119 with self.transaction(b'commit'):
3117 3120 ret = self.commitctx(cctx, True)
3118 3121 # update bookmarks, dirstate and mergestate
3119 3122 bookmarks.update(self, [p1, p2], ret)
3120 3123 cctx.markcommitted(ret)
3121 3124 ms.reset()
3122 3125 except: # re-raises
3123 3126 if edited:
3124 3127 self.ui.write(
3125 3128 _(b'note: commit message saved in %s\n') % msgfn
3126 3129 )
3127 3130 self.ui.write(
3128 3131 _(
3129 3132 b"note: use 'hg commit --logfile "
3130 3133 b".hg/last-message.txt --edit' to reuse it\n"
3131 3134 )
3132 3135 )
3133 3136 raise
3134 3137
3135 3138 def commithook(unused_success):
3136 3139 # hack for command that use a temporary commit (eg: histedit)
3137 3140 # temporary commit got stripped before hook release
3138 3141 if self.changelog.hasnode(ret):
3139 3142 self.hook(
3140 3143 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3141 3144 )
3142 3145
3143 3146 self._afterlock(commithook)
3144 3147 return ret
3145 3148
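    # A minimal usage sketch (illustrative only, names assumed): committing
    # all working-directory changes with an explicit message and user; the
    # new changeset node is returned, or None if there was nothing to commit.
    #
    #     node = repo.commit(
    #         text=b'fix: correct typo', user=b'Jane Doe <jane@example.com>'
    #     )
    #     if node is None:
    #         repo.ui.status(b'nothing committed\n')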
3146 3149 @unfilteredmethod
3147 3150 def commitctx(self, ctx, error=False, origctx=None):
3148 3151 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3149 3152
3150 3153 @unfilteredmethod
3151 3154 def destroying(self):
3152 3155 """Inform the repository that nodes are about to be destroyed.
3153 3156 Intended for use by strip and rollback, so there's a common
3154 3157 place for anything that has to be done before destroying history.
3155 3158
3156 3159 This is mostly useful for saving state that is in memory and waiting
3157 3160 to be flushed when the current lock is released. Because a call to
3158 3161 destroyed is imminent, the repo will be invalidated causing those
3159 3162 changes to stay in memory (waiting for the next unlock), or vanish
3160 3163 completely.
3161 3164 """
3162 3165 # When using the same lock to commit and strip, the phasecache is left
3163 3166 # dirty after committing. Then when we strip, the repo is invalidated,
3164 3167 # causing those changes to disappear.
3165 3168 if '_phasecache' in vars(self):
3166 3169 self._phasecache.write()
3167 3170
3168 3171 @unfilteredmethod
3169 3172 def destroyed(self):
3170 3173 """Inform the repository that nodes have been destroyed.
3171 3174 Intended for use by strip and rollback, so there's a common
3172 3175 place for anything that has to be done after destroying history.
3173 3176 """
3174 3177 # When one tries to:
3175 3178 # 1) destroy nodes thus calling this method (e.g. strip)
3176 3179 # 2) use phasecache somewhere (e.g. commit)
3177 3180 #
3178 3181 # then 2) will fail because the phasecache contains nodes that were
3179 3182 # removed. We can either remove phasecache from the filecache,
3180 3183 # causing it to reload next time it is accessed, or simply filter
3181 3184 # the removed nodes now and write the updated cache.
3182 3185 self._phasecache.filterunknown(self)
3183 3186 self._phasecache.write()
3184 3187
3185 3188 # refresh all repository caches
3186 3189 self.updatecaches()
3187 3190
3188 3191 # Ensure the persistent tag cache is updated. Doing it now
3189 3192 # means that the tag cache only has to worry about destroyed
3190 3193 # heads immediately after a strip/rollback. That in turn
3191 3194 # guarantees that "cachetip == currenttip" (comparing both rev
3192 3195 # and node) always means no nodes have been added or destroyed.
3193 3196
3194 3197 # XXX this is suboptimal when qrefresh'ing: we strip the current
3195 3198 # head, refresh the tag cache, then immediately add a new head.
3196 3199 # But I think doing it this way is necessary for the "instant
3197 3200 # tag cache retrieval" case to work.
3198 3201 self.invalidate()
3199 3202
3200 3203 def status(
3201 3204 self,
3202 3205 node1=b'.',
3203 3206 node2=None,
3204 3207 match=None,
3205 3208 ignored=False,
3206 3209 clean=False,
3207 3210 unknown=False,
3208 3211 listsubrepos=False,
3209 3212 ):
3210 3213 '''a convenience method that calls node1.status(node2)'''
3211 3214 return self[node1].status(
3212 3215 node2, match, ignored, clean, unknown, listsubrepos
3213 3216 )
3214 3217
3215 3218 def addpostdsstatus(self, ps):
3216 3219 """Add a callback to run within the wlock, at the point at which status
3217 3220 fixups happen.
3218 3221
3219 3222 On status completion, callback(wctx, status) will be called with the
3220 3223 wlock held, unless the dirstate has changed from underneath or the wlock
3221 3224 couldn't be grabbed.
3222 3225
3223 3226 Callbacks should not capture and use a cached copy of the dirstate --
3224 3227 it might change in the meantime. Instead, they should access the
3225 3228 dirstate via wctx.repo().dirstate.
3226 3229
3227 3230 This list is emptied out after each status run -- extensions should
3228 3231 make sure they add to this list each time dirstate.status is called.
3229 3232 Extensions should also make sure they don't call this for statuses
3230 3233 that don't involve the dirstate.
3231 3234 """
3232 3235
3233 3236 # The list is located here for uniqueness reasons -- it is actually
3234 3237 # managed by the workingctx, but that isn't unique per-repo.
3235 3238 self._postdsstatus.append(ps)
3236 3239
3237 3240 def postdsstatus(self):
3238 3241 """Used by workingctx to get the list of post-dirstate-status hooks."""
3239 3242 return self._postdsstatus
3240 3243
3241 3244 def clearpostdsstatus(self):
3242 3245 """Used by workingctx to clear post-dirstate-status hooks."""
3243 3246 del self._postdsstatus[:]
3244 3247
3245 3248 def heads(self, start=None):
3246 3249 if start is None:
3247 3250 cl = self.changelog
3248 3251 headrevs = reversed(cl.headrevs())
3249 3252 return [cl.node(rev) for rev in headrevs]
3250 3253
3251 3254 heads = self.changelog.heads(start)
3252 3255 # sort the output in rev descending order
3253 3256 return sorted(heads, key=self.changelog.rev, reverse=True)
3254 3257
3255 3258 def branchheads(self, branch=None, start=None, closed=False):
3256 3259 """return a (possibly filtered) list of heads for the given branch
3257 3260
3258 3261 Heads are returned in topological order, from newest to oldest.
3259 3262 If branch is None, use the dirstate branch.
3260 3263 If start is not None, return only heads reachable from start.
3261 3264 If closed is True, return heads that are marked as closed as well.
3262 3265 """
3263 3266 if branch is None:
3264 3267 branch = self[None].branch()
3265 3268 branches = self.branchmap()
3266 3269 if not branches.hasbranch(branch):
3267 3270 return []
3268 3271 # the cache returns heads ordered lowest to highest
3269 3272 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3270 3273 if start is not None:
3271 3274 # filter out the heads that cannot be reached from startrev
3272 3275 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3273 3276 bheads = [h for h in bheads if h in fbheads]
3274 3277 return bheads
3275 3278
3276 3279 def branches(self, nodes):
3277 3280 if not nodes:
3278 3281 nodes = [self.changelog.tip()]
3279 3282 b = []
3280 3283 for n in nodes:
3281 3284 t = n
3282 3285 while True:
3283 3286 p = self.changelog.parents(n)
3284 3287 if p[1] != self.nullid or p[0] == self.nullid:
3285 3288 b.append((t, n, p[0], p[1]))
3286 3289 break
3287 3290 n = p[0]
3288 3291 return b
3289 3292
3290 3293 def between(self, pairs):
3291 3294 r = []
3292 3295
3293 3296 for top, bottom in pairs:
3294 3297 n, l, i = top, [], 0
3295 3298 f = 1
3296 3299
3297 3300 while n != bottom and n != self.nullid:
3298 3301 p = self.changelog.parents(n)[0]
3299 3302 if i == f:
3300 3303 l.append(n)
3301 3304 f = f * 2
3302 3305 n = p
3303 3306 i += 1
3304 3307
3305 3308 r.append(l)
3306 3309
3307 3310 return r
3308 3311
3309 3312 def checkpush(self, pushop):
3310 3313 """Extensions can override this function if additional checks have
3311 3314 to be performed before pushing, or call it if they override push
3312 3315 command.
3313 3316 """
3314 3317
3315 3318 @unfilteredpropertycache
3316 3319 def prepushoutgoinghooks(self):
3317 3320 """Return util.hooks consists of a pushop with repo, remote, outgoing
3318 3321 methods, which are called before pushing changesets.
3319 3322 """
3320 3323 return util.hooks()
3321 3324
3322 3325 def pushkey(self, namespace, key, old, new):
3323 3326 try:
3324 3327 tr = self.currenttransaction()
3325 3328 hookargs = {}
3326 3329 if tr is not None:
3327 3330 hookargs.update(tr.hookargs)
3328 3331 hookargs = pycompat.strkwargs(hookargs)
3329 3332 hookargs['namespace'] = namespace
3330 3333 hookargs['key'] = key
3331 3334 hookargs['old'] = old
3332 3335 hookargs['new'] = new
3333 3336 self.hook(b'prepushkey', throw=True, **hookargs)
3334 3337 except error.HookAbort as exc:
3335 3338 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3336 3339 if exc.hint:
3337 3340 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3338 3341 return False
3339 3342 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3340 3343 ret = pushkey.push(self, namespace, key, old, new)
3341 3344
3342 3345 def runhook(unused_success):
3343 3346 self.hook(
3344 3347 b'pushkey',
3345 3348 namespace=namespace,
3346 3349 key=key,
3347 3350 old=old,
3348 3351 new=new,
3349 3352 ret=ret,
3350 3353 )
3351 3354
3352 3355 self._afterlock(runhook)
3353 3356 return ret
3354 3357
3355 3358 def listkeys(self, namespace):
3356 3359 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3357 3360 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3358 3361 values = pushkey.list(self, namespace)
3359 3362 self.hook(b'listkeys', namespace=namespace, values=values)
3360 3363 return values
3361 3364
3362 3365 def debugwireargs(self, one, two, three=None, four=None, five=None):
3363 3366 '''used to test argument passing over the wire'''
3364 3367 return b"%s %s %s %s %s" % (
3365 3368 one,
3366 3369 two,
3367 3370 pycompat.bytestr(three),
3368 3371 pycompat.bytestr(four),
3369 3372 pycompat.bytestr(five),
3370 3373 )
3371 3374
3372 3375 def savecommitmessage(self, text):
3373 3376 fp = self.vfs(b'last-message.txt', b'wb')
3374 3377 try:
3375 3378 fp.write(text)
3376 3379 finally:
3377 3380 fp.close()
3378 3381 return self.pathto(fp.name[len(self.root) + 1 :])
3379 3382
3380 3383 def register_wanted_sidedata(self, category):
3381 3384 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3382 3385 # Only revlogv2 repos can want sidedata.
3383 3386 return
3384 3387 self._wanted_sidedata.add(pycompat.bytestr(category))
3385 3388
3386 3389 def register_sidedata_computer(
3387 3390 self, kind, category, keys, computer, flags, replace=False
3388 3391 ):
3389 3392 if kind not in revlogconst.ALL_KINDS:
3390 3393 msg = _(b"unexpected revlog kind '%s'.")
3391 3394 raise error.ProgrammingError(msg % kind)
3392 3395 category = pycompat.bytestr(category)
3393 3396 already_registered = category in self._sidedata_computers.get(kind, [])
3394 3397 if already_registered and not replace:
3395 3398 msg = _(
3396 3399 b"cannot register a sidedata computer twice for category '%s'."
3397 3400 )
3398 3401 raise error.ProgrammingError(msg % category)
3399 3402 if replace and not already_registered:
3400 3403 msg = _(
3401 3404 b"cannot replace a sidedata computer that isn't registered "
3402 3405 b"for category '%s'."
3403 3406 )
3404 3407 raise error.ProgrammingError(msg % category)
3405 3408 self._sidedata_computers.setdefault(kind, {})
3406 3409 self._sidedata_computers[kind][category] = (keys, computer, flags)
3407 3410
3408 3411
3409 3412 # used to avoid circular references so destructors work
3410 3413 def aftertrans(files):
3411 3414 renamefiles = [tuple(t) for t in files]
3412 3415
3413 3416 def a():
3414 3417 for vfs, src, dest in renamefiles:
3415 3418 # if src and dest refer to the same file, vfs.rename is a no-op,
3416 3419 # leaving both src and dest on disk. delete dest to make sure
3417 3420 # the rename couldn't be such a no-op.
3418 3421 vfs.tryunlink(dest)
3419 3422 try:
3420 3423 vfs.rename(src, dest)
3421 3424 except OSError: # journal file does not yet exist
3422 3425 pass
3423 3426
3424 3427 return a
3425 3428
3426 3429
3427 3430 def undoname(fn):
3428 3431 base, name = os.path.split(fn)
3429 3432 assert name.startswith(b'journal')
3430 3433 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3431 3434
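# Illustrative mapping (not part of this changeset): undoname() turns the
# journal file names listed by _journalfiles() into the names used by
# rollback, for example:
#
#     undoname(b'journal.dirstate')  ->  b'undo.dirstate'
#     undoname(b'journal.branch')    ->  b'undo.branch'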
3432 3435
3433 3436 def instance(ui, path, create, intents=None, createopts=None):
3434 3437 localpath = urlutil.urllocalpath(path)
3435 3438 if create:
3436 3439 createrepository(ui, localpath, createopts=createopts)
3437 3440
3438 3441 return makelocalrepository(ui, localpath, intents=intents)
3439 3442
3440 3443
3441 3444 def islocal(path):
3442 3445 return True
3443 3446
3444 3447
3445 3448 def defaultcreateopts(ui, createopts=None):
3446 3449 """Populate the default creation options for a repository.
3447 3450
3448 3451 A dictionary of explicitly requested creation options can be passed
3449 3452 in. Missing keys will be populated.
3450 3453 """
3451 3454 createopts = dict(createopts or {})
3452 3455
3453 3456 if b'backend' not in createopts:
3454 3457 # experimental config: storage.new-repo-backend
3455 3458 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3456 3459
3457 3460 return createopts
3458 3461
3459 3462
3460 3463 def newreporequirements(ui, createopts):
3461 3464 """Determine the set of requirements for a new local repository.
3462 3465
3463 3466 Extensions can wrap this function to specify custom requirements for
3464 3467 new repositories.
3465 3468 """
3466 3469 # If the repo is being created from a shared repository, we copy
3467 3470 # its requirements.
3468 3471 if b'sharedrepo' in createopts:
3469 3472 requirements = set(createopts[b'sharedrepo'].requirements)
3470 3473 if createopts.get(b'sharedrelative'):
3471 3474 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3472 3475 else:
3473 3476 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3474 3477
3475 3478 return requirements
3476 3479
3477 3480 if b'backend' not in createopts:
3478 3481 raise error.ProgrammingError(
3479 3482 b'backend key not present in createopts; '
3480 3483 b'was defaultcreateopts() called?'
3481 3484 )
3482 3485
3483 3486 if createopts[b'backend'] != b'revlogv1':
3484 3487 raise error.Abort(
3485 3488 _(
3486 3489 b'unable to determine repository requirements for '
3487 3490 b'storage backend: %s'
3488 3491 )
3489 3492 % createopts[b'backend']
3490 3493 )
3491 3494
3492 3495 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3493 3496 if ui.configbool(b'format', b'usestore'):
3494 3497 requirements.add(requirementsmod.STORE_REQUIREMENT)
3495 3498 if ui.configbool(b'format', b'usefncache'):
3496 3499 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3497 3500 if ui.configbool(b'format', b'dotencode'):
3498 3501 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3499 3502
3500 3503 compengines = ui.configlist(b'format', b'revlog-compression')
3501 3504 for compengine in compengines:
3502 3505 if compengine in util.compengines:
3503 3506 engine = util.compengines[compengine]
3504 3507 if engine.available() and engine.revlogheader():
3505 3508 break
3506 3509 else:
3507 3510 raise error.Abort(
3508 3511 _(
3509 3512 b'compression engines %s defined by '
3510 3513 b'format.revlog-compression not available'
3511 3514 )
3512 3515 % b', '.join(b'"%s"' % e for e in compengines),
3513 3516 hint=_(
3514 3517 b'run "hg debuginstall" to list available '
3515 3518 b'compression engines'
3516 3519 ),
3517 3520 )
3518 3521
3519 3522 # zlib is the historical default and doesn't need an explicit requirement.
3520 3523 if compengine == b'zstd':
3521 3524 requirements.add(b'revlog-compression-zstd')
3522 3525 elif compengine != b'zlib':
3523 3526 requirements.add(b'exp-compression-%s' % compengine)
3524 3527
3525 3528 if scmutil.gdinitconfig(ui):
3526 3529 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3527 3530 if ui.configbool(b'format', b'sparse-revlog'):
3528 3531 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3529 3532
3533 # experimental config: format.exp-dirstate-v2
3534 if ui.configbool(b'format', b'exp-dirstate-v2'):
3535 if dirstate.SUPPORTS_DIRSTATE_V2:
3536 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3537 else:
3538 raise error.Abort(
3539 _(
3540 b"dirstate v2 format requested by config "
3541 b"but not supported (requires Rust extensions)"
3542 )
3543 )
3544
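    # Illustrative configuration sketch (not part of this changeset): with a
    # Rust-enabled build, opting into the new format at repository creation
    # time is done through the experimental config checked above, e.g. in an
    # hgrc:
    #
    #     [format]
    #     exp-dirstate-v2 = true
    #
    # which adds the 'exp-dirstate-v2' entry to the new repository's
    # .hg/requires file.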
3530 3545 # experimental config: format.exp-use-copies-side-data-changeset
3531 3546 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3532 3547 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3533 3548 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3534 3549 if ui.configbool(b'experimental', b'treemanifest'):
3535 3550 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3536 3551
3537 3552 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3538 3553 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3539 3554 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3540 3555
3541 3556 revlogv2 = ui.config(b'experimental', b'revlogv2')
3542 3557 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3543 3558 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3544 3559 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3545 3560 # experimental config: format.internal-phase
3546 3561 if ui.configbool(b'format', b'internal-phase'):
3547 3562 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3548 3563
3549 3564 if createopts.get(b'narrowfiles'):
3550 3565 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3551 3566
3552 3567 if createopts.get(b'lfs'):
3553 3568 requirements.add(b'lfs')
3554 3569
3555 3570 if ui.configbool(b'format', b'bookmarks-in-store'):
3556 3571 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3557 3572
3558 3573 if ui.configbool(b'format', b'use-persistent-nodemap'):
3559 3574 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3560 3575
3561 3576 # if share-safe is enabled, let's create the new repository with the new
3562 3577 # requirement
3563 3578 if ui.configbool(b'format', b'use-share-safe'):
3564 3579 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3565 3580
3566 3581 return requirements
3567 3582
3568 3583
3569 3584 def checkrequirementscompat(ui, requirements):
3570 3585 """Checks compatibility of repository requirements enabled and disabled.
3571 3586
3572 3587 Returns a set of requirements which need to be dropped because dependent
3573 3588 requirements are not enabled. Also warns users about it"""
3574 3589
3575 3590 dropped = set()
3576 3591
3577 3592 if requirementsmod.STORE_REQUIREMENT not in requirements:
3578 3593 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3579 3594 ui.warn(
3580 3595 _(
3581 3596 b'ignoring enabled \'format.bookmarks-in-store\' config '
3582 3597 b'because it is incompatible with disabled '
3583 3598 b'\'format.usestore\' config\n'
3584 3599 )
3585 3600 )
3586 3601 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3587 3602
3588 3603 if (
3589 3604 requirementsmod.SHARED_REQUIREMENT in requirements
3590 3605 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3591 3606 ):
3592 3607 raise error.Abort(
3593 3608 _(
3594 3609 b"cannot create shared repository as source was created"
3595 3610 b" with 'format.usestore' config disabled"
3596 3611 )
3597 3612 )
3598 3613
3599 3614 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3600 3615 ui.warn(
3601 3616 _(
3602 3617 b"ignoring enabled 'format.use-share-safe' config because "
3603 3618 b"it is incompatible with disabled 'format.usestore'"
3604 3619 b" config\n"
3605 3620 )
3606 3621 )
3607 3622 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3608 3623
3609 3624 return dropped
3610 3625
3611 3626
3612 3627 def filterknowncreateopts(ui, createopts):
3613 3628 """Filters a dict of repo creation options against options that are known.
3614 3629
3615 3630 Receives a dict of repo creation options and returns a dict of those
3616 3631 options that we don't know how to handle.
3617 3632
3618 3633 This function is called as part of repository creation. If the
3619 3634 returned dict contains any items, repository creation will not
3620 3635 be allowed, as it means there was a request to create a repository
3621 3636 with options not recognized by loaded code.
3622 3637
3623 3638 Extensions can wrap this function to filter out creation options
3624 3639 they know how to handle.
3625 3640 """
3626 3641 known = {
3627 3642 b'backend',
3628 3643 b'lfs',
3629 3644 b'narrowfiles',
3630 3645 b'sharedrepo',
3631 3646 b'sharedrelative',
3632 3647 b'shareditems',
3633 3648 b'shallowfilestore',
3634 3649 }
3635 3650
3636 3651 return {k: v for k, v in createopts.items() if k not in known}
3637 3652
3638 3653
3639 3654 def createrepository(ui, path, createopts=None):
3640 3655 """Create a new repository in a vfs.
3641 3656
3642 3657 ``path`` path to the new repo's working directory.
3643 3658 ``createopts`` options for the new repository.
3644 3659
3645 3660 The following keys for ``createopts`` are recognized:
3646 3661
3647 3662 backend
3648 3663 The storage backend to use.
3649 3664 lfs
3650 3665 Repository will be created with ``lfs`` requirement. The lfs extension
3651 3666 will automatically be loaded when the repository is accessed.
3652 3667 narrowfiles
3653 3668 Set up repository to support narrow file storage.
3654 3669 sharedrepo
3655 3670 Repository object from which storage should be shared.
3656 3671 sharedrelative
3657 3672 Boolean indicating if the path to the shared repo should be
3658 3673 stored as relative. By default, the pointer to the "parent" repo
3659 3674 is stored as an absolute path.
3660 3675 shareditems
3661 3676 Set of items to share to the new repository (in addition to storage).
3662 3677 shallowfilestore
3663 3678 Indicates that storage for files should be shallow (not all ancestor
3664 3679 revisions are known).
3665 3680 """
3666 3681 createopts = defaultcreateopts(ui, createopts=createopts)
3667 3682
3668 3683 unknownopts = filterknowncreateopts(ui, createopts)
3669 3684
3670 3685 if not isinstance(unknownopts, dict):
3671 3686 raise error.ProgrammingError(
3672 3687 b'filterknowncreateopts() did not return a dict'
3673 3688 )
3674 3689
3675 3690 if unknownopts:
3676 3691 raise error.Abort(
3677 3692 _(
3678 3693 b'unable to create repository because of unknown '
3679 3694 b'creation option: %s'
3680 3695 )
3681 3696 % b', '.join(sorted(unknownopts)),
3682 3697 hint=_(b'is a required extension not loaded?'),
3683 3698 )
3684 3699
3685 3700 requirements = newreporequirements(ui, createopts=createopts)
3686 3701 requirements -= checkrequirementscompat(ui, requirements)
3687 3702
3688 3703 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3689 3704
3690 3705 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3691 3706 if hgvfs.exists():
3692 3707 raise error.RepoError(_(b'repository %s already exists') % path)
3693 3708
3694 3709 if b'sharedrepo' in createopts:
3695 3710 sharedpath = createopts[b'sharedrepo'].sharedpath
3696 3711
3697 3712 if createopts.get(b'sharedrelative'):
3698 3713 try:
3699 3714 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3700 3715 sharedpath = util.pconvert(sharedpath)
3701 3716 except (IOError, ValueError) as e:
3702 3717 # ValueError is raised on Windows if the drive letters differ
3703 3718 # on each path.
3704 3719 raise error.Abort(
3705 3720 _(b'cannot calculate relative path'),
3706 3721 hint=stringutil.forcebytestr(e),
3707 3722 )
3708 3723
3709 3724 if not wdirvfs.exists():
3710 3725 wdirvfs.makedirs()
3711 3726
3712 3727 hgvfs.makedir(notindexed=True)
3713 3728 if b'sharedrepo' not in createopts:
3714 3729 hgvfs.mkdir(b'cache')
3715 3730 hgvfs.mkdir(b'wcache')
3716 3731
3717 3732 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3718 3733 if has_store and b'sharedrepo' not in createopts:
3719 3734 hgvfs.mkdir(b'store')
3720 3735
3721 3736 # We create an invalid changelog outside the store so very old
3722 3737 # Mercurial versions (which didn't know about the requirements
3723 3738 # file) encounter an error on reading the changelog. This
3724 3739 # effectively locks out old clients and prevents them from
3725 3740 # mucking with a repo in an unknown format.
3726 3741 #
3727 3742 # The revlog header has version 65535, which won't be recognized by
3728 3743 # such old clients.
3729 3744 hgvfs.append(
3730 3745 b'00changelog.i',
3731 3746 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3732 3747 b'layout',
3733 3748 )
3734 3749
3735 3750 # Filter the requirements into working copy and store ones
3736 3751 wcreq, storereq = scmutil.filterrequirements(requirements)
3737 3752 # write working copy ones
3738 3753 scmutil.writerequires(hgvfs, wcreq)
3739 3754 # If there are store requirements and the current repository
3740 3755 # is not a shared one, write stored requirements
3741 3756 # For new shared repository, we don't need to write the store
3742 3757 # requirements as they are already present in store requires
3743 3758 if storereq and b'sharedrepo' not in createopts:
3744 3759 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3745 3760 scmutil.writerequires(storevfs, storereq)
3746 3761
3747 3762 # Write out file telling readers where to find the shared store.
3748 3763 if b'sharedrepo' in createopts:
3749 3764 hgvfs.write(b'sharedpath', sharedpath)
3750 3765
3751 3766 if createopts.get(b'shareditems'):
3752 3767 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3753 3768 hgvfs.write(b'shared', shared)
3754 3769
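# A minimal usage sketch (illustrative only, path assumed): creating a new
# repository on disk and then opening it through instance():
#
#     createrepository(ui, b'/path/to/new-repo')
#     repo = instance(ui, b'/path/to/new-repo', create=False)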
3755 3770
3756 3771 def poisonrepository(repo):
3757 3772 """Poison a repository instance so it can no longer be used."""
3758 3773 # Perform any cleanup on the instance.
3759 3774 repo.close()
3760 3775
3761 3776 # Our strategy is to replace the type of the object with one that
3762 3777 # has all attribute lookups result in error.
3763 3778 #
3764 3779 # But we have to allow the close() method because some constructors
3765 3780 # of repos call close() on repo references.
3766 3781 class poisonedrepository(object):
3767 3782 def __getattribute__(self, item):
3768 3783 if item == 'close':
3769 3784 return object.__getattribute__(self, item)
3770 3785
3771 3786 raise error.ProgrammingError(
3772 3787 b'repo instances should not be used after unshare'
3773 3788 )
3774 3789
3775 3790 def close(self):
3776 3791 pass
3777 3792
3778 3793 # We may have a repoview, which intercepts __setattr__. So be sure
3779 3794 # we operate at the lowest level possible.
3780 3795 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,82 +1,87 b''
1 1 # requirements.py - objects and functions related to repository requirements
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 GENERALDELTA_REQUIREMENT = b'generaldelta'
11 11 DOTENCODE_REQUIREMENT = b'dotencode'
12 12 STORE_REQUIREMENT = b'store'
13 13 FNCACHE_REQUIREMENT = b'fncache'
14 14
15 DIRSTATE_V2_REQUIREMENT = b'exp-dirstate-v2'
16
15 17 # When narrowing is finalized and no longer subject to format changes,
16 18 # we should move this to just "narrow" or similar.
17 19 NARROW_REQUIREMENT = b'narrowhg-experimental'
18 20
19 21 # Enables sparse working directory usage
20 22 SPARSE_REQUIREMENT = b'exp-sparse'
21 23
22 24 # Enables the internal phase which is used to hide changesets instead
23 25 # of stripping them
24 26 INTERNAL_PHASE_REQUIREMENT = b'internal-phase'
25 27
26 28 # Stores manifest in Tree structure
27 29 TREEMANIFEST_REQUIREMENT = b'treemanifest'
28 30
29 31 REVLOGV1_REQUIREMENT = b'revlogv1'
30 32
31 33 # Increment the sub-version when the revlog v2 format changes to lock out old
32 34 # clients.
33 35 CHANGELOGV2_REQUIREMENT = b'exp-changelog-v2'
34 36
35 37 # Increment the sub-version when the revlog v2 format changes to lock out old
36 38 # clients.
37 39 REVLOGV2_REQUIREMENT = b'exp-revlogv2.2'
38 40
39 41 # A repository with the sparserevlog feature will have delta chains that
40 42 # can spread over a larger span. Sparse reading cuts these large spans into
41 43 # pieces, so that each piece isn't too big.
42 44 # Without the sparserevlog capability, reading from the repository could use
43 45 # huge amounts of memory, because the whole span would be read at once,
44 46 # including all the intermediate revisions that aren't pertinent for the chain.
45 47 # This is why once a repository has enabled sparse-read, it becomes required.
46 48 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
47 49
48 50 # A repository with the copies-sidedata-changeset requirement will store
49 51 # copies related information in changeset's sidedata.
50 52 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
51 53
52 54 # The repository use persistent nodemap for the changelog and the manifest.
53 55 NODEMAP_REQUIREMENT = b'persistent-nodemap'
54 56
55 57 # Denotes that the current repository is a share
56 58 SHARED_REQUIREMENT = b'shared'
57 59
58 60 # Denotes that current repository is a share and the shared source path is
59 61 # relative to the current repository root path
60 62 RELATIVE_SHARED_REQUIREMENT = b'relshared'
61 63
62 64 # A repository with share implemented safely. The repository has different
63 65 # store and working copy requirements i.e. both `.hg/requires` and
64 66 # `.hg/store/requires` are present.
65 67 SHARESAFE_REQUIREMENT = b'share-safe'
66 68
67 69 # List of requirements which are working directory specific
68 70 # These requirements cannot be shared between repositories if they
69 71 # share the same store
70 72 # * sparse is a working directory specific functionality and hence working
71 73 # directory specific requirement
72 74 # * SHARED_REQUIREMENT and RELATIVE_SHARED_REQUIREMENT are requirements which
73 75 # represents that the current working copy/repository shares store of another
74 76 # repo. Hence both of them should be stored in working copy
75 77 # * SHARESAFE_REQUIREMENT needs to be stored in working dir to mark that rest of
76 78 # the requirements are stored in store's requires
79 # * DIRSTATE_V2_REQUIREMENT affects .hg/dirstate, of which there is one per
80 # working directory.
77 81 WORKING_DIR_REQUIREMENTS = {
78 82 SPARSE_REQUIREMENT,
79 83 SHARED_REQUIREMENT,
80 84 RELATIVE_SHARED_REQUIREMENT,
81 85 SHARESAFE_REQUIREMENT,
86 DIRSTATE_V2_REQUIREMENT,
82 87 }
@@ -1,1051 +1,1052 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from ..i18n import _
11 11 from .. import (
12 12 error,
13 13 localrepo,
14 14 pycompat,
15 15 requirements,
16 16 revlog,
17 17 util,
18 18 )
19 19
20 20 from ..utils import compression
21 21
22 22 if pycompat.TYPE_CHECKING:
23 23 from typing import (
24 24 List,
25 25 Type,
26 26 )
27 27
28 28
29 29 # list of requirements that request a clone of all revlog if added/removed
30 30 RECLONES_REQUIREMENTS = {
31 31 requirements.GENERALDELTA_REQUIREMENT,
32 32 requirements.SPARSEREVLOG_REQUIREMENT,
33 33 requirements.REVLOGV2_REQUIREMENT,
34 34 requirements.CHANGELOGV2_REQUIREMENT,
35 35 }
36 36
37 37
38 38 def preservedrequirements(repo):
39 39 return set()
40 40
41 41
42 42 FORMAT_VARIANT = b'deficiency'
43 43 OPTIMISATION = b'optimization'
44 44
45 45
46 46 class improvement(object):
47 47 """Represents an improvement that can be made as part of an upgrade.
48 48
49 49 The following attributes are defined on each instance:
50 50
51 51 name
52 52 Machine-readable string uniquely identifying this improvement. It
53 53 will be mapped to an action later in the upgrade process.
54 54
55 55 type
56 56 Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
57 57 A format variant is where we change the storage format. Not all format
58 58 variant changes are an obvious problem.
59 59 An optimization is an action (sometimes optional) that
60 60 can be taken to further improve the state of the repository.
61 61
62 62 description
63 63 Message intended for humans explaining the improvement in more detail,
64 64 including the implications of it. For ``FORMAT_VARIANT`` types, should be
65 65 worded in the present tense. For ``OPTIMISATION`` types, should be
66 66 worded in the future tense.
67 67
68 68 upgrademessage
69 69 Message intended for humans explaining what an upgrade addressing this
70 70 issue will do. Should be worded in the future tense.
71 71
72 72 postupgrademessage
73 73 Message intended for humans which will be shown post an upgrade
74 74 operation when the improvement will be added
75 75
76 76 postdowngrademessage
77 77 Message intended for humans which will be shown post an upgrade
78 78 operation in which this improvement was removed
79 79
80 80 touches_filelogs (bool)
81 81 Whether this improvement touches filelogs
82 82
83 83 touches_manifests (bool)
84 84 Whether this improvement touches manifests
85 85
86 86 touches_changelog (bool)
87 87 Whether this improvement touches changelog
88 88
89 89 touches_requirements (bool)
90 90 Whether this improvement changes repository requirements
91 91 """
92 92
93 93 def __init__(self, name, type, description, upgrademessage):
94 94 self.name = name
95 95 self.type = type
96 96 self.description = description
97 97 self.upgrademessage = upgrademessage
98 98 self.postupgrademessage = None
99 99 self.postdowngrademessage = None
100 100 # By default for now, we assume every improvement touches
101 101 # all the things
102 102 self.touches_filelogs = True
103 103 self.touches_manifests = True
104 104 self.touches_changelog = True
105 105 self.touches_requirements = True
106 106
107 107 def __eq__(self, other):
108 108 if not isinstance(other, improvement):
109 109 # This is what Python tells us to do
110 110 return NotImplemented
111 111 return self.name == other.name
112 112
113 113 def __ne__(self, other):
114 114 return not (self == other)
115 115
116 116 def __hash__(self):
117 117 return hash(self.name)
118 118
119 119
120 120 allformatvariant = [] # type: List[Type['formatvariant']]
121 121
122 122
123 123 def registerformatvariant(cls):
124 124 allformatvariant.append(cls)
125 125 return cls
126 126
127 127
128 128 class formatvariant(improvement):
129 129 """an improvement subclass dedicated to repository format"""
130 130
131 131 type = FORMAT_VARIANT
132 132 ### The following attributes should be defined for each class:
133 133
134 134 # machine-readable string uniquely identifying this improvement. it will be
135 135 # mapped to an action later in the upgrade process.
136 136 name = None
137 137
138 138 # message intended for humans explaining the improvement in more detail,
139 139 # including the implications of it. For ``FORMAT_VARIANT`` types, it
140 140 # should be worded
141 141 # in the present tense.
142 142 description = None
143 143
144 144 # message intended for humans explaining what an upgrade addressing this
145 145 # issue will do. should be worded in the future tense.
146 146 upgrademessage = None
147 147
148 148 # value of current Mercurial default for new repository
149 149 default = None
150 150
151 151 # Message intended for humans which will be shown post an upgrade
152 152 # operation when the improvement will be added
153 153 postupgrademessage = None
154 154
155 155 # Message intended for humans which will be shown post an upgrade
156 156 # operation in which this improvement was removed
157 157 postdowngrademessage = None
158 158
159 159 # By default for now, we assume every improvement touches all the things
160 160 touches_filelogs = True
161 161 touches_manifests = True
162 162 touches_changelog = True
163 163 touches_requirements = True
164 164
165 165 def __init__(self):
166 166 raise NotImplementedError()
167 167
168 168 @staticmethod
169 169 def fromrepo(repo):
170 170 """current value of the variant in the repository"""
171 171 raise NotImplementedError()
172 172
173 173 @staticmethod
174 174 def fromconfig(repo):
175 175 """current value of the variant in the configuration"""
176 176 raise NotImplementedError()
177 177
178 178
179 179 class requirementformatvariant(formatvariant):
180 180 """formatvariant based on a 'requirement' name.
181 181
182 182 Many format variants are controlled by a 'requirement'. We define a small
183 183 subclass to factor the code.
184 184 """
185 185
186 186 # the requirement that controls this format variant
187 187 _requirement = None
188 188
189 189 @staticmethod
190 190 def _newreporequirements(ui):
191 191 return localrepo.newreporequirements(
192 192 ui, localrepo.defaultcreateopts(ui)
193 193 )
194 194
195 195 @classmethod
196 196 def fromrepo(cls, repo):
197 197 assert cls._requirement is not None
198 198 return cls._requirement in repo.requirements
199 199
200 200 @classmethod
201 201 def fromconfig(cls, repo):
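# (i.e. whether a repository freshly created under the current
# configuration would carry this requirement)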
202 202 assert cls._requirement is not None
203 203 return cls._requirement in cls._newreporequirements(repo.ui)
204 204
205 205
206 206 @registerformatvariant
207 207 class fncache(requirementformatvariant):
208 208 name = b'fncache'
209 209
210 210 _requirement = requirements.FNCACHE_REQUIREMENT
211 211
212 212 default = True
213 213
214 214 description = _(
215 215 b'long and reserved filenames may not work correctly; '
216 216 b'repository performance is sub-optimal'
217 217 )
218 218
219 219 upgrademessage = _(
220 220 b'repository will be more resilient to storing '
221 221 b'certain paths and performance of certain '
222 222 b'operations should be improved'
223 223 )
224 224
225 225
226 226 @registerformatvariant
227 227 class dotencode(requirementformatvariant):
228 228 name = b'dotencode'
229 229
230 230 _requirement = requirements.DOTENCODE_REQUIREMENT
231 231
232 232 default = True
233 233
234 234 description = _(
235 235 b'storage of filenames beginning with a period or '
236 236 b'space may not work correctly'
237 237 )
238 238
239 239 upgrademessage = _(
240 240 b'repository will be better able to store files '
241 241 b'beginning with a space or period'
242 242 )
243 243
244 244
245 245 @registerformatvariant
246 246 class generaldelta(requirementformatvariant):
247 247 name = b'generaldelta'
248 248
249 249 _requirement = requirements.GENERALDELTA_REQUIREMENT
250 250
251 251 default = True
252 252
253 253 description = _(
254 254 b'deltas within internal storage are unable to '
255 255 b'choose optimal revisions; repository is larger and '
256 256 b'slower than it could be; interaction with other '
257 257 b'repositories may require extra network and CPU '
258 258 b'resources, making "hg push" and "hg pull" slower'
259 259 )
260 260
261 261 upgrademessage = _(
262 262 b'repository storage will be able to create '
263 263 b'optimal deltas; new repository data will be '
264 264 b'smaller and read times should decrease; '
265 265 b'interacting with other repositories using this '
266 266 b'storage model should require less network and '
267 267 b'CPU resources, making "hg push" and "hg pull" '
268 268 b'faster'
269 269 )
270 270
271 271
272 272 @registerformatvariant
273 273 class sharesafe(requirementformatvariant):
274 274 name = b'share-safe'
275 275 _requirement = requirements.SHARESAFE_REQUIREMENT
276 276
277 277 default = False
278 278
279 279 description = _(
280 280 b'old shared repositories do not share source repository '
281 281 b'requirements and config. This leads to various problems '
282 282 b'when the source repository format is upgraded or some new '
283 283 b'extensions are enabled.'
284 284 )
285 285
286 286 upgrademessage = _(
287 287 b'Upgrades a repository to share-safe format so that future '
288 288 b'shares of this repository share its requirements and configs.'
289 289 )
290 290
291 291 postdowngrademessage = _(
292 292 b'repository downgraded to not use share safe mode, '
293 293 b'existing shares will not work and need to'
294 294 b' be reshared.'
295 295 )
296 296
297 297 postupgrademessage = _(
298 298 b'repository upgraded to share safe mode, existing'
299 299 b' shares will still work in old non-safe mode. '
300 300 b'Re-share existing shares to use them in safe mode.'
301 301 b' New shares will be created in safe mode.'
302 302 )
303 303
304 304 # upgrade only needs to change the requirements
305 305 touches_filelogs = False
306 306 touches_manifests = False
307 307 touches_changelog = False
308 308 touches_requirements = True
309 309
310 310
311 311 @registerformatvariant
312 312 class sparserevlog(requirementformatvariant):
313 313 name = b'sparserevlog'
314 314
315 315 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
316 316
317 317 default = True
318 318
319 319 description = _(
320 320 b'in order to limit disk reading and memory usage on older '
321 321 b'versions, the span of a delta chain from its root to its '
322 322 b'end is limited, regardless of the relevant data in this span. '
323 323 b'This can severely limit the ability of Mercurial to build '
324 324 b'good delta chains, resulting in much more storage space being '
325 325 b'used and limiting the reusability of on-disk deltas during '
326 326 b'exchange.'
327 327 )
328 328
329 329 upgrademessage = _(
330 330 b'Revlog supports delta chains with more unused data '
331 331 b'between payloads. These gaps will be skipped at read '
332 332 b'time. This allows for better delta chains, yielding '
333 333 b'better compression and faster exchange with the server.'
334 334 )
335 335
336 336
337 337 @registerformatvariant
338 338 class persistentnodemap(requirementformatvariant):
339 339 name = b'persistent-nodemap'
340 340
341 341 _requirement = requirements.NODEMAP_REQUIREMENT
342 342
343 343 default = False
344 344
345 345 description = _(
346 346 b'persist the node -> rev mapping on disk to speedup lookup'
347 347 )
348 348
349 349 upgrademessage = _(b'Speedup revision lookup by node id.')
350 350
351 351
352 352 @registerformatvariant
353 353 class copiessdc(requirementformatvariant):
354 354 name = b'copies-sdc'
355 355
356 356 _requirement = requirements.COPIESSDC_REQUIREMENT
357 357
358 358 default = False
359 359
360 360 description = _(b'Stores copy information alongside changesets.')
361 361
362 362 upgrademessage = _(
363 363 b'Allows using a more efficient algorithm to deal with copy tracing.'
364 364 )
365 365
366 366
367 367 @registerformatvariant
368 368 class revlogv2(requirementformatvariant):
369 369 name = b'revlog-v2'
370 370 _requirement = requirements.REVLOGV2_REQUIREMENT
371 371 default = False
372 372 description = _(b'Version 2 of the revlog.')
373 373 upgrademessage = _(b'very experimental')
374 374
375 375
376 376 @registerformatvariant
377 377 class changelogv2(requirementformatvariant):
378 378 name = b'changelog-v2'
379 379 _requirement = requirements.CHANGELOGV2_REQUIREMENT
380 380 default = False
381 381 description = _(b'An iteration of the revlog focussed on changelog needs.')
382 382 upgrademessage = _(b'quite experimental')
383 383
384 384
385 385 @registerformatvariant
386 386 class removecldeltachain(formatvariant):
387 387 name = b'plain-cl-delta'
388 388
389 389 default = True
390 390
391 391 description = _(
392 392 b'changelog storage is using deltas instead of '
393 393 b'raw entries; changelog reading and any '
394 394 b'operation relying on changelog data are slower '
395 395 b'than they could be'
396 396 )
397 397
398 398 upgrademessage = _(
399 399 b'changelog storage will be reformatted to '
400 400 b'store raw entries; changelog reading will be '
401 401 b'faster; changelog size may be reduced'
402 402 )
403 403
404 404 @staticmethod
405 405 def fromrepo(repo):
406 406 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
407 407 # changelogs with deltas.
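# A revision stored as a full snapshot is its own chain base, so the
# changelog is delta-free exactly when rev == chainbase(rev) holds for
# every revision.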
408 408 cl = repo.changelog
409 409 chainbase = cl.chainbase
410 410 return all(rev == chainbase(rev) for rev in cl)
411 411
412 412 @staticmethod
413 413 def fromconfig(repo):
414 414 return True
415 415
416 416
417 417 _has_zstd = (
418 418 b'zstd' in util.compengines
419 419 and util.compengines[b'zstd'].available()
420 420 and util.compengines[b'zstd'].revlogheader()
421 421 )
422 422
423 423
424 424 @registerformatvariant
425 425 class compressionengine(formatvariant):
426 426 name = b'compression'
427 427
428 428 if _has_zstd:
429 429 default = b'zstd'
430 430 else:
431 431 default = b'zlib'
432 432
433 433 description = _(
434 434 b'Compression algorithm used to compress data. '
435 435 b'Some engines are faster than others.'
436 436 )
437 437
438 438 upgrademessage = _(
439 439 b'revlog content will be recompressed with the new algorithm.'
440 440 )
441 441
442 442 @classmethod
443 443 def fromrepo(cls, repo):
444 444 # we allow multiple compression engine requirements to co-exist because
445 445 # strictly speaking, revlogs seem to support mixed compression styles.
446 446 #
447 447 # The compression used for new entries will be "the last one"
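# (for illustration: a requirement such as b'revlog-compression-zstd'
# splits into [b'revlog', b'compression', b'zstd'], so the engine name
# is the third component)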
448 448 compression = b'zlib'
449 449 for req in repo.requirements:
450 450 prefix = req.startswith
451 451 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
452 452 compression = req.split(b'-', 2)[2]
453 453 return compression
454 454
455 455 @classmethod
456 456 def fromconfig(cls, repo):
457 457 compengines = repo.ui.configlist(b'format', b'revlog-compression')
458 458 # return the first valid value as the selection code would do
459 459 for comp in compengines:
460 460 if comp in util.compengines:
461 461 e = util.compengines[comp]
462 462 if e.available() and e.revlogheader():
463 463 return comp
464 464
465 465 # no valid compression found; let's display them all for clarity
466 466 return b','.join(compengines)
467 467
468 468
469 469 @registerformatvariant
470 470 class compressionlevel(formatvariant):
471 471 name = b'compression-level'
472 472 default = b'default'
473 473
474 474 description = _(b'compression level')
475 475
476 476 upgrademessage = _(b'revlog content will be recompressed')
477 477
478 478 @classmethod
479 479 def fromrepo(cls, repo):
480 480 comp = compressionengine.fromrepo(repo)
481 481 level = None
482 482 if comp == b'zlib':
483 483 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
484 484 elif comp == b'zstd':
485 485 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
486 486 if level is None:
487 487 return b'default'
488 488 return b'%d' % level
489 489
490 490 @classmethod
491 491 def fromconfig(cls, repo):
492 492 comp = compressionengine.fromconfig(repo)
493 493 level = None
494 494 if comp == b'zlib':
495 495 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
496 496 elif comp == b'zstd':
497 497 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
498 498 if level is None:
499 499 return b'default'
500 500 return b'%d' % level
501 501
502 502
503 503 def find_format_upgrades(repo):
504 504 """returns a list of format upgrades which can be perform on the repo"""
505 505 upgrades = []
506 506
507 507 # We could detect lack of revlogv1 and store here, but they were added
508 508 # in 0.9.2 and we don't support upgrading repos without these
509 509 # requirements, so let's not bother.
510 510
511 511 for fv in allformatvariant:
512 512 if not fv.fromrepo(repo):
513 513 upgrades.append(fv)
514 514
515 515 return upgrades
516 516
517 517
518 518 def find_format_downgrades(repo):
519 519 """returns a list of format downgrades which will be performed on the repo
520 520 because of disabled config options for them"""
521 521
522 522 downgrades = []
523 523
524 524 for fv in allformatvariant:
525 525 if fv.name == b'compression':
526 526 # If there is a compression change between repository
527 527 # and config, destination repository compression will change
528 528 # and current compression will be removed.
529 529 if fv.fromrepo(repo) != fv.fromconfig(repo):
530 530 downgrades.append(fv)
531 531 continue
532 532 # format variant exists in repo but does not exist in new repository
533 533 # config
534 534 if fv.fromrepo(repo) and not fv.fromconfig(repo):
535 535 downgrades.append(fv)
536 536
537 537 return downgrades
538 538
539 539
540 540 ALL_OPTIMISATIONS = []
541 541
542 542
543 543 def register_optimization(obj):
544 544 ALL_OPTIMISATIONS.append(obj)
545 545 return obj
546 546
547 547
548 548 register_optimization(
549 549 improvement(
550 550 name=b're-delta-parent',
551 551 type=OPTIMISATION,
552 552 description=_(
553 553 b'deltas within internal storage will be recalculated to '
554 554 b'choose an optimal base revision where this was not '
555 555 b'already done; the size of the repository may shrink and '
556 556 b'various operations may become faster; the first time '
557 557 b'this optimization is performed could slow down upgrade '
558 558 b'execution considerably; subsequent invocations should '
559 559 b'not run noticeably slower'
560 560 ),
561 561 upgrademessage=_(
562 562 b'deltas within internal storage will choose a new '
563 563 b'base revision if needed'
564 564 ),
565 565 )
566 566 )
567 567
568 568 register_optimization(
569 569 improvement(
570 570 name=b're-delta-multibase',
571 571 type=OPTIMISATION,
572 572 description=_(
573 573 b'deltas within internal storage will be recalculated '
574 574 b'against multiple base revision and the smallest '
575 575 b'difference will be used; the size of the repository may '
576 576 b'shrink significantly when there are many merges; this '
577 577 b'optimization will slow down execution in proportion to '
578 578 b'the number of merges in the repository and the amount '
579 579 b'of files in the repository; this slow down should not '
580 580 b'be significant unless there are tens of thousands of '
581 581 b'files and thousands of merges'
582 582 ),
583 583 upgrademessage=_(
584 584 b'deltas within internal storage will choose an '
585 585 b'optimal delta by computing deltas against multiple '
586 586 b'parents; may slow down execution time '
587 587 b'significantly'
588 588 ),
589 589 )
590 590 )
591 591
592 592 register_optimization(
593 593 improvement(
594 594 name=b're-delta-all',
595 595 type=OPTIMISATION,
596 596 description=_(
597 597 b'deltas within internal storage will always be '
598 598 b'recalculated without reusing prior deltas; this will '
599 599 b'likely make execution run several times slower; this '
600 600 b'optimization is typically not needed'
601 601 ),
602 602 upgrademessage=_(
603 603 b'deltas within internal storage will be fully '
604 604 b'recomputed; this will likely drastically slow down '
605 605 b'execution time'
606 606 ),
607 607 )
608 608 )
609 609
610 610 register_optimization(
611 611 improvement(
612 612 name=b're-delta-fulladd',
613 613 type=OPTIMISATION,
614 614 description=_(
615 615 b'every revision will be re-added as if it was new '
616 616 b'content. It will go through the full storage '
617 617 b'mechanism giving extensions a chance to process it '
618 618 b'(eg. lfs). This is similar to "re-delta-all" but even '
619 619 b'slower since more logic is involved.'
620 620 ),
621 621 upgrademessage=_(
622 622 b'each revision will be added as new content to the '
623 623 b'internal storage; this will likely drastically slow '
624 624 b'down execution time, but some extensions might need '
625 625 b'it'
626 626 ),
627 627 )
628 628 )
629 629
630 630
631 631 def findoptimizations(repo):
632 632 """Determine optimisation that could be used during upgrade"""
633 633 # These are unconditionally added. There is logic later that figures out
634 634 # which ones to apply.
635 635 return list(ALL_OPTIMISATIONS)
636 636
637 637
638 638 def determine_upgrade_actions(
639 639 repo, format_upgrades, optimizations, sourcereqs, destreqs
640 640 ):
641 641 """Determine upgrade actions that will be performed.
642 642
643 643 Given a list of improvements as returned by ``find_format_upgrades`` and
644 644 ``findoptimizations``, determine the list of upgrade actions that
645 645 will be performed.
646 646
647 647 The role of this function is to filter improvements if needed, apply
648 648 recommended optimizations from the improvements list that make sense,
649 649 etc.
650 650
651 651 Returns a list of action names.
652 652 """
653 653 newactions = []
654 654
655 655 for d in format_upgrades:
656 656 name = d._requirement
657 657
658 658 # If the action is a requirement that doesn't show up in the
659 659 # destination requirements, prune the action.
660 660 if name is not None and name not in destreqs:
661 661 continue
662 662
663 663 newactions.append(d)
664 664
665 665 newactions.extend(o for o in sorted(optimizations) if o not in newactions)
666 666
667 667 # FUTURE consider adding some optimizations here for certain transitions.
668 668 # e.g. adding generaldelta could schedule parent redeltas.
669 669
670 670 return newactions
671 671
672 672
673 673 class UpgradeOperation(object):
674 674 """represent the work to be done during an upgrade"""
675 675
676 676 def __init__(
677 677 self,
678 678 ui,
679 679 new_requirements,
680 680 current_requirements,
681 681 upgrade_actions,
682 682 removed_actions,
683 683 revlogs_to_process,
684 684 backup_store,
685 685 ):
686 686 self.ui = ui
687 687 self.new_requirements = new_requirements
688 688 self.current_requirements = current_requirements
689 689 # list of upgrade actions the operation will perform
690 690 self.upgrade_actions = upgrade_actions
691 691 self._upgrade_actions_names = set([a.name for a in upgrade_actions])
692 692 self.removed_actions = removed_actions
693 693 self.revlogs_to_process = revlogs_to_process
694 694 # requirements which will be added by the operation
695 695 self._added_requirements = (
696 696 self.new_requirements - self.current_requirements
697 697 )
698 698 # requirements which will be removed by the operation
699 699 self._removed_requirements = (
700 700 self.current_requirements - self.new_requirements
701 701 )
702 702 # requirements which will be preserved by the operation
703 703 self._preserved_requirements = (
704 704 self.current_requirements & self.new_requirements
705 705 )
706 706 # optimizations which are not part of this operation but which the
707 707 # user is recommended to enable
708 708 all_optimizations = findoptimizations(None)
709 709 self.unused_optimizations = [
710 710 i for i in all_optimizations if i not in self.upgrade_actions
711 711 ]
712 712
713 713 # delta reuse mode of this upgrade operation
714 714 self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
715 715 if b're-delta-all' in self._upgrade_actions_names:
716 716 self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
717 717 elif b're-delta-parent' in self._upgrade_actions_names:
718 718 self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
719 719 elif b're-delta-multibase' in self._upgrade_actions_names:
720 720 self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
721 721 elif b're-delta-fulladd' in self._upgrade_actions_names:
722 722 self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD
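# note: 're-delta-parent' and 're-delta-multibase' both map to
# DELTAREUSESAMEREVS; the multibase case is further distinguished by
# force_re_delta_both_parents below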
723 723
724 724 # should this operation force re-delta of both parents
725 725 self.force_re_delta_both_parents = (
726 726 b're-delta-multibase' in self._upgrade_actions_names
727 727 )
728 728
729 729 # should this operation create a backup of the store
730 730 self.backup_store = backup_store
731 731
732 732 # whether the operation touches different revlogs at all or not
733 733 self.touches_filelogs = self._touches_filelogs()
734 734 self.touches_manifests = self._touches_manifests()
735 735 self.touches_changelog = self._touches_changelog()
736 736 # whether the operation touches requirements file or not
737 737 self.touches_requirements = self._touches_requirements()
738 738 self.touches_store = (
739 739 self.touches_filelogs
740 740 or self.touches_manifests
741 741 or self.touches_changelog
742 742 )
743 743 # does the operation only touch the repository requirements
744 744 self.requirements_only = (
745 745 self.touches_requirements and not self.touches_store
746 746 )
747 747
748 748 def _touches_filelogs(self):
749 749 for a in self.upgrade_actions:
750 750 # in optimisations, we re-process the revlogs again
751 751 if a.type == OPTIMISATION:
752 752 return True
753 753 elif a.touches_filelogs:
754 754 return True
755 755 for a in self.removed_actions:
756 756 if a.touches_filelogs:
757 757 return True
758 758 return False
759 759
760 760 def _touches_manifests(self):
761 761 for a in self.upgrade_actions:
762 762 # in optimisations, we re-process the revlogs again
763 763 if a.type == OPTIMISATION:
764 764 return True
765 765 elif a.touches_manifests:
766 766 return True
767 767 for a in self.removed_actions:
768 768 if a.touches_manifests:
769 769 return True
770 770 return False
771 771
772 772 def _touches_changelog(self):
773 773 for a in self.upgrade_actions:
774 774 # in optimisations, we re-process the revlogs again
775 775 if a.type == OPTIMISATION:
776 776 return True
777 777 elif a.touches_changelog:
778 778 return True
779 779 for a in self.removed_actions:
780 780 if a.touches_changelog:
781 781 return True
782 782 return False
783 783
784 784 def _touches_requirements(self):
785 785 for a in self.upgrade_actions:
786 786 # optimisations are used to re-process revlogs and do not result
787 787 # in a requirement being added or removed
788 788 if a.type == OPTIMISATION:
789 789 pass
790 790 elif a.touches_requirements:
791 791 return True
792 792 for a in self.removed_actions:
793 793 if a.touches_requirements:
794 794 return True
795 795
796 796 return False
797 797
798 798 def _write_labeled(self, l, label):
799 799 """
800 800 Utility function to aid writing of a list under one label
801 801 """
802 802 first = True
803 803 for r in sorted(l):
804 804 if not first:
805 805 self.ui.write(b', ')
806 806 self.ui.write(r, label=label)
807 807 first = False
808 808
809 809 def print_requirements(self):
810 810 self.ui.write(_(b'requirements\n'))
811 811 self.ui.write(_(b' preserved: '))
812 812 self._write_labeled(
813 813 self._preserved_requirements, "upgrade-repo.requirement.preserved"
814 814 )
815 815 self.ui.write((b'\n'))
816 816 if self._removed_requirements:
817 817 self.ui.write(_(b' removed: '))
818 818 self._write_labeled(
819 819 self._removed_requirements, "upgrade-repo.requirement.removed"
820 820 )
821 821 self.ui.write((b'\n'))
822 822 if self._added_requirements:
823 823 self.ui.write(_(b' added: '))
824 824 self._write_labeled(
825 825 self._added_requirements, "upgrade-repo.requirement.added"
826 826 )
827 827 self.ui.write((b'\n'))
828 828 self.ui.write(b'\n')
829 829
830 830 def print_optimisations(self):
831 831 optimisations = [
832 832 a for a in self.upgrade_actions if a.type == OPTIMISATION
833 833 ]
834 834 optimisations.sort(key=lambda a: a.name)
835 835 if optimisations:
836 836 self.ui.write(_(b'optimisations: '))
837 837 self._write_labeled(
838 838 [a.name for a in optimisations],
839 839 "upgrade-repo.optimisation.performed",
840 840 )
841 841 self.ui.write(b'\n\n')
842 842
843 843 def print_upgrade_actions(self):
844 844 for a in self.upgrade_actions:
845 845 self.ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
846 846
847 847 def print_affected_revlogs(self):
848 848 if not self.revlogs_to_process:
849 849 self.ui.write((b'no revlogs to process\n'))
850 850 else:
851 851 self.ui.write((b'processed revlogs:\n'))
852 852 for r in sorted(self.revlogs_to_process):
853 853 self.ui.write((b' - %s\n' % r))
854 854 self.ui.write((b'\n'))
855 855
856 856 def print_unused_optimizations(self):
857 857 for i in self.unused_optimizations:
858 858 self.ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
859 859
860 860 def has_upgrade_action(self, name):
861 861 """Check whether the upgrade operation will perform this action"""
862 862 return name in self._upgrade_actions_names
863 863
864 864 def print_post_op_messages(self):
865 865 """print post upgrade operation warning messages"""
866 866 for a in self.upgrade_actions:
867 867 if a.postupgrademessage is not None:
868 868 self.ui.warn(b'%s\n' % a.postupgrademessage)
869 869 for a in self.removed_actions:
870 870 if a.postdowngrademessage is not None:
871 871 self.ui.warn(b'%s\n' % a.postdowngrademessage)
872 872
873 873
874 874 ### Code checking if a repository can go through the upgrade process at all. #
875 875
876 876
877 877 def requiredsourcerequirements(repo):
878 878 """Obtain requirements required to be present to upgrade a repo.
879 879
880 880 An upgrade will not be allowed if the repository doesn't have the
881 881 requirements returned by this function.
882 882 """
883 883 return {
884 884 # Introduced in Mercurial 0.9.2.
885 885 requirements.STORE_REQUIREMENT,
886 886 }
887 887
888 888
889 889 def blocksourcerequirements(repo):
890 890 """Obtain requirements that will prevent an upgrade from occurring.
891 891
892 892 An upgrade cannot be performed if the source repository contains a
893 893 requirement in the returned set.
894 894 """
895 895 return {
896 896 # The upgrade code does not yet support these experimental features.
897 897 # This is an artificial limitation.
898 898 requirements.TREEMANIFEST_REQUIREMENT,
899 899 # This was a precursor to generaldelta and was never enabled by default.
900 900 # It should (hopefully) not exist in the wild.
901 901 b'parentdelta',
902 902 # Upgrade should operate on the actual store, not the shared link.
903 903 requirements.SHARED_REQUIREMENT,
904 904 }
905 905
906 906
907 907 def check_revlog_version(reqs):
908 908 """Check that the requirements contain at least one Revlog version"""
909 909 all_revlogs = {
910 910 requirements.REVLOGV1_REQUIREMENT,
911 911 requirements.REVLOGV2_REQUIREMENT,
912 912 }
913 913 if not all_revlogs.intersection(reqs):
914 914 msg = _(b'cannot upgrade repository; missing a revlog version')
915 915 raise error.Abort(msg)
916 916
917 917
918 918 def check_source_requirements(repo):
919 919 """Ensure that no existing requirements prevent the repository upgrade"""
920 920
921 921 check_revlog_version(repo.requirements)
922 922 required = requiredsourcerequirements(repo)
923 923 missingreqs = required - repo.requirements
924 924 if missingreqs:
925 925 msg = _(b'cannot upgrade repository; requirement missing: %s')
926 926 missingreqs = b', '.join(sorted(missingreqs))
927 927 raise error.Abort(msg % missingreqs)
928 928
929 929 blocking = blocksourcerequirements(repo)
930 930 blockingreqs = blocking & repo.requirements
931 931 if blockingreqs:
932 932 m = _(b'cannot upgrade repository; unsupported source requirement: %s')
933 933 blockingreqs = b', '.join(sorted(blockingreqs))
934 934 raise error.Abort(m % blockingreqs)
935 935
936 936
937 937 ### Verify the validity of the planned requirement changes ####################
938 938
939 939
940 940 def supportremovedrequirements(repo):
941 941 """Obtain requirements that can be removed during an upgrade.
942 942
943 943 If an upgrade were to create a repository that dropped a requirement,
944 944 the dropped requirement must appear in the returned set for the upgrade
945 945 to be allowed.
946 946 """
947 947 supported = {
948 948 requirements.SPARSEREVLOG_REQUIREMENT,
949 949 requirements.COPIESSDC_REQUIREMENT,
950 950 requirements.NODEMAP_REQUIREMENT,
951 951 requirements.SHARESAFE_REQUIREMENT,
952 952 requirements.REVLOGV2_REQUIREMENT,
953 953 requirements.CHANGELOGV2_REQUIREMENT,
954 954 requirements.REVLOGV1_REQUIREMENT,
955 955 }
956 956 for name in compression.compengines:
957 957 engine = compression.compengines[name]
958 958 if engine.available() and engine.revlogheader():
959 959 supported.add(b'exp-compression-%s' % name)
960 960 if engine.name() == b'zstd':
961 961 supported.add(b'revlog-compression-zstd')
962 962 return supported
963 963
964 964
965 965 def supporteddestrequirements(repo):
966 966 """Obtain requirements that upgrade supports in the destination.
967 967
968 968 If the result of the upgrade would create requirements not in this set,
969 969 the upgrade is disallowed.
970 970
971 971 Extensions should monkeypatch this to add their custom requirements.
972 972 """
973 973 supported = {
974 974 requirements.DOTENCODE_REQUIREMENT,
975 975 requirements.FNCACHE_REQUIREMENT,
976 976 requirements.GENERALDELTA_REQUIREMENT,
977 977 requirements.REVLOGV1_REQUIREMENT, # allowed in case of downgrade
978 978 requirements.STORE_REQUIREMENT,
979 979 requirements.SPARSEREVLOG_REQUIREMENT,
980 980 requirements.COPIESSDC_REQUIREMENT,
981 981 requirements.NODEMAP_REQUIREMENT,
982 982 requirements.SHARESAFE_REQUIREMENT,
983 983 requirements.REVLOGV2_REQUIREMENT,
984 984 requirements.CHANGELOGV2_REQUIREMENT,
985 requirements.DIRSTATE_V2_REQUIREMENT,
985 986 }
986 987 for name in compression.compengines:
987 988 engine = compression.compengines[name]
988 989 if engine.available() and engine.revlogheader():
989 990 supported.add(b'exp-compression-%s' % name)
990 991 if engine.name() == b'zstd':
991 992 supported.add(b'revlog-compression-zstd')
992 993 return supported
993 994
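# A minimal sketch of how a third-party extension might extend the set
# returned above, assuming this module is importable as
# ``mercurial.upgrade_utils.actions`` and using the standard
# ``extensions.wrapfunction`` helper; the requirement name and the wrapper
# are hypothetical:
#
#   from mercurial import extensions
#   from mercurial.upgrade_utils import actions as upgrade_actions
#
#   EXT_REQUIREMENT = b'exp-myextension-storage'
#
#   def _wrapped_destreqs(orig, repo):
#       supported = orig(repo)
#       supported.add(EXT_REQUIREMENT)
#       return supported
#
#   extensions.wrapfunction(
#       upgrade_actions, 'supporteddestrequirements', _wrapped_destreqs
#   )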
994 995
995 996 def allowednewrequirements(repo):
996 997 """Obtain requirements that can be added to a repository during upgrade.
997 998
998 999 This is used to disallow proposed requirements from being added when
999 1000 they weren't present before.
1000 1001
1001 1002 We use a list of allowed requirement additions instead of a list of known
1002 1003 bad additions because the whitelist approach is safer and will prevent
1003 1004 future, unknown requirements from accidentally being added.
1004 1005 """
1005 1006 supported = {
1006 1007 requirements.DOTENCODE_REQUIREMENT,
1007 1008 requirements.FNCACHE_REQUIREMENT,
1008 1009 requirements.GENERALDELTA_REQUIREMENT,
1009 1010 requirements.SPARSEREVLOG_REQUIREMENT,
1010 1011 requirements.COPIESSDC_REQUIREMENT,
1011 1012 requirements.NODEMAP_REQUIREMENT,
1012 1013 requirements.SHARESAFE_REQUIREMENT,
1013 1014 requirements.REVLOGV1_REQUIREMENT,
1014 1015 requirements.REVLOGV2_REQUIREMENT,
1015 1016 requirements.CHANGELOGV2_REQUIREMENT,
1016 1017 }
1017 1018 for name in compression.compengines:
1018 1019 engine = compression.compengines[name]
1019 1020 if engine.available() and engine.revlogheader():
1020 1021 supported.add(b'exp-compression-%s' % name)
1021 1022 if engine.name() == b'zstd':
1022 1023 supported.add(b'revlog-compression-zstd')
1023 1024 return supported
1024 1025
1025 1026
1026 1027 def check_requirements_changes(repo, new_reqs):
1027 1028 old_reqs = repo.requirements
1028 1029 check_revlog_version(repo.requirements)
1029 1030 support_removal = supportremovedrequirements(repo)
1030 1031 no_remove_reqs = old_reqs - new_reqs - support_removal
1031 1032 if no_remove_reqs:
1032 1033 msg = _(b'cannot upgrade repository; requirement would be removed: %s')
1033 1034 no_remove_reqs = b', '.join(sorted(no_remove_reqs))
1034 1035 raise error.Abort(msg % no_remove_reqs)
1035 1036
1036 1037 support_addition = allowednewrequirements(repo)
1037 1038 no_add_reqs = new_reqs - old_reqs - support_addition
1038 1039 if no_add_reqs:
1039 1040 m = _(b'cannot upgrade repository; adding requirement not supported: ')
1040 1041 no_add_reqs = b', '.join(sorted(no_add_reqs))
1041 1042 raise error.Abort(m + no_add_reqs)
1042 1043
1043 1044 supported = supporteddestrequirements(repo)
1044 1045 unsupported_reqs = new_reqs - supported
1045 1046 if unsupported_reqs:
1046 1047 msg = _(
1047 1048 b'cannot upgrade repository; unsupported destination '
1048 1049 b'requirement: %s'
1049 1050 )
1050 1051 unsupported_reqs = b', '.join(sorted(unsupported_reqs))
1051 1052 raise error.Abort(msg % unsupported_reqs)