changelog-v2: add a configuration to disable rank computation...
marmoute
r50558:45d7b8c3 default
@@ -1,2901 +1,2908 b''
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import functools
10 10 import re
11 11
12 12 from . import (
13 13 encoding,
14 14 error,
15 15 )
16 16
17 17
18 18 def loadconfigtable(ui, extname, configtable):
19 19 """update config item known to the ui with the extension ones"""
20 20 for section, items in sorted(configtable.items()):
21 21 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 22 knownkeys = set(knownitems)
23 23 newkeys = set(items)
24 24 for key in sorted(knownkeys & newkeys):
25 25 msg = b"extension '%s' overwrite config item '%s.%s'"
26 26 msg %= (extname, section, key)
27 27 ui.develwarn(msg, config=b'warn-config')
28 28
29 29 knownitems.update(items)
30 30
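For orientation, a hedged sketch of the extension side of this mechanism (not part of the diff): an extension typically declares its options through mercurial.registrar, and loadconfigtable() above merges that table into ui._knownconfig at load time, warning when a core item is shadowed. The section and option names here are hypothetical.

    from mercurial import registrar

    configtable = {}
    configitem = registrar.configitem(configtable)

    configitem(
        b'myext',            # hypothetical section
        b'verbose-output',   # hypothetical option name
        default=False,
    )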
31 31
32 32 class configitem:
33 33 """represent a known config item
34 34
35 35 :section: the official config section where to find this item,
36 36 :name: the official name within the section,
37 37 :default: default value for this item,
38 38 :alias: optional list of tuples as alternatives,
39 39 :generic: this is a generic definition; the name is matched as a regular expression.
40 40 """
41 41
42 42 def __init__(
43 43 self,
44 44 section,
45 45 name,
46 46 default=None,
47 47 alias=(),
48 48 generic=False,
49 49 priority=0,
50 50 experimental=False,
51 51 ):
52 52 self.section = section
53 53 self.name = name
54 54 self.default = default
55 55 self.alias = list(alias)
56 56 self.generic = generic
57 57 self.priority = priority
58 58 self.experimental = experimental
59 59 self._re = None
60 60 if generic:
61 61 self._re = re.compile(self.name)
62 62
63 63
64 64 class itemregister(dict):
65 65 """A specialized dictionary that can handle wild-card selection"""
66 66
67 67 def __init__(self):
68 68 super(itemregister, self).__init__()
69 69 self._generics = set()
70 70
71 71 def update(self, other):
72 72 super(itemregister, self).update(other)
73 73 self._generics.update(other._generics)
74 74
75 75 def __setitem__(self, key, item):
76 76 super(itemregister, self).__setitem__(key, item)
77 77 if item.generic:
78 78 self._generics.add(item)
79 79
80 80 def get(self, key):
81 81 baseitem = super(itemregister, self).get(key)
82 82 if baseitem is not None and not baseitem.generic:
83 83 return baseitem
84 84
85 85 # search for a matching generic item
86 86 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
87 87 for item in generics:
88 88 # we use 'match' instead of 'search' to make the matching simpler
89 89 # for people unfamiliar with regular expressions. Having the match
90 90 # rooted at the start of the string produces less surprising
91 91 # results for users writing simple regexes for sub-attributes.
92 92 #
93 93 # For example, using "color\..*" with match produces an unsurprising
94 94 # result, while using search could suddenly match apparently
95 95 # unrelated configuration that happens to contain "color."
96 96 # anywhere. This is a tradeoff where we favor requiring ".*" on
97 97 # some patterns to avoid the need to prefix most patterns with "^".
98 98 # The "^" seems more error prone.
99 99 if item._re.match(key):
100 100 return item
101 101
102 102 return None
103 103
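A minimal sketch of the lookup behaviour described above (assuming the classes in this file are importable); keys are option names within one section, exactly as _register() stores them below.

    reg = itemregister()
    reg[b'ignore'] = configitem(b'pager', b'ignore', default=list)
    reg[b'attend-.*'] = configitem(b'pager', b'attend-.*', default=None, generic=True)

    assert reg.get(b'ignore').name == b'ignore'  # exact, non-generic item wins
    assert reg.get(b'attend-log').generic        # falls back to the generic pattern
    assert reg.get(b'unknown') is None           # match() is rooted at the start, so no hit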
104 104
105 105 coreitems = {}
106 106
107 107
108 108 def _register(configtable, *args, **kwargs):
109 109 item = configitem(*args, **kwargs)
110 110 section = configtable.setdefault(item.section, itemregister())
111 111 if item.name in section:
112 112 msg = b"duplicated config item registration for '%s.%s'"
113 113 raise error.ProgrammingError(msg % (item.section, item.name))
114 114 section[item.name] = item
115 115
116 116
117 117 # special value for the case where the default is derived from other values
118 118 dynamicdefault = object()
119 119
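A hypothetical sketch (not Mercurial's actual lookup code) of how such a sentinel is usually consumed: compare by identity and derive the real value when the sentinel is found; callables registered as defaults (e.g. `list` or lambdas below) are called to produce a fresh value.

    def resolve_default(item, compute_dynamic):
        # compute_dynamic is a hypothetical callable deriving the value
        # from other configuration
        if item.default is dynamicdefault:
            return compute_dynamic()
        if callable(item.default):
            return item.default()
        return item.default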
120 120 # Registering actual config items
121 121
122 122
123 123 def getitemregister(configtable):
124 124 f = functools.partial(_register, configtable)
125 125 # export pseudo enum as configitem.*
126 126 f.dynamicdefault = dynamicdefault
127 127 return f
128 128
129 129
130 130 coreconfigitem = getitemregister(coreitems)
131 131
132 132
133 133 def _registerdiffopts(section, configprefix=b''):
134 134 coreconfigitem(
135 135 section,
136 136 configprefix + b'nodates',
137 137 default=False,
138 138 )
139 139 coreconfigitem(
140 140 section,
141 141 configprefix + b'showfunc',
142 142 default=False,
143 143 )
144 144 coreconfigitem(
145 145 section,
146 146 configprefix + b'unified',
147 147 default=None,
148 148 )
149 149 coreconfigitem(
150 150 section,
151 151 configprefix + b'git',
152 152 default=False,
153 153 )
154 154 coreconfigitem(
155 155 section,
156 156 configprefix + b'ignorews',
157 157 default=False,
158 158 )
159 159 coreconfigitem(
160 160 section,
161 161 configprefix + b'ignorewsamount',
162 162 default=False,
163 163 )
164 164 coreconfigitem(
165 165 section,
166 166 configprefix + b'ignoreblanklines',
167 167 default=False,
168 168 )
169 169 coreconfigitem(
170 170 section,
171 171 configprefix + b'ignorewseol',
172 172 default=False,
173 173 )
174 174 coreconfigitem(
175 175 section,
176 176 configprefix + b'nobinary',
177 177 default=False,
178 178 )
179 179 coreconfigitem(
180 180 section,
181 181 configprefix + b'noprefix',
182 182 default=False,
183 183 )
184 184 coreconfigitem(
185 185 section,
186 186 configprefix + b'word-diff',
187 187 default=False,
188 188 )
189 189
190 190
191 191 coreconfigitem(
192 192 b'alias',
193 193 b'.*',
194 194 default=dynamicdefault,
195 195 generic=True,
196 196 )
197 197 coreconfigitem(
198 198 b'auth',
199 199 b'cookiefile',
200 200 default=None,
201 201 )
202 202 _registerdiffopts(section=b'annotate')
203 203 # bookmarks.pushing: internal hack for discovery
204 204 coreconfigitem(
205 205 b'bookmarks',
206 206 b'pushing',
207 207 default=list,
208 208 )
209 209 # bundle.mainreporoot: internal hack for bundlerepo
210 210 coreconfigitem(
211 211 b'bundle',
212 212 b'mainreporoot',
213 213 default=b'',
214 214 )
215 215 coreconfigitem(
216 216 b'censor',
217 217 b'policy',
218 218 default=b'abort',
219 219 experimental=True,
220 220 )
221 221 coreconfigitem(
222 222 b'chgserver',
223 223 b'idletimeout',
224 224 default=3600,
225 225 )
226 226 coreconfigitem(
227 227 b'chgserver',
228 228 b'skiphash',
229 229 default=False,
230 230 )
231 231 coreconfigitem(
232 232 b'cmdserver',
233 233 b'log',
234 234 default=None,
235 235 )
236 236 coreconfigitem(
237 237 b'cmdserver',
238 238 b'max-log-files',
239 239 default=7,
240 240 )
241 241 coreconfigitem(
242 242 b'cmdserver',
243 243 b'max-log-size',
244 244 default=b'1 MB',
245 245 )
246 246 coreconfigitem(
247 247 b'cmdserver',
248 248 b'max-repo-cache',
249 249 default=0,
250 250 experimental=True,
251 251 )
252 252 coreconfigitem(
253 253 b'cmdserver',
254 254 b'message-encodings',
255 255 default=list,
256 256 )
257 257 coreconfigitem(
258 258 b'cmdserver',
259 259 b'track-log',
260 260 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
261 261 )
262 262 coreconfigitem(
263 263 b'cmdserver',
264 264 b'shutdown-on-interrupt',
265 265 default=True,
266 266 )
267 267 coreconfigitem(
268 268 b'color',
269 269 b'.*',
270 270 default=None,
271 271 generic=True,
272 272 )
273 273 coreconfigitem(
274 274 b'color',
275 275 b'mode',
276 276 default=b'auto',
277 277 )
278 278 coreconfigitem(
279 279 b'color',
280 280 b'pagermode',
281 281 default=dynamicdefault,
282 282 )
283 283 coreconfigitem(
284 284 b'command-templates',
285 285 b'graphnode',
286 286 default=None,
287 287 alias=[(b'ui', b'graphnodetemplate')],
288 288 )
289 289 coreconfigitem(
290 290 b'command-templates',
291 291 b'log',
292 292 default=None,
293 293 alias=[(b'ui', b'logtemplate')],
294 294 )
295 295 coreconfigitem(
296 296 b'command-templates',
297 297 b'mergemarker',
298 298 default=(
299 299 b'{node|short} '
300 300 b'{ifeq(tags, "tip", "", '
301 301 b'ifeq(tags, "", "", "{tags} "))}'
302 302 b'{if(bookmarks, "{bookmarks} ")}'
303 303 b'{ifeq(branch, "default", "", "{branch} ")}'
304 304 b'- {author|user}: {desc|firstline}'
305 305 ),
306 306 alias=[(b'ui', b'mergemarkertemplate')],
307 307 )
308 308 coreconfigitem(
309 309 b'command-templates',
310 310 b'pre-merge-tool-output',
311 311 default=None,
312 312 alias=[(b'ui', b'pre-merge-tool-output-template')],
313 313 )
314 314 coreconfigitem(
315 315 b'command-templates',
316 316 b'oneline-summary',
317 317 default=None,
318 318 )
319 319 coreconfigitem(
320 320 b'command-templates',
321 321 b'oneline-summary.*',
322 322 default=dynamicdefault,
323 323 generic=True,
324 324 )
325 325 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
326 326 coreconfigitem(
327 327 b'commands',
328 328 b'commit.post-status',
329 329 default=False,
330 330 )
331 331 coreconfigitem(
332 332 b'commands',
333 333 b'grep.all-files',
334 334 default=False,
335 335 experimental=True,
336 336 )
337 337 coreconfigitem(
338 338 b'commands',
339 339 b'merge.require-rev',
340 340 default=False,
341 341 )
342 342 coreconfigitem(
343 343 b'commands',
344 344 b'push.require-revs',
345 345 default=False,
346 346 )
347 347 coreconfigitem(
348 348 b'commands',
349 349 b'resolve.confirm',
350 350 default=False,
351 351 )
352 352 coreconfigitem(
353 353 b'commands',
354 354 b'resolve.explicit-re-merge',
355 355 default=False,
356 356 )
357 357 coreconfigitem(
358 358 b'commands',
359 359 b'resolve.mark-check',
360 360 default=b'none',
361 361 )
362 362 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
363 363 coreconfigitem(
364 364 b'commands',
365 365 b'show.aliasprefix',
366 366 default=list,
367 367 )
368 368 coreconfigitem(
369 369 b'commands',
370 370 b'status.relative',
371 371 default=False,
372 372 )
373 373 coreconfigitem(
374 374 b'commands',
375 375 b'status.skipstates',
376 376 default=[],
377 377 experimental=True,
378 378 )
379 379 coreconfigitem(
380 380 b'commands',
381 381 b'status.terse',
382 382 default=b'',
383 383 )
384 384 coreconfigitem(
385 385 b'commands',
386 386 b'status.verbose',
387 387 default=False,
388 388 )
389 389 coreconfigitem(
390 390 b'commands',
391 391 b'update.check',
392 392 default=None,
393 393 )
394 394 coreconfigitem(
395 395 b'commands',
396 396 b'update.requiredest',
397 397 default=False,
398 398 )
399 399 coreconfigitem(
400 400 b'committemplate',
401 401 b'.*',
402 402 default=None,
403 403 generic=True,
404 404 )
405 405 coreconfigitem(
406 406 b'convert',
407 407 b'bzr.saverev',
408 408 default=True,
409 409 )
410 410 coreconfigitem(
411 411 b'convert',
412 412 b'cvsps.cache',
413 413 default=True,
414 414 )
415 415 coreconfigitem(
416 416 b'convert',
417 417 b'cvsps.fuzz',
418 418 default=60,
419 419 )
420 420 coreconfigitem(
421 421 b'convert',
422 422 b'cvsps.logencoding',
423 423 default=None,
424 424 )
425 425 coreconfigitem(
426 426 b'convert',
427 427 b'cvsps.mergefrom',
428 428 default=None,
429 429 )
430 430 coreconfigitem(
431 431 b'convert',
432 432 b'cvsps.mergeto',
433 433 default=None,
434 434 )
435 435 coreconfigitem(
436 436 b'convert',
437 437 b'git.committeractions',
438 438 default=lambda: [b'messagedifferent'],
439 439 )
440 440 coreconfigitem(
441 441 b'convert',
442 442 b'git.extrakeys',
443 443 default=list,
444 444 )
445 445 coreconfigitem(
446 446 b'convert',
447 447 b'git.findcopiesharder',
448 448 default=False,
449 449 )
450 450 coreconfigitem(
451 451 b'convert',
452 452 b'git.remoteprefix',
453 453 default=b'remote',
454 454 )
455 455 coreconfigitem(
456 456 b'convert',
457 457 b'git.renamelimit',
458 458 default=400,
459 459 )
460 460 coreconfigitem(
461 461 b'convert',
462 462 b'git.saverev',
463 463 default=True,
464 464 )
465 465 coreconfigitem(
466 466 b'convert',
467 467 b'git.similarity',
468 468 default=50,
469 469 )
470 470 coreconfigitem(
471 471 b'convert',
472 472 b'git.skipsubmodules',
473 473 default=False,
474 474 )
475 475 coreconfigitem(
476 476 b'convert',
477 477 b'hg.clonebranches',
478 478 default=False,
479 479 )
480 480 coreconfigitem(
481 481 b'convert',
482 482 b'hg.ignoreerrors',
483 483 default=False,
484 484 )
485 485 coreconfigitem(
486 486 b'convert',
487 487 b'hg.preserve-hash',
488 488 default=False,
489 489 )
490 490 coreconfigitem(
491 491 b'convert',
492 492 b'hg.revs',
493 493 default=None,
494 494 )
495 495 coreconfigitem(
496 496 b'convert',
497 497 b'hg.saverev',
498 498 default=False,
499 499 )
500 500 coreconfigitem(
501 501 b'convert',
502 502 b'hg.sourcename',
503 503 default=None,
504 504 )
505 505 coreconfigitem(
506 506 b'convert',
507 507 b'hg.startrev',
508 508 default=None,
509 509 )
510 510 coreconfigitem(
511 511 b'convert',
512 512 b'hg.tagsbranch',
513 513 default=b'default',
514 514 )
515 515 coreconfigitem(
516 516 b'convert',
517 517 b'hg.usebranchnames',
518 518 default=True,
519 519 )
520 520 coreconfigitem(
521 521 b'convert',
522 522 b'ignoreancestorcheck',
523 523 default=False,
524 524 experimental=True,
525 525 )
526 526 coreconfigitem(
527 527 b'convert',
528 528 b'localtimezone',
529 529 default=False,
530 530 )
531 531 coreconfigitem(
532 532 b'convert',
533 533 b'p4.encoding',
534 534 default=dynamicdefault,
535 535 )
536 536 coreconfigitem(
537 537 b'convert',
538 538 b'p4.startrev',
539 539 default=0,
540 540 )
541 541 coreconfigitem(
542 542 b'convert',
543 543 b'skiptags',
544 544 default=False,
545 545 )
546 546 coreconfigitem(
547 547 b'convert',
548 548 b'svn.debugsvnlog',
549 549 default=True,
550 550 )
551 551 coreconfigitem(
552 552 b'convert',
553 553 b'svn.trunk',
554 554 default=None,
555 555 )
556 556 coreconfigitem(
557 557 b'convert',
558 558 b'svn.tags',
559 559 default=None,
560 560 )
561 561 coreconfigitem(
562 562 b'convert',
563 563 b'svn.branches',
564 564 default=None,
565 565 )
566 566 coreconfigitem(
567 567 b'convert',
568 568 b'svn.startrev',
569 569 default=0,
570 570 )
571 571 coreconfigitem(
572 572 b'convert',
573 573 b'svn.dangerous-set-commit-dates',
574 574 default=False,
575 575 )
576 576 coreconfigitem(
577 577 b'debug',
578 578 b'dirstate.delaywrite',
579 579 default=0,
580 580 )
581 581 coreconfigitem(
582 582 b'debug',
583 583 b'revlog.verifyposition.changelog',
584 584 default=b'',
585 585 )
586 586 coreconfigitem(
587 587 b'debug',
588 588 b'revlog.debug-delta',
589 589 default=False,
590 590 )
591 591 # display extra information about the bundling process
592 592 coreconfigitem(
593 593 b'debug',
594 594 b'bundling-stats',
595 595 default=False,
596 596 )
597 597 # display extra information about the unbundling process
598 598 coreconfigitem(
599 599 b'debug',
600 600 b'unbundling-stats',
601 601 default=False,
602 602 )
603 603 coreconfigitem(
604 604 b'defaults',
605 605 b'.*',
606 606 default=None,
607 607 generic=True,
608 608 )
609 609 coreconfigitem(
610 610 b'devel',
611 611 b'all-warnings',
612 612 default=False,
613 613 )
614 614 coreconfigitem(
615 615 b'devel',
616 616 b'bundle2.debug',
617 617 default=False,
618 618 )
619 619 coreconfigitem(
620 620 b'devel',
621 621 b'bundle.delta',
622 622 default=b'',
623 623 )
624 624 coreconfigitem(
625 625 b'devel',
626 626 b'cache-vfs',
627 627 default=None,
628 628 )
629 629 coreconfigitem(
630 630 b'devel',
631 631 b'check-locks',
632 632 default=False,
633 633 )
634 634 coreconfigitem(
635 635 b'devel',
636 636 b'check-relroot',
637 637 default=False,
638 638 )
639 639 # Track copy information for all files, not just "added" ones (very slow)
640 640 coreconfigitem(
641 641 b'devel',
642 642 b'copy-tracing.trace-all-files',
643 643 default=False,
644 644 )
645 645 coreconfigitem(
646 646 b'devel',
647 647 b'default-date',
648 648 default=None,
649 649 )
650 650 coreconfigitem(
651 651 b'devel',
652 652 b'deprec-warn',
653 653 default=False,
654 654 )
655 655 coreconfigitem(
656 656 b'devel',
657 657 b'disableloaddefaultcerts',
658 658 default=False,
659 659 )
660 660 coreconfigitem(
661 661 b'devel',
662 662 b'warn-empty-changegroup',
663 663 default=False,
664 664 )
665 665 coreconfigitem(
666 666 b'devel',
667 667 b'legacy.exchange',
668 668 default=list,
669 669 )
670 670 # When True, revlogs use a special reference version of the nodemap that is not
671 671 # performant but is "known" to behave properly.
672 672 coreconfigitem(
673 673 b'devel',
674 674 b'persistent-nodemap',
675 675 default=False,
676 676 )
677 677 coreconfigitem(
678 678 b'devel',
679 679 b'servercafile',
680 680 default=b'',
681 681 )
682 682 coreconfigitem(
683 683 b'devel',
684 684 b'serverexactprotocol',
685 685 default=b'',
686 686 )
687 687 coreconfigitem(
688 688 b'devel',
689 689 b'serverrequirecert',
690 690 default=False,
691 691 )
692 692 coreconfigitem(
693 693 b'devel',
694 694 b'strip-obsmarkers',
695 695 default=True,
696 696 )
697 697 coreconfigitem(
698 698 b'devel',
699 699 b'warn-config',
700 700 default=None,
701 701 )
702 702 coreconfigitem(
703 703 b'devel',
704 704 b'warn-config-default',
705 705 default=None,
706 706 )
707 707 coreconfigitem(
708 708 b'devel',
709 709 b'user.obsmarker',
710 710 default=None,
711 711 )
712 712 coreconfigitem(
713 713 b'devel',
714 714 b'warn-config-unknown',
715 715 default=None,
716 716 )
717 717 coreconfigitem(
718 718 b'devel',
719 719 b'debug.copies',
720 720 default=False,
721 721 )
722 722 coreconfigitem(
723 723 b'devel',
724 724 b'copy-tracing.multi-thread',
725 725 default=True,
726 726 )
727 727 coreconfigitem(
728 728 b'devel',
729 729 b'debug.extensions',
730 730 default=False,
731 731 )
732 732 coreconfigitem(
733 733 b'devel',
734 734 b'debug.repo-filters',
735 735 default=False,
736 736 )
737 737 coreconfigitem(
738 738 b'devel',
739 739 b'debug.peer-request',
740 740 default=False,
741 741 )
742 742 # If discovery.exchange-heads is False, the discovery will not start with
743 743 # remote head fetching and local head querying.
744 744 coreconfigitem(
745 745 b'devel',
746 746 b'discovery.exchange-heads',
747 747 default=True,
748 748 )
749 749 # If discovery.grow-sample is False, the sample size used in set discovery will
750 750 # not be increased through the process
751 751 coreconfigitem(
752 752 b'devel',
753 753 b'discovery.grow-sample',
754 754 default=True,
755 755 )
756 756 # When discovery.grow-sample.dynamic is True (the default), the sample size is
757 757 # adapted to the shape of the undecided set: it is set to the max of
758 758 # <target-size>, len(roots(undecided)) and len(heads(undecided)).
759 759 coreconfigitem(
760 760 b'devel',
761 761 b'discovery.grow-sample.dynamic',
762 762 default=True,
763 763 )
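A small illustration of the sizing rule from the comment above (an editor's sketch, not the discovery code itself):

    def dynamic_sample_size(target_size, undecided_roots, undecided_heads):
        # with discovery.grow-sample.dynamic enabled, the sample is at least
        # the configured target size and at least as large as the number of
        # roots/heads of the undecided set
        return max(target_size, len(undecided_roots), len(undecided_heads))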
764 764 # discovery.grow-sample.rate controls the rate at which the sample grows
765 765 coreconfigitem(
766 766 b'devel',
767 767 b'discovery.grow-sample.rate',
768 768 default=1.05,
769 769 )
770 770 # If discovery.randomize is False, random sampling during discovery is
771 771 # deterministic. It is meant for integration tests.
772 772 coreconfigitem(
773 773 b'devel',
774 774 b'discovery.randomize',
775 775 default=True,
776 776 )
777 777 # Control the initial size of the discovery sample
778 778 coreconfigitem(
779 779 b'devel',
780 780 b'discovery.sample-size',
781 781 default=200,
782 782 )
783 783 # Control the sample size used for the initial discovery round
784 784 coreconfigitem(
785 785 b'devel',
786 786 b'discovery.sample-size.initial',
787 787 default=100,
788 788 )
789 789 _registerdiffopts(section=b'diff')
790 790 coreconfigitem(
791 791 b'diff',
792 792 b'merge',
793 793 default=False,
794 794 experimental=True,
795 795 )
796 796 coreconfigitem(
797 797 b'email',
798 798 b'bcc',
799 799 default=None,
800 800 )
801 801 coreconfigitem(
802 802 b'email',
803 803 b'cc',
804 804 default=None,
805 805 )
806 806 coreconfigitem(
807 807 b'email',
808 808 b'charsets',
809 809 default=list,
810 810 )
811 811 coreconfigitem(
812 812 b'email',
813 813 b'from',
814 814 default=None,
815 815 )
816 816 coreconfigitem(
817 817 b'email',
818 818 b'method',
819 819 default=b'smtp',
820 820 )
821 821 coreconfigitem(
822 822 b'email',
823 823 b'reply-to',
824 824 default=None,
825 825 )
826 826 coreconfigitem(
827 827 b'email',
828 828 b'to',
829 829 default=None,
830 830 )
831 831 coreconfigitem(
832 832 b'experimental',
833 833 b'archivemetatemplate',
834 834 default=dynamicdefault,
835 835 )
836 836 coreconfigitem(
837 837 b'experimental',
838 838 b'auto-publish',
839 839 default=b'publish',
840 840 )
841 841 coreconfigitem(
842 842 b'experimental',
843 843 b'bundle-phases',
844 844 default=False,
845 845 )
846 846 coreconfigitem(
847 847 b'experimental',
848 848 b'bundle2-advertise',
849 849 default=True,
850 850 )
851 851 coreconfigitem(
852 852 b'experimental',
853 853 b'bundle2-output-capture',
854 854 default=False,
855 855 )
856 856 coreconfigitem(
857 857 b'experimental',
858 858 b'bundle2.pushback',
859 859 default=False,
860 860 )
861 861 coreconfigitem(
862 862 b'experimental',
863 863 b'bundle2lazylocking',
864 864 default=False,
865 865 )
866 866 coreconfigitem(
867 867 b'experimental',
868 868 b'bundlecomplevel',
869 869 default=None,
870 870 )
871 871 coreconfigitem(
872 872 b'experimental',
873 873 b'bundlecomplevel.bzip2',
874 874 default=None,
875 875 )
876 876 coreconfigitem(
877 877 b'experimental',
878 878 b'bundlecomplevel.gzip',
879 879 default=None,
880 880 )
881 881 coreconfigitem(
882 882 b'experimental',
883 883 b'bundlecomplevel.none',
884 884 default=None,
885 885 )
886 886 coreconfigitem(
887 887 b'experimental',
888 888 b'bundlecomplevel.zstd',
889 889 default=None,
890 890 )
891 891 coreconfigitem(
892 892 b'experimental',
893 893 b'bundlecompthreads',
894 894 default=None,
895 895 )
896 896 coreconfigitem(
897 897 b'experimental',
898 898 b'bundlecompthreads.bzip2',
899 899 default=None,
900 900 )
901 901 coreconfigitem(
902 902 b'experimental',
903 903 b'bundlecompthreads.gzip',
904 904 default=None,
905 905 )
906 906 coreconfigitem(
907 907 b'experimental',
908 908 b'bundlecompthreads.none',
909 909 default=None,
910 910 )
911 911 coreconfigitem(
912 912 b'experimental',
913 913 b'bundlecompthreads.zstd',
914 914 default=None,
915 915 )
916 916 coreconfigitem(
917 917 b'experimental',
918 918 b'changegroup3',
919 919 default=False,
920 920 )
921 921 coreconfigitem(
922 922 b'experimental',
923 923 b'changegroup4',
924 924 default=False,
925 925 )
926
927 # We might remove this configuration once the rank computation has no noticeable impact
928 coreconfigitem(
929 b'experimental',
930 b'changelog-v2.compute-rank',
931 default=True,
932 )
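A hedged illustration of how the knob added by this changeset would typically be consumed (the helper name is hypothetical; only the config key comes from the registration above):

    def want_rank(ui):
        # skip rank computation for changelog-v2 when the user disabled it,
        # e.g. with "changelog-v2.compute-rank = no" in the [experimental]
        # section of an hgrc
        return ui.configbool(b'experimental', b'changelog-v2.compute-rank')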
926 933 coreconfigitem(
927 934 b'experimental',
928 935 b'cleanup-as-archived',
929 936 default=False,
930 937 )
931 938 coreconfigitem(
932 939 b'experimental',
933 940 b'clientcompressionengines',
934 941 default=list,
935 942 )
936 943 coreconfigitem(
937 944 b'experimental',
938 945 b'copytrace',
939 946 default=b'on',
940 947 )
941 948 coreconfigitem(
942 949 b'experimental',
943 950 b'copytrace.movecandidateslimit',
944 951 default=100,
945 952 )
946 953 coreconfigitem(
947 954 b'experimental',
948 955 b'copytrace.sourcecommitlimit',
949 956 default=100,
950 957 )
951 958 coreconfigitem(
952 959 b'experimental',
953 960 b'copies.read-from',
954 961 default=b"filelog-only",
955 962 )
956 963 coreconfigitem(
957 964 b'experimental',
958 965 b'copies.write-to',
959 966 default=b'filelog-only',
960 967 )
961 968 coreconfigitem(
962 969 b'experimental',
963 970 b'crecordtest',
964 971 default=None,
965 972 )
966 973 coreconfigitem(
967 974 b'experimental',
968 975 b'directaccess',
969 976 default=False,
970 977 )
971 978 coreconfigitem(
972 979 b'experimental',
973 980 b'directaccess.revnums',
974 981 default=False,
975 982 )
976 983 coreconfigitem(
977 984 b'experimental',
978 985 b'editortmpinhg',
979 986 default=False,
980 987 )
981 988 coreconfigitem(
982 989 b'experimental',
983 990 b'evolution',
984 991 default=list,
985 992 )
986 993 coreconfigitem(
987 994 b'experimental',
988 995 b'evolution.allowdivergence',
989 996 default=False,
990 997 alias=[(b'experimental', b'allowdivergence')],
991 998 )
992 999 coreconfigitem(
993 1000 b'experimental',
994 1001 b'evolution.allowunstable',
995 1002 default=None,
996 1003 )
997 1004 coreconfigitem(
998 1005 b'experimental',
999 1006 b'evolution.createmarkers',
1000 1007 default=None,
1001 1008 )
1002 1009 coreconfigitem(
1003 1010 b'experimental',
1004 1011 b'evolution.effect-flags',
1005 1012 default=True,
1006 1013 alias=[(b'experimental', b'effect-flags')],
1007 1014 )
1008 1015 coreconfigitem(
1009 1016 b'experimental',
1010 1017 b'evolution.exchange',
1011 1018 default=None,
1012 1019 )
1013 1020 coreconfigitem(
1014 1021 b'experimental',
1015 1022 b'evolution.bundle-obsmarker',
1016 1023 default=False,
1017 1024 )
1018 1025 coreconfigitem(
1019 1026 b'experimental',
1020 1027 b'evolution.bundle-obsmarker:mandatory',
1021 1028 default=True,
1022 1029 )
1023 1030 coreconfigitem(
1024 1031 b'experimental',
1025 1032 b'log.topo',
1026 1033 default=False,
1027 1034 )
1028 1035 coreconfigitem(
1029 1036 b'experimental',
1030 1037 b'evolution.report-instabilities',
1031 1038 default=True,
1032 1039 )
1033 1040 coreconfigitem(
1034 1041 b'experimental',
1035 1042 b'evolution.track-operation',
1036 1043 default=True,
1037 1044 )
1038 1045 # repo-level config to exclude a revset from visibility
1039 1046 #
1040 1047 # The target use case is to use `share` to expose different subsets of the same
1041 1048 # repository, especially server side. See also `server.view`.
1042 1049 coreconfigitem(
1043 1050 b'experimental',
1044 1051 b'extra-filter-revs',
1045 1052 default=None,
1046 1053 )
1047 1054 coreconfigitem(
1048 1055 b'experimental',
1049 1056 b'maxdeltachainspan',
1050 1057 default=-1,
1051 1058 )
1052 1059 # tracks files which were undeleted (merge might delete them but we explicitly
1053 1060 # kept/undeleted them) and creates new filenodes for them
1054 1061 coreconfigitem(
1055 1062 b'experimental',
1056 1063 b'merge-track-salvaged',
1057 1064 default=False,
1058 1065 )
1059 1066 coreconfigitem(
1060 1067 b'experimental',
1061 1068 b'mmapindexthreshold',
1062 1069 default=None,
1063 1070 )
1064 1071 coreconfigitem(
1065 1072 b'experimental',
1066 1073 b'narrow',
1067 1074 default=False,
1068 1075 )
1069 1076 coreconfigitem(
1070 1077 b'experimental',
1071 1078 b'nonnormalparanoidcheck',
1072 1079 default=False,
1073 1080 )
1074 1081 coreconfigitem(
1075 1082 b'experimental',
1076 1083 b'exportableenviron',
1077 1084 default=list,
1078 1085 )
1079 1086 coreconfigitem(
1080 1087 b'experimental',
1081 1088 b'extendedheader.index',
1082 1089 default=None,
1083 1090 )
1084 1091 coreconfigitem(
1085 1092 b'experimental',
1086 1093 b'extendedheader.similarity',
1087 1094 default=False,
1088 1095 )
1089 1096 coreconfigitem(
1090 1097 b'experimental',
1091 1098 b'graphshorten',
1092 1099 default=False,
1093 1100 )
1094 1101 coreconfigitem(
1095 1102 b'experimental',
1096 1103 b'graphstyle.parent',
1097 1104 default=dynamicdefault,
1098 1105 )
1099 1106 coreconfigitem(
1100 1107 b'experimental',
1101 1108 b'graphstyle.missing',
1102 1109 default=dynamicdefault,
1103 1110 )
1104 1111 coreconfigitem(
1105 1112 b'experimental',
1106 1113 b'graphstyle.grandparent',
1107 1114 default=dynamicdefault,
1108 1115 )
1109 1116 coreconfigitem(
1110 1117 b'experimental',
1111 1118 b'hook-track-tags',
1112 1119 default=False,
1113 1120 )
1114 1121 coreconfigitem(
1115 1122 b'experimental',
1116 1123 b'httppostargs',
1117 1124 default=False,
1118 1125 )
1119 1126 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1120 1127 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1121 1128
1122 1129 coreconfigitem(
1123 1130 b'experimental',
1124 1131 b'obsmarkers-exchange-debug',
1125 1132 default=False,
1126 1133 )
1127 1134 coreconfigitem(
1128 1135 b'experimental',
1129 1136 b'remotenames',
1130 1137 default=False,
1131 1138 )
1132 1139 coreconfigitem(
1133 1140 b'experimental',
1134 1141 b'removeemptydirs',
1135 1142 default=True,
1136 1143 )
1137 1144 coreconfigitem(
1138 1145 b'experimental',
1139 1146 b'revert.interactive.select-to-keep',
1140 1147 default=False,
1141 1148 )
1142 1149 coreconfigitem(
1143 1150 b'experimental',
1144 1151 b'revisions.prefixhexnode',
1145 1152 default=False,
1146 1153 )
1147 1154 # "out of experimental" todo list.
1148 1155 #
1149 1156 # * include management of a persistent nodemap in the main docket
1150 1157 # * enforce a "no-truncate" policy for mmap safety
1151 1158 # - for censoring operation
1152 1159 # - for stripping operation
1153 1160 # - for rollback operation
1154 1161 # * proper streaming (race free) of the docket file
1155 1162 # * track garbage data to eventually allow rewriting -existing- sidedata.
1156 1163 # * Exchange-wise, we will also need to do something more efficient than
1157 1164 # keeping references to the affected revlogs, especially memory-wise when
1158 1165 # rewriting sidedata.
1159 1166 # * introduce a proper solution to reduce the number of filelog related files.
1160 1167 # * use caching for reading sidedata (similar to what we do for data).
1161 1168 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1162 1169 # * Improvements to consider
1163 1170 # - avoid compression header in chunks using the default compression?
1164 1171 # - forbid "inline" compression mode entirely?
1165 1172 # - split the data offset and flag field (the 2 bytes saved are mostly trouble)
1166 1173 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1167 1174 # - keep track of chain base or size (probably not that useful anymore)
1168 1175 coreconfigitem(
1169 1176 b'experimental',
1170 1177 b'revlogv2',
1171 1178 default=None,
1172 1179 )
1173 1180 coreconfigitem(
1174 1181 b'experimental',
1175 1182 b'revisions.disambiguatewithin',
1176 1183 default=None,
1177 1184 )
1178 1185 coreconfigitem(
1179 1186 b'experimental',
1180 1187 b'rust.index',
1181 1188 default=False,
1182 1189 )
1183 1190 coreconfigitem(
1184 1191 b'experimental',
1185 1192 b'server.filesdata.recommended-batch-size',
1186 1193 default=50000,
1187 1194 )
1188 1195 coreconfigitem(
1189 1196 b'experimental',
1190 1197 b'server.manifestdata.recommended-batch-size',
1191 1198 default=100000,
1192 1199 )
1193 1200 coreconfigitem(
1194 1201 b'experimental',
1195 1202 b'server.stream-narrow-clones',
1196 1203 default=False,
1197 1204 )
1198 1205 coreconfigitem(
1199 1206 b'experimental',
1200 1207 b'single-head-per-branch',
1201 1208 default=False,
1202 1209 )
1203 1210 coreconfigitem(
1204 1211 b'experimental',
1205 1212 b'single-head-per-branch:account-closed-heads',
1206 1213 default=False,
1207 1214 )
1208 1215 coreconfigitem(
1209 1216 b'experimental',
1210 1217 b'single-head-per-branch:public-changes-only',
1211 1218 default=False,
1212 1219 )
1213 1220 coreconfigitem(
1214 1221 b'experimental',
1215 1222 b'sparse-read',
1216 1223 default=False,
1217 1224 )
1218 1225 coreconfigitem(
1219 1226 b'experimental',
1220 1227 b'sparse-read.density-threshold',
1221 1228 default=0.50,
1222 1229 )
1223 1230 coreconfigitem(
1224 1231 b'experimental',
1225 1232 b'sparse-read.min-gap-size',
1226 1233 default=b'65K',
1227 1234 )
1228 1235 coreconfigitem(
1229 1236 b'experimental',
1230 1237 b'treemanifest',
1231 1238 default=False,
1232 1239 )
1233 1240 coreconfigitem(
1234 1241 b'experimental',
1235 1242 b'update.atomic-file',
1236 1243 default=False,
1237 1244 )
1238 1245 coreconfigitem(
1239 1246 b'experimental',
1240 1247 b'web.full-garbage-collection-rate',
1241 1248 default=1, # still forcing a full collection on each request
1242 1249 )
1243 1250 coreconfigitem(
1244 1251 b'experimental',
1245 1252 b'worker.wdir-get-thread-safe',
1246 1253 default=False,
1247 1254 )
1248 1255 coreconfigitem(
1249 1256 b'experimental',
1250 1257 b'worker.repository-upgrade',
1251 1258 default=False,
1252 1259 )
1253 1260 coreconfigitem(
1254 1261 b'experimental',
1255 1262 b'xdiff',
1256 1263 default=False,
1257 1264 )
1258 1265 coreconfigitem(
1259 1266 b'extensions',
1260 1267 b'[^:]*',
1261 1268 default=None,
1262 1269 generic=True,
1263 1270 )
1264 1271 coreconfigitem(
1265 1272 b'extensions',
1266 1273 b'[^:]*:required',
1267 1274 default=False,
1268 1275 generic=True,
1269 1276 )
1270 1277 coreconfigitem(
1271 1278 b'extdata',
1272 1279 b'.*',
1273 1280 default=None,
1274 1281 generic=True,
1275 1282 )
1276 1283 coreconfigitem(
1277 1284 b'format',
1278 1285 b'bookmarks-in-store',
1279 1286 default=False,
1280 1287 )
1281 1288 coreconfigitem(
1282 1289 b'format',
1283 1290 b'chunkcachesize',
1284 1291 default=None,
1285 1292 experimental=True,
1286 1293 )
1287 1294 coreconfigitem(
1288 1295 # Enable this dirstate format *when creating a new repository*.
1289 1296 # Which format to use for existing repos is controlled by .hg/requires
1290 1297 b'format',
1291 1298 b'use-dirstate-v2',
1292 1299 default=False,
1293 1300 experimental=True,
1294 1301 alias=[(b'format', b'exp-rc-dirstate-v2')],
1295 1302 )
1296 1303 coreconfigitem(
1297 1304 b'format',
1298 1305 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
1299 1306 default=False,
1300 1307 experimental=True,
1301 1308 )
1302 1309 coreconfigitem(
1303 1310 b'format',
1304 1311 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet',
1305 1312 default=False,
1306 1313 experimental=True,
1307 1314 )
1308 1315 coreconfigitem(
1309 1316 b'format',
1310 1317 b'use-dirstate-tracked-hint',
1311 1318 default=False,
1312 1319 experimental=True,
1313 1320 )
1314 1321 coreconfigitem(
1315 1322 b'format',
1316 1323 b'use-dirstate-tracked-hint.version',
1317 1324 default=1,
1318 1325 experimental=True,
1319 1326 )
1320 1327 coreconfigitem(
1321 1328 b'format',
1322 1329 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
1323 1330 default=False,
1324 1331 experimental=True,
1325 1332 )
1326 1333 coreconfigitem(
1327 1334 b'format',
1328 1335 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet',
1329 1336 default=False,
1330 1337 experimental=True,
1331 1338 )
1332 1339 coreconfigitem(
1333 1340 b'format',
1334 1341 b'dotencode',
1335 1342 default=True,
1336 1343 )
1337 1344 coreconfigitem(
1338 1345 b'format',
1339 1346 b'generaldelta',
1340 1347 default=False,
1341 1348 experimental=True,
1342 1349 )
1343 1350 coreconfigitem(
1344 1351 b'format',
1345 1352 b'manifestcachesize',
1346 1353 default=None,
1347 1354 experimental=True,
1348 1355 )
1349 1356 coreconfigitem(
1350 1357 b'format',
1351 1358 b'maxchainlen',
1352 1359 default=dynamicdefault,
1353 1360 experimental=True,
1354 1361 )
1355 1362 coreconfigitem(
1356 1363 b'format',
1357 1364 b'obsstore-version',
1358 1365 default=None,
1359 1366 )
1360 1367 coreconfigitem(
1361 1368 b'format',
1362 1369 b'sparse-revlog',
1363 1370 default=True,
1364 1371 )
1365 1372 coreconfigitem(
1366 1373 b'format',
1367 1374 b'revlog-compression',
1368 1375 default=lambda: [b'zstd', b'zlib'],
1369 1376 alias=[(b'experimental', b'format.compression')],
1370 1377 )
1371 1378 # Experimental TODOs:
1372 1379 #
1373 1380 # * Same as for revlogv2 (but for the reduction of the number of files)
1374 1381 # * Actually computing the rank of changesets
1375 1382 # * Improvements to investigate
1376 1383 # - storing .hgtags fnode
1377 1384 # - storing branch related identifier
1378 1385
1379 1386 coreconfigitem(
1380 1387 b'format',
1381 1388 b'exp-use-changelog-v2',
1382 1389 default=None,
1383 1390 experimental=True,
1384 1391 )
1385 1392 coreconfigitem(
1386 1393 b'format',
1387 1394 b'usefncache',
1388 1395 default=True,
1389 1396 )
1390 1397 coreconfigitem(
1391 1398 b'format',
1392 1399 b'usegeneraldelta',
1393 1400 default=True,
1394 1401 )
1395 1402 coreconfigitem(
1396 1403 b'format',
1397 1404 b'usestore',
1398 1405 default=True,
1399 1406 )
1400 1407
1401 1408
1402 1409 def _persistent_nodemap_default():
1403 1410 """compute `use-persistent-nodemap` default value
1404 1411
1405 1412 The feature is disabled unless a fast implementation is available.
1406 1413 """
1407 1414 from . import policy
1408 1415
1409 1416 return policy.importrust('revlog') is not None
1410 1417
1411 1418
1412 1419 coreconfigitem(
1413 1420 b'format',
1414 1421 b'use-persistent-nodemap',
1415 1422 default=_persistent_nodemap_default,
1416 1423 )
1417 1424 coreconfigitem(
1418 1425 b'format',
1419 1426 b'exp-use-copies-side-data-changeset',
1420 1427 default=False,
1421 1428 experimental=True,
1422 1429 )
1423 1430 coreconfigitem(
1424 1431 b'format',
1425 1432 b'use-share-safe',
1426 1433 default=True,
1427 1434 )
1428 1435 coreconfigitem(
1429 1436 b'format',
1430 1437 b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
1431 1438 default=False,
1432 1439 experimental=True,
1433 1440 )
1434 1441 coreconfigitem(
1435 1442 b'format',
1436 1443 b'use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet',
1437 1444 default=False,
1438 1445 experimental=True,
1439 1446 )
1440 1447
1441 1448 # Turning this on by default means we are confident about the scaling of phases.
1442 1449 # This is not guaranteed to be the case at the time this message is written.
1443 1450 coreconfigitem(
1444 1451 b'format',
1445 1452 b'use-internal-phase',
1446 1453 default=False,
1447 1454 experimental=True,
1448 1455 )
1449 1456 # The interaction between the archived phase and obsolescence markers needs to
1450 1457 # be sorted out before wider usage of this is to be considered.
1451 1458 #
1452 1459 # At the time this message is written, behavior when archiving obsolete
1453 1460 # changesets differs significantly from stripping. As part of stripping, we also
1454 1461 # remove the obsolescence markers associated with the stripped changesets,
1455 1462 # revealing the predecessor changesets when applicable. When archiving, we
1456 1463 # don't touch the obsolescence markers, keeping everything hidden. This can
1457 1464 # result in quite a confusing situation for people combining exchange of drafts
1458 1465 # with the archived phase, as some markers needed by others may be skipped
1459 1466 # during exchange.
1460 1467 coreconfigitem(
1461 1468 b'format',
1462 1469 b'exp-archived-phase',
1463 1470 default=False,
1464 1471 experimental=True,
1465 1472 )
1466 1473 coreconfigitem(
1467 1474 b'shelve',
1468 1475 b'store',
1469 1476 default=b'internal',
1470 1477 experimental=True,
1471 1478 )
1472 1479 coreconfigitem(
1473 1480 b'fsmonitor',
1474 1481 b'warn_when_unused',
1475 1482 default=True,
1476 1483 )
1477 1484 coreconfigitem(
1478 1485 b'fsmonitor',
1479 1486 b'warn_update_file_count',
1480 1487 default=50000,
1481 1488 )
1482 1489 coreconfigitem(
1483 1490 b'fsmonitor',
1484 1491 b'warn_update_file_count_rust',
1485 1492 default=400000,
1486 1493 )
1487 1494 coreconfigitem(
1488 1495 b'help',
1489 1496 br'hidden-command\..*',
1490 1497 default=False,
1491 1498 generic=True,
1492 1499 )
1493 1500 coreconfigitem(
1494 1501 b'help',
1495 1502 br'hidden-topic\..*',
1496 1503 default=False,
1497 1504 generic=True,
1498 1505 )
1499 1506 coreconfigitem(
1500 1507 b'hooks',
1501 1508 b'[^:]*',
1502 1509 default=dynamicdefault,
1503 1510 generic=True,
1504 1511 )
1505 1512 coreconfigitem(
1506 1513 b'hooks',
1507 1514 b'.*:run-with-plain',
1508 1515 default=True,
1509 1516 generic=True,
1510 1517 )
1511 1518 coreconfigitem(
1512 1519 b'hgweb-paths',
1513 1520 b'.*',
1514 1521 default=list,
1515 1522 generic=True,
1516 1523 )
1517 1524 coreconfigitem(
1518 1525 b'hostfingerprints',
1519 1526 b'.*',
1520 1527 default=list,
1521 1528 generic=True,
1522 1529 )
1523 1530 coreconfigitem(
1524 1531 b'hostsecurity',
1525 1532 b'ciphers',
1526 1533 default=None,
1527 1534 )
1528 1535 coreconfigitem(
1529 1536 b'hostsecurity',
1530 1537 b'minimumprotocol',
1531 1538 default=dynamicdefault,
1532 1539 )
1533 1540 coreconfigitem(
1534 1541 b'hostsecurity',
1535 1542 b'.*:minimumprotocol$',
1536 1543 default=dynamicdefault,
1537 1544 generic=True,
1538 1545 )
1539 1546 coreconfigitem(
1540 1547 b'hostsecurity',
1541 1548 b'.*:ciphers$',
1542 1549 default=dynamicdefault,
1543 1550 generic=True,
1544 1551 )
1545 1552 coreconfigitem(
1546 1553 b'hostsecurity',
1547 1554 b'.*:fingerprints$',
1548 1555 default=list,
1549 1556 generic=True,
1550 1557 )
1551 1558 coreconfigitem(
1552 1559 b'hostsecurity',
1553 1560 b'.*:verifycertsfile$',
1554 1561 default=None,
1555 1562 generic=True,
1556 1563 )
1557 1564
1558 1565 coreconfigitem(
1559 1566 b'http_proxy',
1560 1567 b'always',
1561 1568 default=False,
1562 1569 )
1563 1570 coreconfigitem(
1564 1571 b'http_proxy',
1565 1572 b'host',
1566 1573 default=None,
1567 1574 )
1568 1575 coreconfigitem(
1569 1576 b'http_proxy',
1570 1577 b'no',
1571 1578 default=list,
1572 1579 )
1573 1580 coreconfigitem(
1574 1581 b'http_proxy',
1575 1582 b'passwd',
1576 1583 default=None,
1577 1584 )
1578 1585 coreconfigitem(
1579 1586 b'http_proxy',
1580 1587 b'user',
1581 1588 default=None,
1582 1589 )
1583 1590
1584 1591 coreconfigitem(
1585 1592 b'http',
1586 1593 b'timeout',
1587 1594 default=None,
1588 1595 )
1589 1596
1590 1597 coreconfigitem(
1591 1598 b'logtoprocess',
1592 1599 b'commandexception',
1593 1600 default=None,
1594 1601 )
1595 1602 coreconfigitem(
1596 1603 b'logtoprocess',
1597 1604 b'commandfinish',
1598 1605 default=None,
1599 1606 )
1600 1607 coreconfigitem(
1601 1608 b'logtoprocess',
1602 1609 b'command',
1603 1610 default=None,
1604 1611 )
1605 1612 coreconfigitem(
1606 1613 b'logtoprocess',
1607 1614 b'develwarn',
1608 1615 default=None,
1609 1616 )
1610 1617 coreconfigitem(
1611 1618 b'logtoprocess',
1612 1619 b'uiblocked',
1613 1620 default=None,
1614 1621 )
1615 1622 coreconfigitem(
1616 1623 b'merge',
1617 1624 b'checkunknown',
1618 1625 default=b'abort',
1619 1626 )
1620 1627 coreconfigitem(
1621 1628 b'merge',
1622 1629 b'checkignored',
1623 1630 default=b'abort',
1624 1631 )
1625 1632 coreconfigitem(
1626 1633 b'experimental',
1627 1634 b'merge.checkpathconflicts',
1628 1635 default=False,
1629 1636 )
1630 1637 coreconfigitem(
1631 1638 b'merge',
1632 1639 b'followcopies',
1633 1640 default=True,
1634 1641 )
1635 1642 coreconfigitem(
1636 1643 b'merge',
1637 1644 b'on-failure',
1638 1645 default=b'continue',
1639 1646 )
1640 1647 coreconfigitem(
1641 1648 b'merge',
1642 1649 b'preferancestor',
1643 1650 default=lambda: [b'*'],
1644 1651 experimental=True,
1645 1652 )
1646 1653 coreconfigitem(
1647 1654 b'merge',
1648 1655 b'strict-capability-check',
1649 1656 default=False,
1650 1657 )
1651 1658 coreconfigitem(
1652 1659 b'merge',
1653 1660 b'disable-partial-tools',
1654 1661 default=False,
1655 1662 experimental=True,
1656 1663 )
1657 1664 coreconfigitem(
1658 1665 b'partial-merge-tools',
1659 1666 b'.*',
1660 1667 default=None,
1661 1668 generic=True,
1662 1669 experimental=True,
1663 1670 )
1664 1671 coreconfigitem(
1665 1672 b'partial-merge-tools',
1666 1673 br'.*\.patterns',
1667 1674 default=dynamicdefault,
1668 1675 generic=True,
1669 1676 priority=-1,
1670 1677 experimental=True,
1671 1678 )
1672 1679 coreconfigitem(
1673 1680 b'partial-merge-tools',
1674 1681 br'.*\.executable$',
1675 1682 default=dynamicdefault,
1676 1683 generic=True,
1677 1684 priority=-1,
1678 1685 experimental=True,
1679 1686 )
1680 1687 coreconfigitem(
1681 1688 b'partial-merge-tools',
1682 1689 br'.*\.order',
1683 1690 default=0,
1684 1691 generic=True,
1685 1692 priority=-1,
1686 1693 experimental=True,
1687 1694 )
1688 1695 coreconfigitem(
1689 1696 b'partial-merge-tools',
1690 1697 br'.*\.args',
1691 1698 default=b"$local $base $other",
1692 1699 generic=True,
1693 1700 priority=-1,
1694 1701 experimental=True,
1695 1702 )
1696 1703 coreconfigitem(
1697 1704 b'partial-merge-tools',
1698 1705 br'.*\.disable',
1699 1706 default=False,
1700 1707 generic=True,
1701 1708 priority=-1,
1702 1709 experimental=True,
1703 1710 )
1704 1711 coreconfigitem(
1705 1712 b'merge-tools',
1706 1713 b'.*',
1707 1714 default=None,
1708 1715 generic=True,
1709 1716 )
1710 1717 coreconfigitem(
1711 1718 b'merge-tools',
1712 1719 br'.*\.args$',
1713 1720 default=b"$local $base $other",
1714 1721 generic=True,
1715 1722 priority=-1,
1716 1723 )
1717 1724 coreconfigitem(
1718 1725 b'merge-tools',
1719 1726 br'.*\.binary$',
1720 1727 default=False,
1721 1728 generic=True,
1722 1729 priority=-1,
1723 1730 )
1724 1731 coreconfigitem(
1725 1732 b'merge-tools',
1726 1733 br'.*\.check$',
1727 1734 default=list,
1728 1735 generic=True,
1729 1736 priority=-1,
1730 1737 )
1731 1738 coreconfigitem(
1732 1739 b'merge-tools',
1733 1740 br'.*\.checkchanged$',
1734 1741 default=False,
1735 1742 generic=True,
1736 1743 priority=-1,
1737 1744 )
1738 1745 coreconfigitem(
1739 1746 b'merge-tools',
1740 1747 br'.*\.executable$',
1741 1748 default=dynamicdefault,
1742 1749 generic=True,
1743 1750 priority=-1,
1744 1751 )
1745 1752 coreconfigitem(
1746 1753 b'merge-tools',
1747 1754 br'.*\.fixeol$',
1748 1755 default=False,
1749 1756 generic=True,
1750 1757 priority=-1,
1751 1758 )
1752 1759 coreconfigitem(
1753 1760 b'merge-tools',
1754 1761 br'.*\.gui$',
1755 1762 default=False,
1756 1763 generic=True,
1757 1764 priority=-1,
1758 1765 )
1759 1766 coreconfigitem(
1760 1767 b'merge-tools',
1761 1768 br'.*\.mergemarkers$',
1762 1769 default=b'basic',
1763 1770 generic=True,
1764 1771 priority=-1,
1765 1772 )
1766 1773 coreconfigitem(
1767 1774 b'merge-tools',
1768 1775 br'.*\.mergemarkertemplate$',
1769 1776 default=dynamicdefault, # take from command-templates.mergemarker
1770 1777 generic=True,
1771 1778 priority=-1,
1772 1779 )
1773 1780 coreconfigitem(
1774 1781 b'merge-tools',
1775 1782 br'.*\.priority$',
1776 1783 default=0,
1777 1784 generic=True,
1778 1785 priority=-1,
1779 1786 )
1780 1787 coreconfigitem(
1781 1788 b'merge-tools',
1782 1789 br'.*\.premerge$',
1783 1790 default=dynamicdefault,
1784 1791 generic=True,
1785 1792 priority=-1,
1786 1793 )
1787 1794 coreconfigitem(
1788 1795 b'merge-tools',
1789 1796 br'.*\.regappend$',
1790 1797 default=b"",
1791 1798 generic=True,
1792 1799 priority=-1,
1793 1800 )
1794 1801 coreconfigitem(
1795 1802 b'merge-tools',
1796 1803 br'.*\.symlink$',
1797 1804 default=False,
1798 1805 generic=True,
1799 1806 priority=-1,
1800 1807 )
1801 1808 coreconfigitem(
1802 1809 b'pager',
1803 1810 b'attend-.*',
1804 1811 default=dynamicdefault,
1805 1812 generic=True,
1806 1813 )
1807 1814 coreconfigitem(
1808 1815 b'pager',
1809 1816 b'ignore',
1810 1817 default=list,
1811 1818 )
1812 1819 coreconfigitem(
1813 1820 b'pager',
1814 1821 b'pager',
1815 1822 default=dynamicdefault,
1816 1823 )
1817 1824 coreconfigitem(
1818 1825 b'patch',
1819 1826 b'eol',
1820 1827 default=b'strict',
1821 1828 )
1822 1829 coreconfigitem(
1823 1830 b'patch',
1824 1831 b'fuzz',
1825 1832 default=2,
1826 1833 )
1827 1834 coreconfigitem(
1828 1835 b'paths',
1829 1836 b'default',
1830 1837 default=None,
1831 1838 )
1832 1839 coreconfigitem(
1833 1840 b'paths',
1834 1841 b'default-push',
1835 1842 default=None,
1836 1843 )
1837 1844 coreconfigitem(
1838 1845 b'paths',
1839 1846 b'.*',
1840 1847 default=None,
1841 1848 generic=True,
1842 1849 )
1843 1850 coreconfigitem(
1844 1851 b'paths',
1845 1852 b'.*:bookmarks.mode',
1846 1853 default='default',
1847 1854 generic=True,
1848 1855 )
1849 1856 coreconfigitem(
1850 1857 b'paths',
1851 1858 b'.*:multi-urls',
1852 1859 default=False,
1853 1860 generic=True,
1854 1861 )
1855 1862 coreconfigitem(
1856 1863 b'paths',
1857 1864 b'.*:pushrev',
1858 1865 default=None,
1859 1866 generic=True,
1860 1867 )
1861 1868 coreconfigitem(
1862 1869 b'paths',
1863 1870 b'.*:pushurl',
1864 1871 default=None,
1865 1872 generic=True,
1866 1873 )
1867 1874 coreconfigitem(
1868 1875 b'phases',
1869 1876 b'checksubrepos',
1870 1877 default=b'follow',
1871 1878 )
1872 1879 coreconfigitem(
1873 1880 b'phases',
1874 1881 b'new-commit',
1875 1882 default=b'draft',
1876 1883 )
1877 1884 coreconfigitem(
1878 1885 b'phases',
1879 1886 b'publish',
1880 1887 default=True,
1881 1888 )
1882 1889 coreconfigitem(
1883 1890 b'profiling',
1884 1891 b'enabled',
1885 1892 default=False,
1886 1893 )
1887 1894 coreconfigitem(
1888 1895 b'profiling',
1889 1896 b'format',
1890 1897 default=b'text',
1891 1898 )
1892 1899 coreconfigitem(
1893 1900 b'profiling',
1894 1901 b'freq',
1895 1902 default=1000,
1896 1903 )
1897 1904 coreconfigitem(
1898 1905 b'profiling',
1899 1906 b'limit',
1900 1907 default=30,
1901 1908 )
1902 1909 coreconfigitem(
1903 1910 b'profiling',
1904 1911 b'nested',
1905 1912 default=0,
1906 1913 )
1907 1914 coreconfigitem(
1908 1915 b'profiling',
1909 1916 b'output',
1910 1917 default=None,
1911 1918 )
1912 1919 coreconfigitem(
1913 1920 b'profiling',
1914 1921 b'showmax',
1915 1922 default=0.999,
1916 1923 )
1917 1924 coreconfigitem(
1918 1925 b'profiling',
1919 1926 b'showmin',
1920 1927 default=dynamicdefault,
1921 1928 )
1922 1929 coreconfigitem(
1923 1930 b'profiling',
1924 1931 b'showtime',
1925 1932 default=True,
1926 1933 )
1927 1934 coreconfigitem(
1928 1935 b'profiling',
1929 1936 b'sort',
1930 1937 default=b'inlinetime',
1931 1938 )
1932 1939 coreconfigitem(
1933 1940 b'profiling',
1934 1941 b'statformat',
1935 1942 default=b'hotpath',
1936 1943 )
1937 1944 coreconfigitem(
1938 1945 b'profiling',
1939 1946 b'time-track',
1940 1947 default=dynamicdefault,
1941 1948 )
1942 1949 coreconfigitem(
1943 1950 b'profiling',
1944 1951 b'type',
1945 1952 default=b'stat',
1946 1953 )
1947 1954 coreconfigitem(
1948 1955 b'progress',
1949 1956 b'assume-tty',
1950 1957 default=False,
1951 1958 )
1952 1959 coreconfigitem(
1953 1960 b'progress',
1954 1961 b'changedelay',
1955 1962 default=1,
1956 1963 )
1957 1964 coreconfigitem(
1958 1965 b'progress',
1959 1966 b'clear-complete',
1960 1967 default=True,
1961 1968 )
1962 1969 coreconfigitem(
1963 1970 b'progress',
1964 1971 b'debug',
1965 1972 default=False,
1966 1973 )
1967 1974 coreconfigitem(
1968 1975 b'progress',
1969 1976 b'delay',
1970 1977 default=3,
1971 1978 )
1972 1979 coreconfigitem(
1973 1980 b'progress',
1974 1981 b'disable',
1975 1982 default=False,
1976 1983 )
1977 1984 coreconfigitem(
1978 1985 b'progress',
1979 1986 b'estimateinterval',
1980 1987 default=60.0,
1981 1988 )
1982 1989 coreconfigitem(
1983 1990 b'progress',
1984 1991 b'format',
1985 1992 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1986 1993 )
1987 1994 coreconfigitem(
1988 1995 b'progress',
1989 1996 b'refresh',
1990 1997 default=0.1,
1991 1998 )
1992 1999 coreconfigitem(
1993 2000 b'progress',
1994 2001 b'width',
1995 2002 default=dynamicdefault,
1996 2003 )
1997 2004 coreconfigitem(
1998 2005 b'pull',
1999 2006 b'confirm',
2000 2007 default=False,
2001 2008 )
2002 2009 coreconfigitem(
2003 2010 b'push',
2004 2011 b'pushvars.server',
2005 2012 default=False,
2006 2013 )
2007 2014 coreconfigitem(
2008 2015 b'rewrite',
2009 2016 b'backup-bundle',
2010 2017 default=True,
2011 2018 alias=[(b'ui', b'history-editing-backup')],
2012 2019 )
2013 2020 coreconfigitem(
2014 2021 b'rewrite',
2015 2022 b'update-timestamp',
2016 2023 default=False,
2017 2024 )
2018 2025 coreconfigitem(
2019 2026 b'rewrite',
2020 2027 b'empty-successor',
2021 2028 default=b'skip',
2022 2029 experimental=True,
2023 2030 )
2024 2031 # experimental as long as format.use-dirstate-v2 is.
2025 2032 coreconfigitem(
2026 2033 b'storage',
2027 2034 b'dirstate-v2.slow-path',
2028 2035 default=b"abort",
2029 2036 experimental=True,
2030 2037 )
2031 2038 coreconfigitem(
2032 2039 b'storage',
2033 2040 b'new-repo-backend',
2034 2041 default=b'revlogv1',
2035 2042 experimental=True,
2036 2043 )
2037 2044 coreconfigitem(
2038 2045 b'storage',
2039 2046 b'revlog.optimize-delta-parent-choice',
2040 2047 default=True,
2041 2048 alias=[(b'format', b'aggressivemergedeltas')],
2042 2049 )
2043 2050 coreconfigitem(
2044 2051 b'storage',
2045 2052 b'revlog.delta-parent-search.candidate-group-chunk-size',
2046 2053 default=10,
2047 2054 )
2048 2055 coreconfigitem(
2049 2056 b'storage',
2050 2057 b'revlog.issue6528.fix-incoming',
2051 2058 default=True,
2052 2059 )
2053 2060 # experimental as long as rust is experimental (or a C version is implemented)
2054 2061 coreconfigitem(
2055 2062 b'storage',
2056 2063 b'revlog.persistent-nodemap.mmap',
2057 2064 default=True,
2058 2065 )
2059 2066 # experimental as long as format.use-persistent-nodemap is.
2060 2067 coreconfigitem(
2061 2068 b'storage',
2062 2069 b'revlog.persistent-nodemap.slow-path',
2063 2070 default=b"abort",
2064 2071 )
2065 2072
2066 2073 coreconfigitem(
2067 2074 b'storage',
2068 2075 b'revlog.reuse-external-delta',
2069 2076 default=True,
2070 2077 )
2071 2078 coreconfigitem(
2072 2079 b'storage',
2073 2080 b'revlog.reuse-external-delta-parent',
2074 2081 default=None,
2075 2082 )
2076 2083 coreconfigitem(
2077 2084 b'storage',
2078 2085 b'revlog.zlib.level',
2079 2086 default=None,
2080 2087 )
2081 2088 coreconfigitem(
2082 2089 b'storage',
2083 2090 b'revlog.zstd.level',
2084 2091 default=None,
2085 2092 )
2086 2093 coreconfigitem(
2087 2094 b'server',
2088 2095 b'bookmarks-pushkey-compat',
2089 2096 default=True,
2090 2097 )
2091 2098 coreconfigitem(
2092 2099 b'server',
2093 2100 b'bundle1',
2094 2101 default=True,
2095 2102 )
2096 2103 coreconfigitem(
2097 2104 b'server',
2098 2105 b'bundle1gd',
2099 2106 default=None,
2100 2107 )
2101 2108 coreconfigitem(
2102 2109 b'server',
2103 2110 b'bundle1.pull',
2104 2111 default=None,
2105 2112 )
2106 2113 coreconfigitem(
2107 2114 b'server',
2108 2115 b'bundle1gd.pull',
2109 2116 default=None,
2110 2117 )
2111 2118 coreconfigitem(
2112 2119 b'server',
2113 2120 b'bundle1.push',
2114 2121 default=None,
2115 2122 )
2116 2123 coreconfigitem(
2117 2124 b'server',
2118 2125 b'bundle1gd.push',
2119 2126 default=None,
2120 2127 )
2121 2128 coreconfigitem(
2122 2129 b'server',
2123 2130 b'bundle2.stream',
2124 2131 default=True,
2125 2132 alias=[(b'experimental', b'bundle2.stream')],
2126 2133 )
2127 2134 coreconfigitem(
2128 2135 b'server',
2129 2136 b'compressionengines',
2130 2137 default=list,
2131 2138 )
2132 2139 coreconfigitem(
2133 2140 b'server',
2134 2141 b'concurrent-push-mode',
2135 2142 default=b'check-related',
2136 2143 )
2137 2144 coreconfigitem(
2138 2145 b'server',
2139 2146 b'disablefullbundle',
2140 2147 default=False,
2141 2148 )
2142 2149 coreconfigitem(
2143 2150 b'server',
2144 2151 b'maxhttpheaderlen',
2145 2152 default=1024,
2146 2153 )
2147 2154 coreconfigitem(
2148 2155 b'server',
2149 2156 b'pullbundle',
2150 2157 default=True,
2151 2158 )
2152 2159 coreconfigitem(
2153 2160 b'server',
2154 2161 b'preferuncompressed',
2155 2162 default=False,
2156 2163 )
2157 2164 coreconfigitem(
2158 2165 b'server',
2159 2166 b'streamunbundle',
2160 2167 default=False,
2161 2168 )
2162 2169 coreconfigitem(
2163 2170 b'server',
2164 2171 b'uncompressed',
2165 2172 default=True,
2166 2173 )
2167 2174 coreconfigitem(
2168 2175 b'server',
2169 2176 b'uncompressedallowsecret',
2170 2177 default=False,
2171 2178 )
2172 2179 coreconfigitem(
2173 2180 b'server',
2174 2181 b'view',
2175 2182 default=b'served',
2176 2183 )
2177 2184 coreconfigitem(
2178 2185 b'server',
2179 2186 b'validate',
2180 2187 default=False,
2181 2188 )
2182 2189 coreconfigitem(
2183 2190 b'server',
2184 2191 b'zliblevel',
2185 2192 default=-1,
2186 2193 )
2187 2194 coreconfigitem(
2188 2195 b'server',
2189 2196 b'zstdlevel',
2190 2197 default=3,
2191 2198 )
2192 2199 coreconfigitem(
2193 2200 b'share',
2194 2201 b'pool',
2195 2202 default=None,
2196 2203 )
2197 2204 coreconfigitem(
2198 2205 b'share',
2199 2206 b'poolnaming',
2200 2207 default=b'identity',
2201 2208 )
2202 2209 coreconfigitem(
2203 2210 b'share',
2204 2211 b'safe-mismatch.source-not-safe',
2205 2212 default=b'abort',
2206 2213 )
2207 2214 coreconfigitem(
2208 2215 b'share',
2209 2216 b'safe-mismatch.source-safe',
2210 2217 default=b'abort',
2211 2218 )
2212 2219 coreconfigitem(
2213 2220 b'share',
2214 2221 b'safe-mismatch.source-not-safe.warn',
2215 2222 default=True,
2216 2223 )
2217 2224 coreconfigitem(
2218 2225 b'share',
2219 2226 b'safe-mismatch.source-safe.warn',
2220 2227 default=True,
2221 2228 )
2222 2229 coreconfigitem(
2223 2230 b'share',
2224 2231 b'safe-mismatch.source-not-safe:verbose-upgrade',
2225 2232 default=True,
2226 2233 )
2227 2234 coreconfigitem(
2228 2235 b'share',
2229 2236 b'safe-mismatch.source-safe:verbose-upgrade',
2230 2237 default=True,
2231 2238 )
2232 2239 coreconfigitem(
2233 2240 b'shelve',
2234 2241 b'maxbackups',
2235 2242 default=10,
2236 2243 )
2237 2244 coreconfigitem(
2238 2245 b'smtp',
2239 2246 b'host',
2240 2247 default=None,
2241 2248 )
2242 2249 coreconfigitem(
2243 2250 b'smtp',
2244 2251 b'local_hostname',
2245 2252 default=None,
2246 2253 )
2247 2254 coreconfigitem(
2248 2255 b'smtp',
2249 2256 b'password',
2250 2257 default=None,
2251 2258 )
2252 2259 coreconfigitem(
2253 2260 b'smtp',
2254 2261 b'port',
2255 2262 default=dynamicdefault,
2256 2263 )
2257 2264 coreconfigitem(
2258 2265 b'smtp',
2259 2266 b'tls',
2260 2267 default=b'none',
2261 2268 )
2262 2269 coreconfigitem(
2263 2270 b'smtp',
2264 2271 b'username',
2265 2272 default=None,
2266 2273 )
2267 2274 coreconfigitem(
2268 2275 b'sparse',
2269 2276 b'missingwarning',
2270 2277 default=True,
2271 2278 experimental=True,
2272 2279 )
2273 2280 coreconfigitem(
2274 2281 b'subrepos',
2275 2282 b'allowed',
2276 2283 default=dynamicdefault, # to make backporting simpler
2277 2284 )
2278 2285 coreconfigitem(
2279 2286 b'subrepos',
2280 2287 b'hg:allowed',
2281 2288 default=dynamicdefault,
2282 2289 )
2283 2290 coreconfigitem(
2284 2291 b'subrepos',
2285 2292 b'git:allowed',
2286 2293 default=dynamicdefault,
2287 2294 )
2288 2295 coreconfigitem(
2289 2296 b'subrepos',
2290 2297 b'svn:allowed',
2291 2298 default=dynamicdefault,
2292 2299 )
2293 2300 coreconfigitem(
2294 2301 b'templates',
2295 2302 b'.*',
2296 2303 default=None,
2297 2304 generic=True,
2298 2305 )
2299 2306 coreconfigitem(
2300 2307 b'templateconfig',
2301 2308 b'.*',
2302 2309 default=dynamicdefault,
2303 2310 generic=True,
2304 2311 )
2305 2312 coreconfigitem(
2306 2313 b'trusted',
2307 2314 b'groups',
2308 2315 default=list,
2309 2316 )
2310 2317 coreconfigitem(
2311 2318 b'trusted',
2312 2319 b'users',
2313 2320 default=list,
2314 2321 )
2315 2322 coreconfigitem(
2316 2323 b'ui',
2317 2324 b'_usedassubrepo',
2318 2325 default=False,
2319 2326 )
2320 2327 coreconfigitem(
2321 2328 b'ui',
2322 2329 b'allowemptycommit',
2323 2330 default=False,
2324 2331 )
2325 2332 coreconfigitem(
2326 2333 b'ui',
2327 2334 b'archivemeta',
2328 2335 default=True,
2329 2336 )
2330 2337 coreconfigitem(
2331 2338 b'ui',
2332 2339 b'askusername',
2333 2340 default=False,
2334 2341 )
2335 2342 coreconfigitem(
2336 2343 b'ui',
2337 2344 b'available-memory',
2338 2345 default=None,
2339 2346 )
2340 2347
2341 2348 coreconfigitem(
2342 2349 b'ui',
2343 2350 b'clonebundlefallback',
2344 2351 default=False,
2345 2352 )
2346 2353 coreconfigitem(
2347 2354 b'ui',
2348 2355 b'clonebundleprefers',
2349 2356 default=list,
2350 2357 )
2351 2358 coreconfigitem(
2352 2359 b'ui',
2353 2360 b'clonebundles',
2354 2361 default=True,
2355 2362 )
2356 2363 coreconfigitem(
2357 2364 b'ui',
2358 2365 b'color',
2359 2366 default=b'auto',
2360 2367 )
2361 2368 coreconfigitem(
2362 2369 b'ui',
2363 2370 b'commitsubrepos',
2364 2371 default=False,
2365 2372 )
2366 2373 coreconfigitem(
2367 2374 b'ui',
2368 2375 b'debug',
2369 2376 default=False,
2370 2377 )
2371 2378 coreconfigitem(
2372 2379 b'ui',
2373 2380 b'debugger',
2374 2381 default=None,
2375 2382 )
2376 2383 coreconfigitem(
2377 2384 b'ui',
2378 2385 b'editor',
2379 2386 default=dynamicdefault,
2380 2387 )
2381 2388 coreconfigitem(
2382 2389 b'ui',
2383 2390 b'detailed-exit-code',
2384 2391 default=False,
2385 2392 experimental=True,
2386 2393 )
2387 2394 coreconfigitem(
2388 2395 b'ui',
2389 2396 b'fallbackencoding',
2390 2397 default=None,
2391 2398 )
2392 2399 coreconfigitem(
2393 2400 b'ui',
2394 2401 b'forcecwd',
2395 2402 default=None,
2396 2403 )
2397 2404 coreconfigitem(
2398 2405 b'ui',
2399 2406 b'forcemerge',
2400 2407 default=None,
2401 2408 )
2402 2409 coreconfigitem(
2403 2410 b'ui',
2404 2411 b'formatdebug',
2405 2412 default=False,
2406 2413 )
2407 2414 coreconfigitem(
2408 2415 b'ui',
2409 2416 b'formatjson',
2410 2417 default=False,
2411 2418 )
2412 2419 coreconfigitem(
2413 2420 b'ui',
2414 2421 b'formatted',
2415 2422 default=None,
2416 2423 )
2417 2424 coreconfigitem(
2418 2425 b'ui',
2419 2426 b'interactive',
2420 2427 default=None,
2421 2428 )
2422 2429 coreconfigitem(
2423 2430 b'ui',
2424 2431 b'interface',
2425 2432 default=None,
2426 2433 )
2427 2434 coreconfigitem(
2428 2435 b'ui',
2429 2436 b'interface.chunkselector',
2430 2437 default=None,
2431 2438 )
2432 2439 coreconfigitem(
2433 2440 b'ui',
2434 2441 b'large-file-limit',
2435 2442 default=10 * (2 ** 20),
2436 2443 )
2437 2444 coreconfigitem(
2438 2445 b'ui',
2439 2446 b'logblockedtimes',
2440 2447 default=False,
2441 2448 )
2442 2449 coreconfigitem(
2443 2450 b'ui',
2444 2451 b'merge',
2445 2452 default=None,
2446 2453 )
2447 2454 coreconfigitem(
2448 2455 b'ui',
2449 2456 b'mergemarkers',
2450 2457 default=b'basic',
2451 2458 )
2452 2459 coreconfigitem(
2453 2460 b'ui',
2454 2461 b'message-output',
2455 2462 default=b'stdio',
2456 2463 )
2457 2464 coreconfigitem(
2458 2465 b'ui',
2459 2466 b'nontty',
2460 2467 default=False,
2461 2468 )
2462 2469 coreconfigitem(
2463 2470 b'ui',
2464 2471 b'origbackuppath',
2465 2472 default=None,
2466 2473 )
2467 2474 coreconfigitem(
2468 2475 b'ui',
2469 2476 b'paginate',
2470 2477 default=True,
2471 2478 )
2472 2479 coreconfigitem(
2473 2480 b'ui',
2474 2481 b'patch',
2475 2482 default=None,
2476 2483 )
2477 2484 coreconfigitem(
2478 2485 b'ui',
2479 2486 b'portablefilenames',
2480 2487 default=b'warn',
2481 2488 )
2482 2489 coreconfigitem(
2483 2490 b'ui',
2484 2491 b'promptecho',
2485 2492 default=False,
2486 2493 )
2487 2494 coreconfigitem(
2488 2495 b'ui',
2489 2496 b'quiet',
2490 2497 default=False,
2491 2498 )
2492 2499 coreconfigitem(
2493 2500 b'ui',
2494 2501 b'quietbookmarkmove',
2495 2502 default=False,
2496 2503 )
2497 2504 coreconfigitem(
2498 2505 b'ui',
2499 2506 b'relative-paths',
2500 2507 default=b'legacy',
2501 2508 )
2502 2509 coreconfigitem(
2503 2510 b'ui',
2504 2511 b'remotecmd',
2505 2512 default=b'hg',
2506 2513 )
2507 2514 coreconfigitem(
2508 2515 b'ui',
2509 2516 b'report_untrusted',
2510 2517 default=True,
2511 2518 )
2512 2519 coreconfigitem(
2513 2520 b'ui',
2514 2521 b'rollback',
2515 2522 default=True,
2516 2523 )
2517 2524 coreconfigitem(
2518 2525 b'ui',
2519 2526 b'signal-safe-lock',
2520 2527 default=True,
2521 2528 )
2522 2529 coreconfigitem(
2523 2530 b'ui',
2524 2531 b'slash',
2525 2532 default=False,
2526 2533 )
2527 2534 coreconfigitem(
2528 2535 b'ui',
2529 2536 b'ssh',
2530 2537 default=b'ssh',
2531 2538 )
2532 2539 coreconfigitem(
2533 2540 b'ui',
2534 2541 b'ssherrorhint',
2535 2542 default=None,
2536 2543 )
2537 2544 coreconfigitem(
2538 2545 b'ui',
2539 2546 b'statuscopies',
2540 2547 default=False,
2541 2548 )
2542 2549 coreconfigitem(
2543 2550 b'ui',
2544 2551 b'strict',
2545 2552 default=False,
2546 2553 )
2547 2554 coreconfigitem(
2548 2555 b'ui',
2549 2556 b'style',
2550 2557 default=b'',
2551 2558 )
2552 2559 coreconfigitem(
2553 2560 b'ui',
2554 2561 b'supportcontact',
2555 2562 default=None,
2556 2563 )
2557 2564 coreconfigitem(
2558 2565 b'ui',
2559 2566 b'textwidth',
2560 2567 default=78,
2561 2568 )
2562 2569 coreconfigitem(
2563 2570 b'ui',
2564 2571 b'timeout',
2565 2572 default=b'600',
2566 2573 )
2567 2574 coreconfigitem(
2568 2575 b'ui',
2569 2576 b'timeout.warn',
2570 2577 default=0,
2571 2578 )
2572 2579 coreconfigitem(
2573 2580 b'ui',
2574 2581 b'timestamp-output',
2575 2582 default=False,
2576 2583 )
2577 2584 coreconfigitem(
2578 2585 b'ui',
2579 2586 b'traceback',
2580 2587 default=False,
2581 2588 )
2582 2589 coreconfigitem(
2583 2590 b'ui',
2584 2591 b'tweakdefaults',
2585 2592 default=False,
2586 2593 )
2587 2594 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2588 2595 coreconfigitem(
2589 2596 b'ui',
2590 2597 b'verbose',
2591 2598 default=False,
2592 2599 )
2593 2600 coreconfigitem(
2594 2601 b'verify',
2595 2602 b'skipflags',
2596 2603 default=0,
2597 2604 )
2598 2605 coreconfigitem(
2599 2606 b'web',
2600 2607 b'allowbz2',
2601 2608 default=False,
2602 2609 )
2603 2610 coreconfigitem(
2604 2611 b'web',
2605 2612 b'allowgz',
2606 2613 default=False,
2607 2614 )
2608 2615 coreconfigitem(
2609 2616 b'web',
2610 2617 b'allow-pull',
2611 2618 alias=[(b'web', b'allowpull')],
2612 2619 default=True,
2613 2620 )
2614 2621 coreconfigitem(
2615 2622 b'web',
2616 2623 b'allow-push',
2617 2624 alias=[(b'web', b'allow_push')],
2618 2625 default=list,
2619 2626 )
2620 2627 coreconfigitem(
2621 2628 b'web',
2622 2629 b'allowzip',
2623 2630 default=False,
2624 2631 )
2625 2632 coreconfigitem(
2626 2633 b'web',
2627 2634 b'archivesubrepos',
2628 2635 default=False,
2629 2636 )
2630 2637 coreconfigitem(
2631 2638 b'web',
2632 2639 b'cache',
2633 2640 default=True,
2634 2641 )
2635 2642 coreconfigitem(
2636 2643 b'web',
2637 2644 b'comparisoncontext',
2638 2645 default=5,
2639 2646 )
2640 2647 coreconfigitem(
2641 2648 b'web',
2642 2649 b'contact',
2643 2650 default=None,
2644 2651 )
2645 2652 coreconfigitem(
2646 2653 b'web',
2647 2654 b'deny_push',
2648 2655 default=list,
2649 2656 )
2650 2657 coreconfigitem(
2651 2658 b'web',
2652 2659 b'guessmime',
2653 2660 default=False,
2654 2661 )
2655 2662 coreconfigitem(
2656 2663 b'web',
2657 2664 b'hidden',
2658 2665 default=False,
2659 2666 )
2660 2667 coreconfigitem(
2661 2668 b'web',
2662 2669 b'labels',
2663 2670 default=list,
2664 2671 )
2665 2672 coreconfigitem(
2666 2673 b'web',
2667 2674 b'logoimg',
2668 2675 default=b'hglogo.png',
2669 2676 )
2670 2677 coreconfigitem(
2671 2678 b'web',
2672 2679 b'logourl',
2673 2680 default=b'https://mercurial-scm.org/',
2674 2681 )
2675 2682 coreconfigitem(
2676 2683 b'web',
2677 2684 b'accesslog',
2678 2685 default=b'-',
2679 2686 )
2680 2687 coreconfigitem(
2681 2688 b'web',
2682 2689 b'address',
2683 2690 default=b'',
2684 2691 )
2685 2692 coreconfigitem(
2686 2693 b'web',
2687 2694 b'allow-archive',
2688 2695 alias=[(b'web', b'allow_archive')],
2689 2696 default=list,
2690 2697 )
2691 2698 coreconfigitem(
2692 2699 b'web',
2693 2700 b'allow_read',
2694 2701 default=list,
2695 2702 )
2696 2703 coreconfigitem(
2697 2704 b'web',
2698 2705 b'baseurl',
2699 2706 default=None,
2700 2707 )
2701 2708 coreconfigitem(
2702 2709 b'web',
2703 2710 b'cacerts',
2704 2711 default=None,
2705 2712 )
2706 2713 coreconfigitem(
2707 2714 b'web',
2708 2715 b'certificate',
2709 2716 default=None,
2710 2717 )
2711 2718 coreconfigitem(
2712 2719 b'web',
2713 2720 b'collapse',
2714 2721 default=False,
2715 2722 )
2716 2723 coreconfigitem(
2717 2724 b'web',
2718 2725 b'csp',
2719 2726 default=None,
2720 2727 )
2721 2728 coreconfigitem(
2722 2729 b'web',
2723 2730 b'deny_read',
2724 2731 default=list,
2725 2732 )
2726 2733 coreconfigitem(
2727 2734 b'web',
2728 2735 b'descend',
2729 2736 default=True,
2730 2737 )
2731 2738 coreconfigitem(
2732 2739 b'web',
2733 2740 b'description',
2734 2741 default=b"",
2735 2742 )
2736 2743 coreconfigitem(
2737 2744 b'web',
2738 2745 b'encoding',
2739 2746 default=lambda: encoding.encoding,
2740 2747 )
2741 2748 coreconfigitem(
2742 2749 b'web',
2743 2750 b'errorlog',
2744 2751 default=b'-',
2745 2752 )
2746 2753 coreconfigitem(
2747 2754 b'web',
2748 2755 b'ipv6',
2749 2756 default=False,
2750 2757 )
2751 2758 coreconfigitem(
2752 2759 b'web',
2753 2760 b'maxchanges',
2754 2761 default=10,
2755 2762 )
2756 2763 coreconfigitem(
2757 2764 b'web',
2758 2765 b'maxfiles',
2759 2766 default=10,
2760 2767 )
2761 2768 coreconfigitem(
2762 2769 b'web',
2763 2770 b'maxshortchanges',
2764 2771 default=60,
2765 2772 )
2766 2773 coreconfigitem(
2767 2774 b'web',
2768 2775 b'motd',
2769 2776 default=b'',
2770 2777 )
2771 2778 coreconfigitem(
2772 2779 b'web',
2773 2780 b'name',
2774 2781 default=dynamicdefault,
2775 2782 )
2776 2783 coreconfigitem(
2777 2784 b'web',
2778 2785 b'port',
2779 2786 default=8000,
2780 2787 )
2781 2788 coreconfigitem(
2782 2789 b'web',
2783 2790 b'prefix',
2784 2791 default=b'',
2785 2792 )
2786 2793 coreconfigitem(
2787 2794 b'web',
2788 2795 b'push_ssl',
2789 2796 default=True,
2790 2797 )
2791 2798 coreconfigitem(
2792 2799 b'web',
2793 2800 b'refreshinterval',
2794 2801 default=20,
2795 2802 )
2796 2803 coreconfigitem(
2797 2804 b'web',
2798 2805 b'server-header',
2799 2806 default=None,
2800 2807 )
2801 2808 coreconfigitem(
2802 2809 b'web',
2803 2810 b'static',
2804 2811 default=None,
2805 2812 )
2806 2813 coreconfigitem(
2807 2814 b'web',
2808 2815 b'staticurl',
2809 2816 default=None,
2810 2817 )
2811 2818 coreconfigitem(
2812 2819 b'web',
2813 2820 b'stripes',
2814 2821 default=1,
2815 2822 )
2816 2823 coreconfigitem(
2817 2824 b'web',
2818 2825 b'style',
2819 2826 default=b'paper',
2820 2827 )
2821 2828 coreconfigitem(
2822 2829 b'web',
2823 2830 b'templates',
2824 2831 default=None,
2825 2832 )
2826 2833 coreconfigitem(
2827 2834 b'web',
2828 2835 b'view',
2829 2836 default=b'served',
2830 2837 experimental=True,
2831 2838 )
2832 2839 coreconfigitem(
2833 2840 b'worker',
2834 2841 b'backgroundclose',
2835 2842 default=dynamicdefault,
2836 2843 )
2837 2844 # Windows defaults to a limit of 512 open files. A buffer of 128
2838 2845 # should give us enough headway.
2839 2846 coreconfigitem(
2840 2847 b'worker',
2841 2848 b'backgroundclosemaxqueue',
2842 2849 default=384,
2843 2850 )
2844 2851 coreconfigitem(
2845 2852 b'worker',
2846 2853 b'backgroundcloseminfilecount',
2847 2854 default=2048,
2848 2855 )
2849 2856 coreconfigitem(
2850 2857 b'worker',
2851 2858 b'backgroundclosethreadcount',
2852 2859 default=4,
2853 2860 )
2854 2861 coreconfigitem(
2855 2862 b'worker',
2856 2863 b'enabled',
2857 2864 default=True,
2858 2865 )
2859 2866 coreconfigitem(
2860 2867 b'worker',
2861 2868 b'numcpus',
2862 2869 default=None,
2863 2870 )
2864 2871
2865 2872 # Rebase-related configuration moved to core because other extensions are doing
2866 2873 # strange things. For example, shelve imports the extension to reuse some bits
2867 2874 # without formally loading it.
2868 2875 coreconfigitem(
2869 2876 b'commands',
2870 2877 b'rebase.requiredest',
2871 2878 default=False,
2872 2879 )
2873 2880 coreconfigitem(
2874 2881 b'experimental',
2875 2882 b'rebaseskipobsolete',
2876 2883 default=True,
2877 2884 )
2878 2885 coreconfigitem(
2879 2886 b'rebase',
2880 2887 b'singletransaction',
2881 2888 default=False,
2882 2889 )
2883 2890 coreconfigitem(
2884 2891 b'rebase',
2885 2892 b'experimental.inmemory',
2886 2893 default=False,
2887 2894 )
2888 2895
2889 2896 # This setting controls creation of a rebase_source extra field
2890 2897 # during rebase. When False, no such field is created. This is
2891 2898 # useful, e.g., for incrementally converting changesets and then
2892 2899 # rebasing them onto an existing repo.
2893 2900 # WARNING: this is an advanced setting reserved for people who know
2894 2901 # exactly what they are doing. Misuse of this setting can easily
2895 2902 # result in obsmarker cycles and a vivid headache.
2896 2903 coreconfigitem(
2897 2904 b'rebase',
2898 2905 b'store-source',
2899 2906 default=True,
2900 2907 experimental=True,
2901 2908 )
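# For illustration only: the item above maps to the [rebase] section of an
# hgrc, so a user could drop the rebase_source extra field with something like:
#
#   [rebase]
#   store-source = no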
@@ -1,3978 +1,3980 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import functools
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from concurrent import futures
18 18 from typing import (
19 19 Optional,
20 20 )
21 21
22 22 from .i18n import _
23 23 from .node import (
24 24 bin,
25 25 hex,
26 26 nullrev,
27 27 sha1nodeconstants,
28 28 short,
29 29 )
30 30 from .pycompat import (
31 31 delattr,
32 32 getattr,
33 33 )
34 34 from . import (
35 35 bookmarks,
36 36 branchmap,
37 37 bundle2,
38 38 bundlecaches,
39 39 changegroup,
40 40 color,
41 41 commit,
42 42 context,
43 43 dirstate,
44 44 dirstateguard,
45 45 discovery,
46 46 encoding,
47 47 error,
48 48 exchange,
49 49 extensions,
50 50 filelog,
51 51 hook,
52 52 lock as lockmod,
53 53 match as matchmod,
54 54 mergestate as mergestatemod,
55 55 mergeutil,
56 56 namespaces,
57 57 narrowspec,
58 58 obsolete,
59 59 pathutil,
60 60 phases,
61 61 pushkey,
62 62 pycompat,
63 63 rcutil,
64 64 repoview,
65 65 requirements as requirementsmod,
66 66 revlog,
67 67 revset,
68 68 revsetlang,
69 69 scmutil,
70 70 sparse,
71 71 store as storemod,
72 72 subrepoutil,
73 73 tags as tagsmod,
74 74 transaction,
75 75 txnutil,
76 76 util,
77 77 vfs as vfsmod,
78 78 wireprototypes,
79 79 )
80 80
81 81 from .interfaces import (
82 82 repository,
83 83 util as interfaceutil,
84 84 )
85 85
86 86 from .utils import (
87 87 hashutil,
88 88 procutil,
89 89 stringutil,
90 90 urlutil,
91 91 )
92 92
93 93 from .revlogutils import (
94 94 concurrency_checker as revlogchecker,
95 95 constants as revlogconst,
96 96 sidedata as sidedatamod,
97 97 )
98 98
99 99 release = lockmod.release
100 100 urlerr = util.urlerr
101 101 urlreq = util.urlreq
102 102
103 103 # set of (path, vfs-location) tuples. vfs-location is:
104 104 # - 'plain' for vfs relative paths
105 105 # - '' for svfs relative paths
106 106 _cachedfiles = set()
107 107
108 108
109 109 class _basefilecache(scmutil.filecache):
110 110 """All filecache usage on repo are done for logic that should be unfiltered"""
111 111
112 112 def __get__(self, repo, type=None):
113 113 if repo is None:
114 114 return self
115 115 # proxy to unfiltered __dict__ since filtered repo has no entry
116 116 unfi = repo.unfiltered()
117 117 try:
118 118 return unfi.__dict__[self.sname]
119 119 except KeyError:
120 120 pass
121 121 return super(_basefilecache, self).__get__(unfi, type)
122 122
123 123 def set(self, repo, value):
124 124 return super(_basefilecache, self).set(repo.unfiltered(), value)
125 125
126 126
127 127 class repofilecache(_basefilecache):
128 128 """filecache for files in .hg but outside of .hg/store"""
129 129
130 130 def __init__(self, *paths):
131 131 super(repofilecache, self).__init__(*paths)
132 132 for path in paths:
133 133 _cachedfiles.add((path, b'plain'))
134 134
135 135 def join(self, obj, fname):
136 136 return obj.vfs.join(fname)
137 137
138 138
139 139 class storecache(_basefilecache):
140 140 """filecache for files in the store"""
141 141
142 142 def __init__(self, *paths):
143 143 super(storecache, self).__init__(*paths)
144 144 for path in paths:
145 145 _cachedfiles.add((path, b''))
146 146
147 147 def join(self, obj, fname):
148 148 return obj.sjoin(fname)
149 149
150 150
151 151 class changelogcache(storecache):
152 152 """filecache for the changelog"""
153 153
154 154 def __init__(self):
155 155 super(changelogcache, self).__init__()
156 156 _cachedfiles.add((b'00changelog.i', b''))
157 157 _cachedfiles.add((b'00changelog.n', b''))
158 158
159 159 def tracked_paths(self, obj):
160 160 paths = [self.join(obj, b'00changelog.i')]
161 161 if obj.store.opener.options.get(b'persistent-nodemap', False):
162 162 paths.append(self.join(obj, b'00changelog.n'))
163 163 return paths
164 164
165 165
166 166 class manifestlogcache(storecache):
167 167 """filecache for the manifestlog"""
168 168
169 169 def __init__(self):
170 170 super(manifestlogcache, self).__init__()
171 171 _cachedfiles.add((b'00manifest.i', b''))
172 172 _cachedfiles.add((b'00manifest.n', b''))
173 173
174 174 def tracked_paths(self, obj):
175 175 paths = [self.join(obj, b'00manifest.i')]
176 176 if obj.store.opener.options.get(b'persistent-nodemap', False):
177 177 paths.append(self.join(obj, b'00manifest.n'))
178 178 return paths
179 179
180 180
181 181 class mixedrepostorecache(_basefilecache):
182 182 """filecache for a mix files in .hg/store and outside"""
183 183
184 184 def __init__(self, *pathsandlocations):
185 185 # scmutil.filecache only uses the path for passing back into our
186 186 # join(), so we can safely pass a list of paths and locations
187 187 super(mixedrepostorecache, self).__init__(*pathsandlocations)
188 188 _cachedfiles.update(pathsandlocations)
189 189
190 190 def join(self, obj, fnameandlocation):
191 191 fname, location = fnameandlocation
192 192 if location == b'plain':
193 193 return obj.vfs.join(fname)
194 194 else:
195 195 if location != b'':
196 196 raise error.ProgrammingError(
197 197 b'unexpected location: %s' % location
198 198 )
199 199 return obj.sjoin(fname)
200 200
201 201
202 202 def isfilecached(repo, name):
203 203 """check if a repo has already cached "name" filecache-ed property
204 204
205 205 This returns a (cachedobj-or-None, iscached) tuple.
206 206 """
207 207 cacheentry = repo.unfiltered()._filecache.get(name, None)
208 208 if not cacheentry:
209 209 return None, False
210 210 return cacheentry.obj, True
211 211
212 212
213 213 class unfilteredpropertycache(util.propertycache):
214 214 """propertycache that apply to unfiltered repo only"""
215 215
216 216 def __get__(self, repo, type=None):
217 217 unfi = repo.unfiltered()
218 218 if unfi is repo:
219 219 return super(unfilteredpropertycache, self).__get__(unfi)
220 220 return getattr(unfi, self.name)
221 221
222 222
223 223 class filteredpropertycache(util.propertycache):
224 224 """propertycache that must take filtering in account"""
225 225
226 226 def cachevalue(self, obj, value):
227 227 object.__setattr__(obj, self.name, value)
228 228
229 229
230 230 def hasunfilteredcache(repo, name):
231 231 """check if a repo has an unfilteredpropertycache value for <name>"""
232 232 return name in vars(repo.unfiltered())
233 233
234 234
235 235 def unfilteredmethod(orig):
236 236 """decorate method that always need to be run on unfiltered version"""
237 237
238 238 @functools.wraps(orig)
239 239 def wrapper(repo, *args, **kwargs):
240 240 return orig(repo.unfiltered(), *args, **kwargs)
241 241
242 242 return wrapper
243 243
244 244
245 245 moderncaps = {
246 246 b'lookup',
247 247 b'branchmap',
248 248 b'pushkey',
249 249 b'known',
250 250 b'getbundle',
251 251 b'unbundle',
252 252 }
253 253 legacycaps = moderncaps.union({b'changegroupsubset'})
254 254
255 255
256 256 @interfaceutil.implementer(repository.ipeercommandexecutor)
257 257 class localcommandexecutor:
258 258 def __init__(self, peer):
259 259 self._peer = peer
260 260 self._sent = False
261 261 self._closed = False
262 262
263 263 def __enter__(self):
264 264 return self
265 265
266 266 def __exit__(self, exctype, excvalue, exctb):
267 267 self.close()
268 268
269 269 def callcommand(self, command, args):
270 270 if self._sent:
271 271 raise error.ProgrammingError(
272 272 b'callcommand() cannot be used after sendcommands()'
273 273 )
274 274
275 275 if self._closed:
276 276 raise error.ProgrammingError(
277 277 b'callcommand() cannot be used after close()'
278 278 )
279 279
280 280 # We don't need to support anything fancy. Just call the named
281 281 # method on the peer and return a resolved future.
282 282 fn = getattr(self._peer, pycompat.sysstr(command))
283 283
284 284 f = futures.Future()
285 285
286 286 try:
287 287 result = fn(**pycompat.strkwargs(args))
288 288 except Exception:
289 289 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
290 290 else:
291 291 f.set_result(result)
292 292
293 293 return f
294 294
295 295 def sendcommands(self):
296 296 self._sent = True
297 297
298 298 def close(self):
299 299 self._closed = True
300 300
301 301
302 302 @interfaceutil.implementer(repository.ipeercommands)
303 303 class localpeer(repository.peer):
304 304 '''peer for a local repo; reflects only the most recent API'''
305 305
306 306 def __init__(self, repo, caps=None):
307 307 super(localpeer, self).__init__()
308 308
309 309 if caps is None:
310 310 caps = moderncaps.copy()
311 311 self._repo = repo.filtered(b'served')
312 312 self.ui = repo.ui
313 313
314 314 if repo._wanted_sidedata:
315 315 formatted = bundle2.format_remote_wanted_sidedata(repo)
316 316 caps.add(b'exp-wanted-sidedata=' + formatted)
317 317
318 318 self._caps = repo._restrictcapabilities(caps)
319 319
320 320 # Begin of _basepeer interface.
321 321
322 322 def url(self):
323 323 return self._repo.url()
324 324
325 325 def local(self):
326 326 return self._repo
327 327
328 328 def peer(self):
329 329 return self
330 330
331 331 def canpush(self):
332 332 return True
333 333
334 334 def close(self):
335 335 self._repo.close()
336 336
337 337 # End of _basepeer interface.
338 338
339 339 # Begin of _basewirecommands interface.
340 340
341 341 def branchmap(self):
342 342 return self._repo.branchmap()
343 343
344 344 def capabilities(self):
345 345 return self._caps
346 346
347 347 def clonebundles(self):
348 348 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
349 349
350 350 def debugwireargs(self, one, two, three=None, four=None, five=None):
351 351 """Used to test argument passing over the wire"""
352 352 return b"%s %s %s %s %s" % (
353 353 one,
354 354 two,
355 355 pycompat.bytestr(three),
356 356 pycompat.bytestr(four),
357 357 pycompat.bytestr(five),
358 358 )
359 359
360 360 def getbundle(
361 361 self,
362 362 source,
363 363 heads=None,
364 364 common=None,
365 365 bundlecaps=None,
366 366 remote_sidedata=None,
367 367 **kwargs
368 368 ):
369 369 chunks = exchange.getbundlechunks(
370 370 self._repo,
371 371 source,
372 372 heads=heads,
373 373 common=common,
374 374 bundlecaps=bundlecaps,
375 375 remote_sidedata=remote_sidedata,
376 376 **kwargs
377 377 )[1]
378 378 cb = util.chunkbuffer(chunks)
379 379
380 380 if exchange.bundle2requested(bundlecaps):
381 381 # When requesting a bundle2, getbundle returns a stream to make the
382 382 # wire level function happier. We need to build a proper object
383 383 # from it in local peer.
384 384 return bundle2.getunbundler(self.ui, cb)
385 385 else:
386 386 return changegroup.getunbundler(b'01', cb, None)
387 387
388 388 def heads(self):
389 389 return self._repo.heads()
390 390
391 391 def known(self, nodes):
392 392 return self._repo.known(nodes)
393 393
394 394 def listkeys(self, namespace):
395 395 return self._repo.listkeys(namespace)
396 396
397 397 def lookup(self, key):
398 398 return self._repo.lookup(key)
399 399
400 400 def pushkey(self, namespace, key, old, new):
401 401 return self._repo.pushkey(namespace, key, old, new)
402 402
403 403 def stream_out(self):
404 404 raise error.Abort(_(b'cannot perform stream clone against local peer'))
405 405
406 406 def unbundle(self, bundle, heads, url):
407 407 """apply a bundle on a repo
408 408
409 409 This function handles the repo locking itself."""
410 410 try:
411 411 try:
412 412 bundle = exchange.readbundle(self.ui, bundle, None)
413 413 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
414 414 if util.safehasattr(ret, b'getchunks'):
415 415 # This is a bundle20 object, turn it into an unbundler.
416 416 # This little dance should be dropped eventually when the
417 417 # API is finally improved.
418 418 stream = util.chunkbuffer(ret.getchunks())
419 419 ret = bundle2.getunbundler(self.ui, stream)
420 420 return ret
421 421 except Exception as exc:
422 422 # If the exception contains output salvaged from a bundle2
423 423 # reply, we need to make sure it is printed before continuing
424 424 # to fail. So we build a bundle2 with such output and consume
425 425 # it directly.
426 426 #
427 427 # This is not very elegant but allows a "simple" solution for
428 428 # issue4594
429 429 output = getattr(exc, '_bundle2salvagedoutput', ())
430 430 if output:
431 431 bundler = bundle2.bundle20(self._repo.ui)
432 432 for out in output:
433 433 bundler.addpart(out)
434 434 stream = util.chunkbuffer(bundler.getchunks())
435 435 b = bundle2.getunbundler(self.ui, stream)
436 436 bundle2.processbundle(self._repo, b)
437 437 raise
438 438 except error.PushRaced as exc:
439 439 raise error.ResponseError(
440 440 _(b'push failed:'), stringutil.forcebytestr(exc)
441 441 )
442 442
443 443 # End of _basewirecommands interface.
444 444
445 445 # Begin of peer interface.
446 446
447 447 def commandexecutor(self):
448 448 return localcommandexecutor(self)
449 449
450 450 # End of peer interface.
451 451
452 452
453 453 @interfaceutil.implementer(repository.ipeerlegacycommands)
454 454 class locallegacypeer(localpeer):
455 455 """peer extension which implements legacy methods too; used for tests with
456 456 restricted capabilities"""
457 457
458 458 def __init__(self, repo):
459 459 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
460 460
461 461 # Begin of baselegacywirecommands interface.
462 462
463 463 def between(self, pairs):
464 464 return self._repo.between(pairs)
465 465
466 466 def branches(self, nodes):
467 467 return self._repo.branches(nodes)
468 468
469 469 def changegroup(self, nodes, source):
470 470 outgoing = discovery.outgoing(
471 471 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
472 472 )
473 473 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
474 474
475 475 def changegroupsubset(self, bases, heads, source):
476 476 outgoing = discovery.outgoing(
477 477 self._repo, missingroots=bases, ancestorsof=heads
478 478 )
479 479 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
480 480
481 481 # End of baselegacywirecommands interface.
482 482
483 483
484 484 # Functions receiving (ui, features) that extensions can register to impact
485 485 # the ability to load repositories with custom requirements. Only
486 486 # functions defined in loaded extensions are called.
487 487 #
488 488 # The function receives a set of requirement strings that the repository
489 489 # is capable of opening. Functions will typically add elements to the
490 490 # set to reflect that the extension knows how to handle those requirements.
491 491 featuresetupfuncs = set()
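# A hedged sketch (hypothetical extension module, not part of core) of
# registering such a hook; the requirement string below is an assumption:
#
#   from mercurial import localrepo
#
#   def featuresetup(ui, features):
#       # declare that this extension can open repos with this requirement
#       features.add(b'exp-myextension-fancy-store')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)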
492 492
493 493
494 494 def _getsharedvfs(hgvfs, requirements):
495 495 """returns the vfs object pointing to root of shared source
496 496 repo for a shared repository
497 497
498 498 hgvfs is vfs pointing at .hg/ of current repo (shared one)
499 499 requirements is a set of requirements of current repo (shared one)
500 500 """
501 501 # The ``shared`` or ``relshared`` requirements indicate the
502 502 # store lives in the path contained in the ``.hg/sharedpath`` file.
503 503 # This is an absolute path for ``shared`` and relative to
504 504 # ``.hg/`` for ``relshared``.
505 505 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
506 506 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
507 507 sharedpath = util.normpath(hgvfs.join(sharedpath))
508 508
509 509 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
510 510
511 511 if not sharedvfs.exists():
512 512 raise error.RepoError(
513 513 _(b'.hg/sharedpath points to nonexistent directory %s')
514 514 % sharedvfs.base
515 515 )
516 516 return sharedvfs
517 517
518 518
519 519 def _readrequires(vfs, allowmissing):
520 520 """reads the require file present at root of this vfs
521 521 and return a set of requirements
522 522
523 523 If allowmissing is True, we suppress FileNotFoundError if raised"""
524 524 # requires file contains a newline-delimited list of
525 525 # features/capabilities the opener (us) must have in order to use
526 526 # the repository. This file was introduced in Mercurial 0.9.2,
527 527 # which means very old repositories may not have one. We assume
528 528 # a missing file translates to no requirements.
529 529 read = vfs.tryread if allowmissing else vfs.read
530 530 return set(read(b'requires').splitlines())
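# For illustration, a requires file for a typical modern repository might
# contain entries such as these (the exact set depends on the repo format):
#
#   dotencode
#   fncache
#   generaldelta
#   revlogv1
#   sparserevlog
#   store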
531 531
532 532
533 533 def makelocalrepository(baseui, path: bytes, intents=None):
534 534 """Create a local repository object.
535 535
536 536 Given arguments needed to construct a local repository, this function
537 537 performs various early repository loading functionality (such as
538 538 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
539 539 the repository can be opened, derives a type suitable for representing
540 540 that repository, and returns an instance of it.
541 541
542 542 The returned object conforms to the ``repository.completelocalrepository``
543 543 interface.
544 544
545 545 The repository type is derived by calling a series of factory functions
546 546 for each aspect/interface of the final repository. These are defined by
547 547 ``REPO_INTERFACES``.
548 548
549 549 Each factory function is called to produce a type implementing a specific
550 550 interface. The cumulative list of returned types will be combined into a
551 551 new type and that type will be instantiated to represent the local
552 552 repository.
553 553
554 554 The factory functions each receive various state that may be consulted
555 555 as part of deriving a type.
556 556
557 557 Extensions should wrap these factory functions to customize repository type
558 558 creation. Note that an extension's wrapped function may be called even if
559 559 that extension is not loaded for the repo being constructed. Extensions
560 560 should check if their ``__name__`` appears in the
561 561 ``extensionmodulenames`` set passed to the factory function and no-op if
562 562 not.
563 563 """
564 564 ui = baseui.copy()
565 565 # Prevent copying repo configuration.
566 566 ui.copy = baseui.copy
567 567
568 568 # Working directory VFS rooted at repository root.
569 569 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
570 570
571 571 # Main VFS for .hg/ directory.
572 572 hgpath = wdirvfs.join(b'.hg')
573 573 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
574 574 # Whether this repository is a shared one or not
575 575 shared = False
576 576 # If this repository is shared, vfs pointing to shared repo
577 577 sharedvfs = None
578 578
579 579 # The .hg/ path should exist and should be a directory. All other
580 580 # cases are errors.
581 581 if not hgvfs.isdir():
582 582 try:
583 583 hgvfs.stat()
584 584 except FileNotFoundError:
585 585 pass
586 586 except ValueError as e:
587 587 # Can be raised on Python 3.8 when path is invalid.
588 588 raise error.Abort(
589 589 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
590 590 )
591 591
592 592 raise error.RepoError(_(b'repository %s not found') % path)
593 593
594 594 requirements = _readrequires(hgvfs, True)
595 595 shared = (
596 596 requirementsmod.SHARED_REQUIREMENT in requirements
597 597 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
598 598 )
599 599 storevfs = None
600 600 if shared:
601 601 # This is a shared repo
602 602 sharedvfs = _getsharedvfs(hgvfs, requirements)
603 603 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
604 604 else:
605 605 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
606 606
607 607 # if .hg/requires contains the sharesafe requirement, it means
608 608 # there exists a `.hg/store/requires` too and we should read it
609 609 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
610 610 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
611 611 # is not present; refer to checkrequirementscompat() for that
612 612 #
613 613 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
614 614 # repository was shared the old way. We check the share source .hg/requires
615 615 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
616 616 # to be reshared
617 617 hint = _(b"see `hg help config.format.use-share-safe` for more information")
618 618 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
619 619
620 620 if (
621 621 shared
622 622 and requirementsmod.SHARESAFE_REQUIREMENT
623 623 not in _readrequires(sharedvfs, True)
624 624 ):
625 625 mismatch_warn = ui.configbool(
626 626 b'share', b'safe-mismatch.source-not-safe.warn'
627 627 )
628 628 mismatch_config = ui.config(
629 629 b'share', b'safe-mismatch.source-not-safe'
630 630 )
631 631 mismatch_verbose_upgrade = ui.configbool(
632 632 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
633 633 )
634 634 if mismatch_config in (
635 635 b'downgrade-allow',
636 636 b'allow',
637 637 b'downgrade-abort',
638 638 ):
639 639 # prevent cyclic import localrepo -> upgrade -> localrepo
640 640 from . import upgrade
641 641
642 642 upgrade.downgrade_share_to_non_safe(
643 643 ui,
644 644 hgvfs,
645 645 sharedvfs,
646 646 requirements,
647 647 mismatch_config,
648 648 mismatch_warn,
649 649 mismatch_verbose_upgrade,
650 650 )
651 651 elif mismatch_config == b'abort':
652 652 raise error.Abort(
653 653 _(b"share source does not support share-safe requirement"),
654 654 hint=hint,
655 655 )
656 656 else:
657 657 raise error.Abort(
658 658 _(
659 659 b"share-safe mismatch with source.\nUnrecognized"
660 660 b" value '%s' of `share.safe-mismatch.source-not-safe`"
661 661 b" set."
662 662 )
663 663 % mismatch_config,
664 664 hint=hint,
665 665 )
666 666 else:
667 667 requirements |= _readrequires(storevfs, False)
668 668 elif shared:
669 669 sourcerequires = _readrequires(sharedvfs, False)
670 670 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
671 671 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
672 672 mismatch_warn = ui.configbool(
673 673 b'share', b'safe-mismatch.source-safe.warn'
674 674 )
675 675 mismatch_verbose_upgrade = ui.configbool(
676 676 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
677 677 )
678 678 if mismatch_config in (
679 679 b'upgrade-allow',
680 680 b'allow',
681 681 b'upgrade-abort',
682 682 ):
683 683 # prevent cyclic import localrepo -> upgrade -> localrepo
684 684 from . import upgrade
685 685
686 686 upgrade.upgrade_share_to_safe(
687 687 ui,
688 688 hgvfs,
689 689 storevfs,
690 690 requirements,
691 691 mismatch_config,
692 692 mismatch_warn,
693 693 mismatch_verbose_upgrade,
694 694 )
695 695 elif mismatch_config == b'abort':
696 696 raise error.Abort(
697 697 _(
698 698 b'version mismatch: source uses share-safe'
699 699 b' functionality while the current share does not'
700 700 ),
701 701 hint=hint,
702 702 )
703 703 else:
704 704 raise error.Abort(
705 705 _(
706 706 b"share-safe mismatch with source.\nUnrecognized"
707 707 b" value '%s' of `share.safe-mismatch.source-safe` set."
708 708 )
709 709 % mismatch_config,
710 710 hint=hint,
711 711 )
712 712
713 713 # The .hg/hgrc file may load extensions or contain config options
714 714 # that influence repository construction. Attempt to load it and
715 715 # process any new extensions that it may have pulled in.
716 716 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
717 717 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
718 718 extensions.loadall(ui)
719 719 extensions.populateui(ui)
720 720
721 721 # Set of module names of extensions loaded for this repository.
722 722 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
723 723
724 724 supportedrequirements = gathersupportedrequirements(ui)
725 725
726 726 # We first validate the requirements are known.
727 727 ensurerequirementsrecognized(requirements, supportedrequirements)
728 728
729 729 # Then we validate that the known set is reasonable to use together.
730 730 ensurerequirementscompatible(ui, requirements)
731 731
732 732 # TODO there are unhandled edge cases related to opening repositories with
733 733 # shared storage. If storage is shared, we should also test for requirements
734 734 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
735 735 # that repo, as that repo may load extensions needed to open it. This is a
736 736 # bit complicated because we don't want the other hgrc to overwrite settings
737 737 # in this hgrc.
738 738 #
739 739 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
740 740 # file when sharing repos. But if a requirement is added after the share is
741 741 # performed, thereby introducing a new requirement for the opener, we will
742 742 # not see that and could encounter a run-time error interacting with
743 743 # that shared store since it has an unknown-to-us requirement.
744 744
745 745 # At this point, we know we should be capable of opening the repository.
746 746 # Now get on with doing that.
747 747
748 748 features = set()
749 749
750 750 # The "store" part of the repository holds versioned data. How it is
751 751 # accessed is determined by various requirements. If `shared` or
752 752 # `relshared` requirements are present, this indicates the current repository
753 753 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
754 754 if shared:
755 755 storebasepath = sharedvfs.base
756 756 cachepath = sharedvfs.join(b'cache')
757 757 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
758 758 else:
759 759 storebasepath = hgvfs.base
760 760 cachepath = hgvfs.join(b'cache')
761 761 wcachepath = hgvfs.join(b'wcache')
762 762
763 763 # The store has changed over time and the exact layout is dictated by
764 764 # requirements. The store interface abstracts differences across all
765 765 # of them.
766 766 store = makestore(
767 767 requirements,
768 768 storebasepath,
769 769 lambda base: vfsmod.vfs(base, cacheaudited=True),
770 770 )
771 771 hgvfs.createmode = store.createmode
772 772
773 773 storevfs = store.vfs
774 774 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
775 775
776 776 if (
777 777 requirementsmod.REVLOGV2_REQUIREMENT in requirements
778 778 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
779 779 ):
780 780 features.add(repository.REPO_FEATURE_SIDE_DATA)
781 781 # the revlogv2 docket introduced race condition that we need to fix
782 782 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
783 783
784 784 # The cache vfs is used to manage cache files.
785 785 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
786 786 cachevfs.createmode = store.createmode
787 787 # The cache vfs is used to manage cache files related to the working copy
788 788 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
789 789 wcachevfs.createmode = store.createmode
790 790
791 791 # Now resolve the type for the repository object. We do this by repeatedly
792 792 # calling a factory function to produce types for specific aspects of the
793 793 # repo's operation. The aggregate returned types are used as base classes
794 794 # for a dynamically-derived type, which will represent our new repository.
795 795
796 796 bases = []
797 797 extrastate = {}
798 798
799 799 for iface, fn in REPO_INTERFACES:
800 800 # We pass all potentially useful state to give extensions tons of
801 801 # flexibility.
802 802 typ = fn()(
803 803 ui=ui,
804 804 intents=intents,
805 805 requirements=requirements,
806 806 features=features,
807 807 wdirvfs=wdirvfs,
808 808 hgvfs=hgvfs,
809 809 store=store,
810 810 storevfs=storevfs,
811 811 storeoptions=storevfs.options,
812 812 cachevfs=cachevfs,
813 813 wcachevfs=wcachevfs,
814 814 extensionmodulenames=extensionmodulenames,
815 815 extrastate=extrastate,
816 816 baseclasses=bases,
817 817 )
818 818
819 819 if not isinstance(typ, type):
820 820 raise error.ProgrammingError(
821 821 b'unable to construct type for %s' % iface
822 822 )
823 823
824 824 bases.append(typ)
825 825
826 826 # type() allows you to use characters in type names that wouldn't be
827 827 # recognized as Python symbols in source code. We abuse that to add
828 828 # rich information about our constructed repo.
829 829 name = pycompat.sysstr(
830 830 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
831 831 )
832 832
833 833 cls = type(name, tuple(bases), {})
834 834
835 835 return cls(
836 836 baseui=baseui,
837 837 ui=ui,
838 838 origroot=path,
839 839 wdirvfs=wdirvfs,
840 840 hgvfs=hgvfs,
841 841 requirements=requirements,
842 842 supportedrequirements=supportedrequirements,
843 843 sharedpath=storebasepath,
844 844 store=store,
845 845 cachevfs=cachevfs,
846 846 wcachevfs=wcachevfs,
847 847 features=features,
848 848 intents=intents,
849 849 )
850 850
851 851
852 852 def loadhgrc(
853 853 ui,
854 854 wdirvfs: vfsmod.vfs,
855 855 hgvfs: vfsmod.vfs,
856 856 requirements,
857 857 sharedvfs: Optional[vfsmod.vfs] = None,
858 858 ):
859 859 """Load hgrc files/content into a ui instance.
860 860
861 861 This is called during repository opening to load any additional
862 862 config files or settings relevant to the current repository.
863 863
864 864 Returns a bool indicating whether any additional configs were loaded.
865 865
866 866 Extensions should monkeypatch this function to modify how per-repo
867 867 configs are loaded. For example, an extension may wish to pull in
868 868 configs from alternate files or sources.
869 869
870 870 sharedvfs is vfs object pointing to source repo if the current one is a
871 871 shared one
872 872 """
873 873 if not rcutil.use_repo_hgrc():
874 874 return False
875 875
876 876 ret = False
877 877 # first load config from shared source if we have to
878 878 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
879 879 try:
880 880 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
881 881 ret = True
882 882 except IOError:
883 883 pass
884 884
885 885 try:
886 886 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
887 887 ret = True
888 888 except IOError:
889 889 pass
890 890
891 891 try:
892 892 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
893 893 ret = True
894 894 except IOError:
895 895 pass
896 896
897 897 return ret
898 898
899 899
900 900 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
901 901 """Perform additional actions after .hg/hgrc is loaded.
902 902
903 903 This function is called during repository loading immediately after
904 904 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
905 905
906 906 The function can be used to validate configs, automatically add
907 907 options (including extensions) based on requirements, etc.
908 908 """
909 909
910 910 # Map of requirements to list of extensions to load automatically when
911 911 # requirement is present.
912 912 autoextensions = {
913 913 b'git': [b'git'],
914 914 b'largefiles': [b'largefiles'],
915 915 b'lfs': [b'lfs'],
916 916 }
917 917
918 918 for requirement, names in sorted(autoextensions.items()):
919 919 if requirement not in requirements:
920 920 continue
921 921
922 922 for name in names:
923 923 if not ui.hasconfig(b'extensions', name):
924 924 ui.setconfig(b'extensions', name, b'', source=b'autoload')
925 925
926 926
927 927 def gathersupportedrequirements(ui):
928 928 """Determine the complete set of recognized requirements."""
929 929 # Start with all requirements supported by this file.
930 930 supported = set(localrepository._basesupported)
931 931
932 932 # Execute ``featuresetupfuncs`` entries if they belong to an extension
933 933 # relevant to this ui instance.
934 934 modules = {m.__name__ for n, m in extensions.extensions(ui)}
935 935
936 936 for fn in featuresetupfuncs:
937 937 if fn.__module__ in modules:
938 938 fn(ui, supported)
939 939
940 940 # Add derived requirements from registered compression engines.
941 941 for name in util.compengines:
942 942 engine = util.compengines[name]
943 943 if engine.available() and engine.revlogheader():
944 944 supported.add(b'exp-compression-%s' % name)
945 945 if engine.name() == b'zstd':
946 946 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
947 947
948 948 return supported
949 949
950 950
951 951 def ensurerequirementsrecognized(requirements, supported):
952 952 """Validate that a set of local requirements is recognized.
953 953
954 954 Receives a set of requirements. Raises an ``error.RepoError`` if there
955 955 exists any requirement in that set that currently loaded code doesn't
956 956 recognize.
957 957
958 958 Returns ``None``; unrecognized requirements result in an exception.
959 959 """
960 960 missing = set()
961 961
962 962 for requirement in requirements:
963 963 if requirement in supported:
964 964 continue
965 965
966 966 if not requirement or not requirement[0:1].isalnum():
967 967 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
968 968
969 969 missing.add(requirement)
970 970
971 971 if missing:
972 972 raise error.RequirementError(
973 973 _(b'repository requires features unknown to this Mercurial: %s')
974 974 % b' '.join(sorted(missing)),
975 975 hint=_(
976 976 b'see https://mercurial-scm.org/wiki/MissingRequirement '
977 977 b'for more information'
978 978 ),
979 979 )
980 980
981 981
982 982 def ensurerequirementscompatible(ui, requirements):
983 983 """Validates that a set of recognized requirements is mutually compatible.
984 984
985 985 Some requirements may not be compatible with others or require
986 986 config options that aren't enabled. This function is called during
987 987 repository opening to ensure that the set of requirements needed
988 988 to open a repository is sane and compatible with config options.
989 989
990 990 Extensions can monkeypatch this function to perform additional
991 991 checking.
992 992
993 993 ``error.RepoError`` should be raised on failure.
994 994 """
995 995 if (
996 996 requirementsmod.SPARSE_REQUIREMENT in requirements
997 997 and not sparse.enabled
998 998 ):
999 999 raise error.RepoError(
1000 1000 _(
1001 1001 b'repository is using sparse feature but '
1002 1002 b'sparse is not enabled; enable the '
1003 1003 b'"sparse" extensions to access'
1004 1004 )
1005 1005 )
1006 1006
1007 1007
1008 1008 def makestore(requirements, path, vfstype):
1009 1009 """Construct a storage object for a repository."""
1010 1010 if requirementsmod.STORE_REQUIREMENT in requirements:
1011 1011 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1012 1012 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1013 1013 return storemod.fncachestore(path, vfstype, dotencode)
1014 1014
1015 1015 return storemod.encodedstore(path, vfstype)
1016 1016
1017 1017 return storemod.basicstore(path, vfstype)
1018 1018
1019 1019
1020 1020 def resolvestorevfsoptions(ui, requirements, features):
1021 1021 """Resolve the options to pass to the store vfs opener.
1022 1022
1023 1023 The returned dict is used to influence behavior of the storage layer.
1024 1024 """
1025 1025 options = {}
1026 1026
1027 1027 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1028 1028 options[b'treemanifest'] = True
1029 1029
1030 1030 # experimental config: format.manifestcachesize
1031 1031 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1032 1032 if manifestcachesize is not None:
1033 1033 options[b'manifestcachesize'] = manifestcachesize
1034 1034
1035 1035 # In the absence of another requirement superseding a revlog-related
1036 1036 # requirement, we have to assume the repo is using revlog version 0.
1037 1037 # This revlog format is super old and we don't bother trying to parse
1038 1038 # opener options for it because those options wouldn't do anything
1039 1039 # meaningful on such old repos.
1040 1040 if (
1041 1041 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1042 1042 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1043 1043 ):
1044 1044 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1045 1045 else: # explicitly mark repo as using revlogv0
1046 1046 options[b'revlogv0'] = True
1047 1047
1048 1048 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1049 1049 options[b'copies-storage'] = b'changeset-sidedata'
1050 1050 else:
1051 1051 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1052 1052 copiesextramode = (b'changeset-only', b'compatibility')
1053 1053 if writecopiesto in copiesextramode:
1054 1054 options[b'copies-storage'] = b'extra'
1055 1055
1056 1056 return options
1057 1057
1058 1058
1059 1059 def resolverevlogstorevfsoptions(ui, requirements, features):
1060 1060 """Resolve opener options specific to revlogs."""
1061 1061
1062 1062 options = {}
1063 1063 options[b'flagprocessors'] = {}
1064 1064
1065 1065 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1066 1066 options[b'revlogv1'] = True
1067 1067 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1068 1068 options[b'revlogv2'] = True
1069 1069 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1070 1070 options[b'changelogv2'] = True
1071 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1072 options[b'changelogv2.compute-rank'] = cmp_rank
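# For illustration, the option read just above means a user can disable rank
# computation for changelog-v2 repositories from an hgrc:
#
#   [experimental]
#   changelog-v2.compute-rank = no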
1071 1073
1072 1074 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1073 1075 options[b'generaldelta'] = True
1074 1076
1075 1077 # experimental config: format.chunkcachesize
1076 1078 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1077 1079 if chunkcachesize is not None:
1078 1080 options[b'chunkcachesize'] = chunkcachesize
1079 1081
1080 1082 deltabothparents = ui.configbool(
1081 1083 b'storage', b'revlog.optimize-delta-parent-choice'
1082 1084 )
1083 1085 options[b'deltabothparents'] = deltabothparents
1084 1086 dps_cgds = ui.configint(
1085 1087 b'storage',
1086 1088 b'revlog.delta-parent-search.candidate-group-chunk-size',
1087 1089 )
1088 1090 options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
1089 1091 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1090 1092
1091 1093 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1092 1094 options[b'issue6528.fix-incoming'] = issue6528
1093 1095
1094 1096 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1095 1097 lazydeltabase = False
1096 1098 if lazydelta:
1097 1099 lazydeltabase = ui.configbool(
1098 1100 b'storage', b'revlog.reuse-external-delta-parent'
1099 1101 )
1100 1102 if lazydeltabase is None:
1101 1103 lazydeltabase = not scmutil.gddeltaconfig(ui)
1102 1104 options[b'lazydelta'] = lazydelta
1103 1105 options[b'lazydeltabase'] = lazydeltabase
1104 1106
1105 1107 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1106 1108 if 0 <= chainspan:
1107 1109 options[b'maxdeltachainspan'] = chainspan
1108 1110
1109 1111 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1110 1112 if mmapindexthreshold is not None:
1111 1113 options[b'mmapindexthreshold'] = mmapindexthreshold
1112 1114
1113 1115 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1114 1116 srdensitythres = float(
1115 1117 ui.config(b'experimental', b'sparse-read.density-threshold')
1116 1118 )
1117 1119 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1118 1120 options[b'with-sparse-read'] = withsparseread
1119 1121 options[b'sparse-read-density-threshold'] = srdensitythres
1120 1122 options[b'sparse-read-min-gap-size'] = srmingapsize
1121 1123
1122 1124 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1123 1125 options[b'sparse-revlog'] = sparserevlog
1124 1126 if sparserevlog:
1125 1127 options[b'generaldelta'] = True
1126 1128
1127 1129 maxchainlen = None
1128 1130 if sparserevlog:
1129 1131 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1130 1132 # experimental config: format.maxchainlen
1131 1133 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1132 1134 if maxchainlen is not None:
1133 1135 options[b'maxchainlen'] = maxchainlen
1134 1136
1135 1137 for r in requirements:
1136 1138 # we allow multiple compression engine requirements to co-exist because,
1137 1139 # strictly speaking, revlog seems to support mixed compression styles.
1138 1140 #
1139 1141 # The compression used for new entries will be "the last one"
1140 1142 prefix = r.startswith
1141 1143 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1142 1144 options[b'compengine'] = r.split(b'-', 2)[2]
1143 1145
1144 1146 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1145 1147 if options[b'zlib.level'] is not None:
1146 1148 if not (0 <= options[b'zlib.level'] <= 9):
1147 1149 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1148 1150 raise error.Abort(msg % options[b'zlib.level'])
1149 1151 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1150 1152 if options[b'zstd.level'] is not None:
1151 1153 if not (0 <= options[b'zstd.level'] <= 22):
1152 1154 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1153 1155 raise error.Abort(msg % options[b'zstd.level'])
1154 1156
1155 1157 if requirementsmod.NARROW_REQUIREMENT in requirements:
1156 1158 options[b'enableellipsis'] = True
1157 1159
1158 1160 if ui.configbool(b'experimental', b'rust.index'):
1159 1161 options[b'rust.index'] = True
1160 1162 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1161 1163 slow_path = ui.config(
1162 1164 b'storage', b'revlog.persistent-nodemap.slow-path'
1163 1165 )
1164 1166 if slow_path not in (b'allow', b'warn', b'abort'):
1165 1167 default = ui.config_default(
1166 1168 b'storage', b'revlog.persistent-nodemap.slow-path'
1167 1169 )
1168 1170 msg = _(
1169 1171 b'unknown value for config '
1170 1172 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1171 1173 )
1172 1174 ui.warn(msg % slow_path)
1173 1175 if not ui.quiet:
1174 1176 ui.warn(_(b'falling back to default value: %s\n') % default)
1175 1177 slow_path = default
1176 1178
1177 1179 msg = _(
1178 1180 b"accessing `persistent-nodemap` repository without associated "
1179 1181 b"fast implementation."
1180 1182 )
1181 1183 hint = _(
1182 1184 b"check `hg help config.format.use-persistent-nodemap` "
1183 1185 b"for details"
1184 1186 )
1185 1187 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1186 1188 if slow_path == b'warn':
1187 1189 msg = b"warning: " + msg + b'\n'
1188 1190 ui.warn(msg)
1189 1191 if not ui.quiet:
1190 1192 hint = b'(' + hint + b')\n'
1191 1193 ui.warn(hint)
1192 1194 if slow_path == b'abort':
1193 1195 raise error.Abort(msg, hint=hint)
1194 1196 options[b'persistent-nodemap'] = True
1195 1197 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1196 1198 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1197 1199 if slow_path not in (b'allow', b'warn', b'abort'):
1198 1200 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1199 1201 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1200 1202 ui.warn(msg % slow_path)
1201 1203 if not ui.quiet:
1202 1204 ui.warn(_(b'falling back to default value: %s\n') % default)
1203 1205 slow_path = default
1204 1206
1205 1207 msg = _(
1206 1208 b"accessing `dirstate-v2` repository without associated "
1207 1209 b"fast implementation."
1208 1210 )
1209 1211 hint = _(
1210 1212 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1211 1213 )
1212 1214 if not dirstate.HAS_FAST_DIRSTATE_V2:
1213 1215 if slow_path == b'warn':
1214 1216 msg = b"warning: " + msg + b'\n'
1215 1217 ui.warn(msg)
1216 1218 if not ui.quiet:
1217 1219 hint = b'(' + hint + b')\n'
1218 1220 ui.warn(hint)
1219 1221 if slow_path == b'abort':
1220 1222 raise error.Abort(msg, hint=hint)
1221 1223 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1222 1224 options[b'persistent-nodemap.mmap'] = True
1223 1225 if ui.configbool(b'devel', b'persistent-nodemap'):
1224 1226 options[b'devel-force-nodemap'] = True
1225 1227
1226 1228 return options
1227 1229
1228 1230
1229 1231 def makemain(**kwargs):
1230 1232 """Produce a type conforming to ``ilocalrepositorymain``."""
1231 1233 return localrepository
1232 1234
1233 1235
1234 1236 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1235 1237 class revlogfilestorage:
1236 1238 """File storage when using revlogs."""
1237 1239
1238 1240 def file(self, path):
1239 1241 if path.startswith(b'/'):
1240 1242 path = path[1:]
1241 1243
1242 1244 return filelog.filelog(self.svfs, path)
1243 1245
1244 1246
1245 1247 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1246 1248 class revlognarrowfilestorage:
1247 1249 """File storage when using revlogs and narrow files."""
1248 1250
1249 1251 def file(self, path):
1250 1252 if path.startswith(b'/'):
1251 1253 path = path[1:]
1252 1254
1253 1255 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1254 1256
1255 1257
1256 1258 def makefilestorage(requirements, features, **kwargs):
1257 1259 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1258 1260 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1259 1261 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1260 1262
1261 1263 if requirementsmod.NARROW_REQUIREMENT in requirements:
1262 1264 return revlognarrowfilestorage
1263 1265 else:
1264 1266 return revlogfilestorage
1265 1267
1266 1268
1267 1269 # List of repository interfaces and factory functions for them. Each
1268 1270 # will be called in order during ``makelocalrepository()`` to iteratively
1269 1271 # derive the final type for a local repository instance. We capture the
1270 1272 # function as a lambda so we don't hold a reference and the module-level
1271 1273 # functions can be wrapped.
1272 1274 REPO_INTERFACES = [
1273 1275 (repository.ilocalrepositorymain, lambda: makemain),
1274 1276 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1275 1277 ]
1276 1278
1277 1279
1278 1280 @interfaceutil.implementer(repository.ilocalrepositorymain)
1279 1281 class localrepository:
1280 1282 """Main class for representing local repositories.
1281 1283
1282 1284 All local repositories are instances of this class.
1283 1285
1284 1286 Constructed on its own, instances of this class are not usable as
1285 1287 repository objects. To obtain a usable repository object, call
1286 1288 ``hg.repository()``, ``localrepo.instance()``, or
1287 1289 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1288 1290 ``instance()`` adds support for creating new repositories.
1289 1291 ``hg.repository()`` adds more extension integration, including calling
1290 1292 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1291 1293 used.
1292 1294 """
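# A minimal usage sketch (illustrative, not part of the original source):
# obtaining a usable repository object goes through the helpers named in the
# docstring above, for example::
#
#   from mercurial import hg, ui as uimod
#   repo = hg.repository(uimod.ui.load(), b'/path/to/repo')  # path is hypothetical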
1293 1295
1294 1296 _basesupported = {
1295 1297 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1296 1298 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1297 1299 requirementsmod.CHANGELOGV2_REQUIREMENT,
1298 1300 requirementsmod.COPIESSDC_REQUIREMENT,
1299 1301 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1300 1302 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1301 1303 requirementsmod.DOTENCODE_REQUIREMENT,
1302 1304 requirementsmod.FNCACHE_REQUIREMENT,
1303 1305 requirementsmod.GENERALDELTA_REQUIREMENT,
1304 1306 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1305 1307 requirementsmod.NODEMAP_REQUIREMENT,
1306 1308 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1307 1309 requirementsmod.REVLOGV1_REQUIREMENT,
1308 1310 requirementsmod.REVLOGV2_REQUIREMENT,
1309 1311 requirementsmod.SHARED_REQUIREMENT,
1310 1312 requirementsmod.SHARESAFE_REQUIREMENT,
1311 1313 requirementsmod.SPARSE_REQUIREMENT,
1312 1314 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1313 1315 requirementsmod.STORE_REQUIREMENT,
1314 1316 requirementsmod.TREEMANIFEST_REQUIREMENT,
1315 1317 }
1316 1318
1317 1319 # list of prefixes for files which can be written without 'wlock'
1318 1320 # Extensions should extend this list when needed
1319 1321 _wlockfreeprefix = {
1320 1322 # We might consider requiring 'wlock' for the next
1321 1323 # two, but pretty much all the existing code assumes
1322 1324 # wlock is not needed so we keep them excluded for
1323 1325 # now.
1324 1326 b'hgrc',
1325 1327 b'requires',
1326 1328 # XXX cache is a complicated business; someone
1327 1329 # should investigate this in depth at some point
1328 1330 b'cache/',
1329 1331 # XXX shouldn't the dirstate be covered by the wlock?
1330 1332 b'dirstate',
1331 1333 # XXX bisect was still a bit too messy at the time
1332 1334 # this changeset was introduced. Someone should fix
1333 1335 # the remaining bit and drop this line
1334 1336 b'bisect.state',
1335 1337 }
1336 1338
1337 1339 def __init__(
1338 1340 self,
1339 1341 baseui,
1340 1342 ui,
1341 1343 origroot: bytes,
1342 1344 wdirvfs: vfsmod.vfs,
1343 1345 hgvfs: vfsmod.vfs,
1344 1346 requirements,
1345 1347 supportedrequirements,
1346 1348 sharedpath: bytes,
1347 1349 store,
1348 1350 cachevfs: vfsmod.vfs,
1349 1351 wcachevfs: vfsmod.vfs,
1350 1352 features,
1351 1353 intents=None,
1352 1354 ):
1353 1355 """Create a new local repository instance.
1354 1356
1355 1357 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1356 1358 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1357 1359 object.
1358 1360
1359 1361 Arguments:
1360 1362
1361 1363 baseui
1362 1364 ``ui.ui`` instance that ``ui`` argument was based off of.
1363 1365
1364 1366 ui
1365 1367 ``ui.ui`` instance for use by the repository.
1366 1368
1367 1369 origroot
1368 1370 ``bytes`` path to working directory root of this repository.
1369 1371
1370 1372 wdirvfs
1371 1373 ``vfs.vfs`` rooted at the working directory.
1372 1374
1373 1375 hgvfs
1374 1376 ``vfs.vfs`` rooted at .hg/
1375 1377
1376 1378 requirements
1377 1379 ``set`` of bytestrings representing repository opening requirements.
1378 1380
1379 1381 supportedrequirements
1380 1382 ``set`` of bytestrings representing repository requirements that we
1381 1383 know how to open. May be a superset of ``requirements``.
1382 1384
1383 1385 sharedpath
1384 1386 ``bytes`` defining the path to the storage base directory. Points to a
1385 1387 ``.hg/`` directory somewhere.
1386 1388
1387 1389 store
1388 1390 ``store.basicstore`` (or derived) instance providing access to
1389 1391 versioned storage.
1390 1392
1391 1393 cachevfs
1392 1394 ``vfs.vfs`` used for cache files.
1393 1395
1394 1396 wcachevfs
1395 1397 ``vfs.vfs`` used for cache files related to the working copy.
1396 1398
1397 1399 features
1398 1400 ``set`` of bytestrings defining features/capabilities of this
1399 1401 instance.
1400 1402
1401 1403 intents
1402 1404 ``set`` of system strings indicating what this repo will be used
1403 1405 for.
1404 1406 """
1405 1407 self.baseui = baseui
1406 1408 self.ui = ui
1407 1409 self.origroot = origroot
1408 1410 # vfs rooted at working directory.
1409 1411 self.wvfs = wdirvfs
1410 1412 self.root = wdirvfs.base
1411 1413 # vfs rooted at .hg/. Used to access most non-store paths.
1412 1414 self.vfs = hgvfs
1413 1415 self.path = hgvfs.base
1414 1416 self.requirements = requirements
1415 1417 self.nodeconstants = sha1nodeconstants
1416 1418 self.nullid = self.nodeconstants.nullid
1417 1419 self.supported = supportedrequirements
1418 1420 self.sharedpath = sharedpath
1419 1421 self.store = store
1420 1422 self.cachevfs = cachevfs
1421 1423 self.wcachevfs = wcachevfs
1422 1424 self.features = features
1423 1425
1424 1426 self.filtername = None
1425 1427
1426 1428 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1427 1429 b'devel', b'check-locks'
1428 1430 ):
1429 1431 self.vfs.audit = self._getvfsward(self.vfs.audit)
1430 1432 # A list of callbacks to shape the phase if no data were found.
1431 1433 # Callbacks are in the form: func(repo, roots) --> processed root.
1432 1434 # This list is to be filled by extensions during repo setup.
1433 1435 self._phasedefaults = []
1434 1436
1435 1437 color.setup(self.ui)
1436 1438
1437 1439 self.spath = self.store.path
1438 1440 self.svfs = self.store.vfs
1439 1441 self.sjoin = self.store.join
1440 1442 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1441 1443 b'devel', b'check-locks'
1442 1444 ):
1443 1445 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1444 1446 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1445 1447 else: # standard vfs
1446 1448 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1447 1449
1448 1450 self._dirstatevalidatewarned = False
1449 1451
1450 1452 self._branchcaches = branchmap.BranchMapCache()
1451 1453 self._revbranchcache = None
1452 1454 self._filterpats = {}
1453 1455 self._datafilters = {}
1454 1456 self._transref = self._lockref = self._wlockref = None
1455 1457
1456 1458 # A cache for various files under .hg/ that tracks file changes,
1457 1459 # (used by the filecache decorator)
1458 1460 #
1459 1461 # Maps a property name to its util.filecacheentry
1460 1462 self._filecache = {}
1461 1463
1462 1464 # hold sets of revisions to be filtered
1463 1465 # should be cleared when something might have changed the filter value:
1464 1466 # - new changesets,
1465 1467 # - phase change,
1466 1468 # - new obsolescence marker,
1467 1469 # - working directory parent change,
1468 1470 # - bookmark changes
1469 1471 self.filteredrevcache = {}
1470 1472
1471 1473 # post-dirstate-status hooks
1472 1474 self._postdsstatus = []
1473 1475
1474 1476 # generic mapping between names and nodes
1475 1477 self.names = namespaces.namespaces()
1476 1478
1477 1479 # Key to signature value.
1478 1480 self._sparsesignaturecache = {}
1479 1481 # Signature to cached matcher instance.
1480 1482 self._sparsematchercache = {}
1481 1483
1482 1484 self._extrafilterid = repoview.extrafilter(ui)
1483 1485
1484 1486 self.filecopiesmode = None
1485 1487 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1486 1488 self.filecopiesmode = b'changeset-sidedata'
1487 1489
1488 1490 self._wanted_sidedata = set()
1489 1491 self._sidedata_computers = {}
1490 1492 sidedatamod.set_sidedata_spec_for_repo(self)
1491 1493
1492 1494 def _getvfsward(self, origfunc):
1493 1495 """build a ward for self.vfs"""
1494 1496 rref = weakref.ref(self)
1495 1497
1496 1498 def checkvfs(path, mode=None):
1497 1499 ret = origfunc(path, mode=mode)
1498 1500 repo = rref()
1499 1501 if (
1500 1502 repo is None
1501 1503 or not util.safehasattr(repo, b'_wlockref')
1502 1504 or not util.safehasattr(repo, b'_lockref')
1503 1505 ):
1504 1506 return
1505 1507 if mode in (None, b'r', b'rb'):
1506 1508 return
1507 1509 if path.startswith(repo.path):
1508 1510 # truncate name relative to the repository (.hg)
1509 1511 path = path[len(repo.path) + 1 :]
1510 1512 if path.startswith(b'cache/'):
1511 1513 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1512 1514 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1513 1515 # path prefixes covered by 'lock'
1514 1516 vfs_path_prefixes = (
1515 1517 b'journal.',
1516 1518 b'undo.',
1517 1519 b'strip-backup/',
1518 1520 b'cache/',
1519 1521 )
1520 1522 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1521 1523 if repo._currentlock(repo._lockref) is None:
1522 1524 repo.ui.develwarn(
1523 1525 b'write with no lock: "%s"' % path,
1524 1526 stacklevel=3,
1525 1527 config=b'check-locks',
1526 1528 )
1527 1529 elif repo._currentlock(repo._wlockref) is None:
1528 1530 # rest of vfs files are covered by 'wlock'
1529 1531 #
1530 1532 # exclude special files
1531 1533 for prefix in self._wlockfreeprefix:
1532 1534 if path.startswith(prefix):
1533 1535 return
1534 1536 repo.ui.develwarn(
1535 1537 b'write with no wlock: "%s"' % path,
1536 1538 stacklevel=3,
1537 1539 config=b'check-locks',
1538 1540 )
1539 1541 return ret
1540 1542
1541 1543 return checkvfs
1542 1544
1543 1545 def _getsvfsward(self, origfunc):
1544 1546 """build a ward for self.svfs"""
1545 1547 rref = weakref.ref(self)
1546 1548
1547 1549 def checksvfs(path, mode=None):
1548 1550 ret = origfunc(path, mode=mode)
1549 1551 repo = rref()
1550 1552 if repo is None or not util.safehasattr(repo, b'_lockref'):
1551 1553 return
1552 1554 if mode in (None, b'r', b'rb'):
1553 1555 return
1554 1556 if path.startswith(repo.sharedpath):
1555 1557 # truncate name relative to the repository (.hg)
1556 1558 path = path[len(repo.sharedpath) + 1 :]
1557 1559 if repo._currentlock(repo._lockref) is None:
1558 1560 repo.ui.develwarn(
1559 1561 b'write with no lock: "%s"' % path, stacklevel=4
1560 1562 )
1561 1563 return ret
1562 1564
1563 1565 return checksvfs
1564 1566
1565 1567 def close(self):
1566 1568 self._writecaches()
1567 1569
1568 1570 def _writecaches(self):
1569 1571 if self._revbranchcache:
1570 1572 self._revbranchcache.write()
1571 1573
1572 1574 def _restrictcapabilities(self, caps):
1573 1575 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1574 1576 caps = set(caps)
1575 1577 capsblob = bundle2.encodecaps(
1576 1578 bundle2.getrepocaps(self, role=b'client')
1577 1579 )
1578 1580 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1579 1581 if self.ui.configbool(b'experimental', b'narrow'):
1580 1582 caps.add(wireprototypes.NARROWCAP)
1581 1583 return caps
1582 1584
1583 1585 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1584 1586 # self -> auditor -> self._checknested -> self
1585 1587
1586 1588 @property
1587 1589 def auditor(self):
1588 1590 # This is only used by context.workingctx.match in order to
1589 1591 # detect files in subrepos.
1590 1592 return pathutil.pathauditor(self.root, callback=self._checknested)
1591 1593
1592 1594 @property
1593 1595 def nofsauditor(self):
1594 1596 # This is only used by context.basectx.match in order to detect
1595 1597 # files in subrepos.
1596 1598 return pathutil.pathauditor(
1597 1599 self.root, callback=self._checknested, realfs=False, cached=True
1598 1600 )
1599 1601
1600 1602 def _checknested(self, path):
1601 1603 """Determine if path is a legal nested repository."""
1602 1604 if not path.startswith(self.root):
1603 1605 return False
1604 1606 subpath = path[len(self.root) + 1 :]
1605 1607 normsubpath = util.pconvert(subpath)
1606 1608
1607 1609 # XXX: Checking against the current working copy is wrong in
1608 1610 # the sense that it can reject things like
1609 1611 #
1610 1612 # $ hg cat -r 10 sub/x.txt
1611 1613 #
1612 1614 # if sub/ is no longer a subrepository in the working copy
1613 1615 # parent revision.
1614 1616 #
1615 1617 # However, it can of course also allow things that would have
1616 1618 # been rejected before, such as the above cat command if sub/
1617 1619 # is a subrepository now, but was a normal directory before.
1618 1620 # The old path auditor would have rejected by mistake since it
1619 1621 # panics when it sees sub/.hg/.
1620 1622 #
1621 1623 # All in all, checking against the working copy seems sensible
1622 1624 # since we want to prevent access to nested repositories on
1623 1625 # the filesystem *now*.
1624 1626 ctx = self[None]
1625 1627 parts = util.splitpath(subpath)
1626 1628 while parts:
1627 1629 prefix = b'/'.join(parts)
1628 1630 if prefix in ctx.substate:
1629 1631 if prefix == normsubpath:
1630 1632 return True
1631 1633 else:
1632 1634 sub = ctx.sub(prefix)
1633 1635 return sub.checknested(subpath[len(prefix) + 1 :])
1634 1636 else:
1635 1637 parts.pop()
1636 1638 return False
1637 1639
1638 1640 def peer(self):
1639 1641 return localpeer(self) # not cached to avoid reference cycle
1640 1642
1641 1643 def unfiltered(self):
1642 1644 """Return unfiltered version of the repository
1643 1645
1644 1646 Intended to be overwritten by filtered repo."""
1645 1647 return self
1646 1648
1647 1649 def filtered(self, name, visibilityexceptions=None):
1648 1650 """Return a filtered version of a repository
1649 1651
1650 1652 The `name` parameter is the identifier of the requested view. This
1651 1653 will return a repoview object set "exactly" to the specified view.
1652 1654
1653 1655 This function does not apply recursive filtering to a repository. For
1654 1656 example calling `repo.filtered("served")` will return a repoview using
1655 1657 the "served" view, regardless of the initial view used by `repo`.
1656 1658
1657 1659 In other words, there is always only one level of `repoview` "filtering".
1658 1660 """
1659 1661 if self._extrafilterid is not None and b'%' not in name:
1660 1662 name = name + b'%' + self._extrafilterid
1661 1663
1662 1664 cls = repoview.newtype(self.unfiltered().__class__)
1663 1665 return cls(self, name, visibilityexceptions)
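# Illustrative usage sketch (not part of the original source): callers request
# a specific view by name, e.g.::
#
#   served = repo.filtered(b'served')  # roughly, the view advertised to peers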
1664 1666
1665 1667 @mixedrepostorecache(
1666 1668 (b'bookmarks', b'plain'),
1667 1669 (b'bookmarks.current', b'plain'),
1668 1670 (b'bookmarks', b''),
1669 1671 (b'00changelog.i', b''),
1670 1672 )
1671 1673 def _bookmarks(self):
1672 1674 # Since the multiple files involved in the transaction cannot be
1673 1675 # written atomically (with current repository format), there is a race
1674 1676 # condition here.
1675 1677 #
1676 1678 # 1) changelog content A is read
1677 1679 # 2) outside transaction update changelog to content B
1678 1680 # 3) outside transaction update bookmark file referring to content B
1679 1681 # 4) bookmarks file content is read and filtered against changelog-A
1680 1682 #
1681 1683 # When this happens, bookmarks against nodes missing from A are dropped.
1682 1684 #
1683 1685 # Having this happen during a read is not great, but it becomes worse
1684 1686 # when it happens during a write, because the bookmarks to the "unknown"
1685 1687 # nodes will be dropped for good. However, writes happen within locks.
1686 1688 # This locking makes it possible to have a race-free consistent read.
1687 1689 # For this purpose, data read from disk before locking is
1688 1690 # "invalidated" right after the locks are taken. These invalidations are
1689 1691 # "light": the `filecache` mechanism keeps the data in memory and will
1690 1692 # reuse it if the underlying files did not change. Not parsing the
1691 1693 # same data multiple times helps performance.
1692 1694 #
1693 1695 # Unfortunately, in the case described above, the files tracked by the
1694 1696 # bookmarks file cache might not have changed, but the in-memory
1695 1697 # content is still "wrong" because we used an older changelog content
1696 1698 # to process the on-disk data. So after locking, the changelog would be
1697 1699 # refreshed but `_bookmarks` would be preserved.
1698 1700 # Adding `00changelog.i` to the list of tracked files is not
1699 1701 # enough, because at the time we build the content for `_bookmarks` in
1700 1702 # (4), the changelog file has already diverged from the content used
1701 1703 # for loading `changelog` in (1)
1702 1704 #
1703 1705 # To prevent the issue, we force the changelog to be explicitly
1704 1706 # reloaded while computing `_bookmarks`. The data race can still happen
1705 1707 # without the lock (with a narrower window), but it would no longer go
1706 1708 # undetected during the lock time refresh.
1707 1709 #
1708 1710 # The new schedule is as follows:
1709 1711 #
1710 1712 # 1) filecache logic detect that `_bookmarks` needs to be computed
1711 1713 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1712 1714 # 3) We force `changelog` filecache to be tested
1713 1715 # 4) cachestat for `changelog` are captured (for changelog)
1714 1716 # 5) `_bookmarks` is computed and cached
1715 1717 #
1716 1718 # The step in (3) ensures we have a changelog at least as recent as the
1717 1719 # cache stat computed in (1). As a result, at locking time:
1718 1720 # * if the changelog did not change since (1) -> we can reuse the data
1719 1721 # * otherwise -> the bookmarks get refreshed.
1720 1722 self._refreshchangelog()
1721 1723 return bookmarks.bmstore(self)
1722 1724
1723 1725 def _refreshchangelog(self):
1724 1726 """make sure the in memory changelog match the on-disk one"""
1725 1727 if 'changelog' in vars(self) and self.currenttransaction() is None:
1726 1728 del self.changelog
1727 1729
1728 1730 @property
1729 1731 def _activebookmark(self):
1730 1732 return self._bookmarks.active
1731 1733
1732 1734 # _phasesets depend on changelog. What we need is to call
1733 1735 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1734 1736 # can't be easily expressed in filecache mechanism.
1735 1737 @storecache(b'phaseroots', b'00changelog.i')
1736 1738 def _phasecache(self):
1737 1739 return phases.phasecache(self, self._phasedefaults)
1738 1740
1739 1741 @storecache(b'obsstore')
1740 1742 def obsstore(self):
1741 1743 return obsolete.makestore(self.ui, self)
1742 1744
1743 1745 @changelogcache()
1744 1746 def changelog(repo):
1745 1747 # load dirstate before changelog to avoid race see issue6303
1746 1748 repo.dirstate.prefetch_parents()
1747 1749 return repo.store.changelog(
1748 1750 txnutil.mayhavepending(repo.root),
1749 1751 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1750 1752 )
1751 1753
1752 1754 @manifestlogcache()
1753 1755 def manifestlog(self):
1754 1756 return self.store.manifestlog(self, self._storenarrowmatch)
1755 1757
1756 1758 @repofilecache(b'dirstate')
1757 1759 def dirstate(self):
1758 1760 return self._makedirstate()
1759 1761
1760 1762 def _makedirstate(self):
1761 1763 """Extension point for wrapping the dirstate per-repo."""
1762 1764 sparsematchfn = None
1763 1765 if sparse.use_sparse(self):
1764 1766 sparsematchfn = lambda: sparse.matcher(self)
1765 1767 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1766 1768 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1767 1769 use_dirstate_v2 = v2_req in self.requirements
1768 1770 use_tracked_hint = th in self.requirements
1769 1771
1770 1772 return dirstate.dirstate(
1771 1773 self.vfs,
1772 1774 self.ui,
1773 1775 self.root,
1774 1776 self._dirstatevalidate,
1775 1777 sparsematchfn,
1776 1778 self.nodeconstants,
1777 1779 use_dirstate_v2,
1778 1780 use_tracked_hint=use_tracked_hint,
1779 1781 )
1780 1782
1781 1783 def _dirstatevalidate(self, node):
1782 1784 try:
1783 1785 self.changelog.rev(node)
1784 1786 return node
1785 1787 except error.LookupError:
1786 1788 if not self._dirstatevalidatewarned:
1787 1789 self._dirstatevalidatewarned = True
1788 1790 self.ui.warn(
1789 1791 _(b"warning: ignoring unknown working parent %s!\n")
1790 1792 % short(node)
1791 1793 )
1792 1794 return self.nullid
1793 1795
1794 1796 @storecache(narrowspec.FILENAME)
1795 1797 def narrowpats(self):
1796 1798 """matcher patterns for this repository's narrowspec
1797 1799
1798 1800 A tuple of (includes, excludes).
1799 1801 """
1800 1802 return narrowspec.load(self)
1801 1803
1802 1804 @storecache(narrowspec.FILENAME)
1803 1805 def _storenarrowmatch(self):
1804 1806 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1805 1807 return matchmod.always()
1806 1808 include, exclude = self.narrowpats
1807 1809 return narrowspec.match(self.root, include=include, exclude=exclude)
1808 1810
1809 1811 @storecache(narrowspec.FILENAME)
1810 1812 def _narrowmatch(self):
1811 1813 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1812 1814 return matchmod.always()
1813 1815 narrowspec.checkworkingcopynarrowspec(self)
1814 1816 include, exclude = self.narrowpats
1815 1817 return narrowspec.match(self.root, include=include, exclude=exclude)
1816 1818
1817 1819 def narrowmatch(self, match=None, includeexact=False):
1818 1820 """matcher corresponding the the repo's narrowspec
1819 1821
1820 1822 If `match` is given, then that will be intersected with the narrow
1821 1823 matcher.
1822 1824
1823 1825 If `includeexact` is True, then any exact matches from `match` will
1824 1826 be included even if they're outside the narrowspec.
1825 1827 """
1826 1828 if match:
1827 1829 if includeexact and not self._narrowmatch.always():
1828 1830 # do not exclude explicitly-specified paths so that they can
1829 1831 # be warned later on
1830 1832 em = matchmod.exact(match.files())
1831 1833 nm = matchmod.unionmatcher([self._narrowmatch, em])
1832 1834 return matchmod.intersectmatchers(match, nm)
1833 1835 return matchmod.intersectmatchers(match, self._narrowmatch)
1834 1836 return self._narrowmatch
1835 1837
1836 1838 def setnarrowpats(self, newincludes, newexcludes):
1837 1839 narrowspec.save(self, newincludes, newexcludes)
1838 1840 self.invalidate(clearfilecache=True)
1839 1841
1840 1842 @unfilteredpropertycache
1841 1843 def _quick_access_changeid_null(self):
1842 1844 return {
1843 1845 b'null': (nullrev, self.nodeconstants.nullid),
1844 1846 nullrev: (nullrev, self.nodeconstants.nullid),
1845 1847 self.nullid: (nullrev, self.nullid),
1846 1848 }
1847 1849
1848 1850 @unfilteredpropertycache
1849 1851 def _quick_access_changeid_wc(self):
1850 1852 # also fast path access to the working copy parents
1851 1853 # however, only do it for filters that ensure the wc is visible.
1852 1854 quick = self._quick_access_changeid_null.copy()
1853 1855 cl = self.unfiltered().changelog
1854 1856 for node in self.dirstate.parents():
1855 1857 if node == self.nullid:
1856 1858 continue
1857 1859 rev = cl.index.get_rev(node)
1858 1860 if rev is None:
1859 1861 # unknown working copy parent case:
1860 1862 #
1861 1863 # skip the fast path and let higher code deal with it
1862 1864 continue
1863 1865 pair = (rev, node)
1864 1866 quick[rev] = pair
1865 1867 quick[node] = pair
1866 1868 # also add the parents of the parents
1867 1869 for r in cl.parentrevs(rev):
1868 1870 if r == nullrev:
1869 1871 continue
1870 1872 n = cl.node(r)
1871 1873 pair = (r, n)
1872 1874 quick[r] = pair
1873 1875 quick[n] = pair
1874 1876 p1node = self.dirstate.p1()
1875 1877 if p1node != self.nullid:
1876 1878 quick[b'.'] = quick[p1node]
1877 1879 return quick
1878 1880
1879 1881 @unfilteredmethod
1880 1882 def _quick_access_changeid_invalidate(self):
1881 1883 if '_quick_access_changeid_wc' in vars(self):
1882 1884 del self.__dict__['_quick_access_changeid_wc']
1883 1885
1884 1886 @property
1885 1887 def _quick_access_changeid(self):
1886 1888 """an helper dictionnary for __getitem__ calls
1887 1889
1888 1890 This contains a list of symbol we can recognise right away without
1889 1891 further processing.
1890 1892 """
1891 1893 if self.filtername in repoview.filter_has_wc:
1892 1894 return self._quick_access_changeid_wc
1893 1895 return self._quick_access_changeid_null
1894 1896
1895 1897 def __getitem__(self, changeid):
1896 1898 # dealing with special cases
1897 1899 if changeid is None:
1898 1900 return context.workingctx(self)
1899 1901 if isinstance(changeid, context.basectx):
1900 1902 return changeid
1901 1903
1902 1904 # dealing with multiple revisions
1903 1905 if isinstance(changeid, slice):
1904 1906 # wdirrev isn't contiguous so the slice shouldn't include it
1905 1907 return [
1906 1908 self[i]
1907 1909 for i in range(*changeid.indices(len(self)))
1908 1910 if i not in self.changelog.filteredrevs
1909 1911 ]
1910 1912
1911 1913 # dealing with some special values
1912 1914 quick_access = self._quick_access_changeid.get(changeid)
1913 1915 if quick_access is not None:
1914 1916 rev, node = quick_access
1915 1917 return context.changectx(self, rev, node, maybe_filtered=False)
1916 1918 if changeid == b'tip':
1917 1919 node = self.changelog.tip()
1918 1920 rev = self.changelog.rev(node)
1919 1921 return context.changectx(self, rev, node)
1920 1922
1921 1923 # dealing with arbitrary values
1922 1924 try:
1923 1925 if isinstance(changeid, int):
1924 1926 node = self.changelog.node(changeid)
1925 1927 rev = changeid
1926 1928 elif changeid == b'.':
1927 1929 # this is a hack to delay/avoid loading obsmarkers
1928 1930 # when we know that '.' won't be hidden
1929 1931 node = self.dirstate.p1()
1930 1932 rev = self.unfiltered().changelog.rev(node)
1931 1933 elif len(changeid) == self.nodeconstants.nodelen:
1932 1934 try:
1933 1935 node = changeid
1934 1936 rev = self.changelog.rev(changeid)
1935 1937 except error.FilteredLookupError:
1936 1938 changeid = hex(changeid) # for the error message
1937 1939 raise
1938 1940 except LookupError:
1939 1941 # check if it might have come from damaged dirstate
1940 1942 #
1941 1943 # XXX we could avoid the unfiltered if we had a recognizable
1942 1944 # exception for filtered changeset access
1943 1945 if (
1944 1946 self.local()
1945 1947 and changeid in self.unfiltered().dirstate.parents()
1946 1948 ):
1947 1949 msg = _(b"working directory has unknown parent '%s'!")
1948 1950 raise error.Abort(msg % short(changeid))
1949 1951 changeid = hex(changeid) # for the error message
1950 1952 raise
1951 1953
1952 1954 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1953 1955 node = bin(changeid)
1954 1956 rev = self.changelog.rev(node)
1955 1957 else:
1956 1958 raise error.ProgrammingError(
1957 1959 b"unsupported changeid '%s' of type %s"
1958 1960 % (changeid, pycompat.bytestr(type(changeid)))
1959 1961 )
1960 1962
1961 1963 return context.changectx(self, rev, node)
1962 1964
1963 1965 except (error.FilteredIndexError, error.FilteredLookupError):
1964 1966 raise error.FilteredRepoLookupError(
1965 1967 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1966 1968 )
1967 1969 except (IndexError, LookupError):
1968 1970 raise error.RepoLookupError(
1969 1971 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1970 1972 )
1971 1973 except error.WdirUnsupported:
1972 1974 return context.workingctx(self)
1973 1975
1974 1976 def __contains__(self, changeid):
1975 1977 """True if the given changeid exists"""
1976 1978 try:
1977 1979 self[changeid]
1978 1980 return True
1979 1981 except error.RepoLookupError:
1980 1982 return False
1981 1983
1982 1984 def __nonzero__(self):
1983 1985 return True
1984 1986
1985 1987 __bool__ = __nonzero__
1986 1988
1987 1989 def __len__(self):
1988 1990 # no need to pay the cost of repoview.changelog
1989 1991 unfi = self.unfiltered()
1990 1992 return len(unfi.changelog)
1991 1993
1992 1994 def __iter__(self):
1993 1995 return iter(self.changelog)
1994 1996
1995 1997 def revs(self, expr: bytes, *args):
1996 1998 """Find revisions matching a revset.
1997 1999
1998 2000 The revset is specified as a string ``expr`` that may contain
1999 2001 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2000 2002
2001 2003 Revset aliases from the configuration are not expanded. To expand
2002 2004 user aliases, consider calling ``scmutil.revrange()`` or
2003 2005 ``repo.anyrevs([expr], user=True)``.
2004 2006
2005 2007 Returns a smartset.abstractsmartset, which is a list-like interface
2006 2008 that contains integer revisions.
2007 2009 """
2008 2010 tree = revsetlang.spectree(expr, *args)
2009 2011 return revset.makematcher(tree)(self)
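# Illustrative usage sketch (not from the original source); %-formatting is
# handled by ``revsetlang.formatspec``, e.g.::
#
#   revs = repo.revs(b'ancestors(%d) and not public()', some_rev)
#
# where ``some_rev`` is a hypothetical integer revision number.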
2010 2012
2011 2013 def set(self, expr: bytes, *args):
2012 2014 """Find revisions matching a revset and emit changectx instances.
2013 2015
2014 2016 This is a convenience wrapper around ``revs()`` that iterates the
2015 2017 result and is a generator of changectx instances.
2016 2018
2017 2019 Revset aliases from the configuration are not expanded. To expand
2018 2020 user aliases, consider calling ``scmutil.revrange()``.
2019 2021 """
2020 2022 for r in self.revs(expr, *args):
2021 2023 yield self[r]
2022 2024
2023 2025 def anyrevs(self, specs: bytes, user=False, localalias=None):
2024 2026 """Find revisions matching one of the given revsets.
2025 2027
2026 2028 Revset aliases from the configuration are not expanded by default. To
2027 2029 expand user aliases, specify ``user=True``. To provide some local
2028 2030 definitions overriding user aliases, set ``localalias`` to
2029 2031 ``{name: definitionstring}``.
2030 2032 """
2031 2033 if specs == [b'null']:
2032 2034 return revset.baseset([nullrev])
2033 2035 if specs == [b'.']:
2034 2036 quick_data = self._quick_access_changeid.get(b'.')
2035 2037 if quick_data is not None:
2036 2038 return revset.baseset([quick_data[0]])
2037 2039 if user:
2038 2040 m = revset.matchany(
2039 2041 self.ui,
2040 2042 specs,
2041 2043 lookup=revset.lookupfn(self),
2042 2044 localalias=localalias,
2043 2045 )
2044 2046 else:
2045 2047 m = revset.matchany(None, specs, localalias=localalias)
2046 2048 return m(self)
2047 2049
2048 2050 def url(self) -> bytes:
2049 2051 return b'file:' + self.root
2050 2052
2051 2053 def hook(self, name, throw=False, **args):
2052 2054 """Call a hook, passing this repo instance.
2053 2055
2054 2056 This a convenience method to aid invoking hooks. Extensions likely
2055 2057 won't call this unless they have registered a custom hook or are
2056 2058 replacing code that is expected to call a hook.
2057 2059 """
2058 2060 return hook.hook(self.ui, self, name, throw, **args)
2059 2061
2060 2062 @filteredpropertycache
2061 2063 def _tagscache(self):
2062 2064 """Returns a tagscache object that contains various tags related
2063 2065 caches."""
2064 2066
2065 2067 # This simplifies its cache management by having one decorated
2066 2068 # function (this one) and the rest simply fetch things from it.
2067 2069 class tagscache:
2068 2070 def __init__(self):
2069 2071 # These two define the set of tags for this repository. tags
2070 2072 # maps tag name to node; tagtypes maps tag name to 'global' or
2071 2073 # 'local'. (Global tags are defined by .hgtags across all
2072 2074 # heads, and local tags are defined in .hg/localtags.)
2073 2075 # They constitute the in-memory cache of tags.
2074 2076 self.tags = self.tagtypes = None
2075 2077
2076 2078 self.nodetagscache = self.tagslist = None
2077 2079
2078 2080 cache = tagscache()
2079 2081 cache.tags, cache.tagtypes = self._findtags()
2080 2082
2081 2083 return cache
2082 2084
2083 2085 def tags(self):
2084 2086 '''return a mapping of tag to node'''
2085 2087 t = {}
2086 2088 if self.changelog.filteredrevs:
2087 2089 tags, tt = self._findtags()
2088 2090 else:
2089 2091 tags = self._tagscache.tags
2090 2092 rev = self.changelog.rev
2091 2093 for k, v in tags.items():
2092 2094 try:
2093 2095 # ignore tags to unknown nodes
2094 2096 rev(v)
2095 2097 t[k] = v
2096 2098 except (error.LookupError, ValueError):
2097 2099 pass
2098 2100 return t
2099 2101
2100 2102 def _findtags(self):
2101 2103 """Do the hard work of finding tags. Return a pair of dicts
2102 2104 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2103 2105 maps tag name to a string like \'global\' or \'local\'.
2104 2106 Subclasses or extensions are free to add their own tags, but
2105 2107 should be aware that the returned dicts will be retained for the
2106 2108 duration of the localrepo object."""
2107 2109
2108 2110 # XXX what tagtype should subclasses/extensions use? Currently
2109 2111 # mq and bookmarks add tags, but do not set the tagtype at all.
2110 2112 # Should each extension invent its own tag type? Should there
2111 2113 # be one tagtype for all such "virtual" tags? Or is the status
2112 2114 # quo fine?
2113 2115
2114 2116 # map tag name to (node, hist)
2115 2117 alltags = tagsmod.findglobaltags(self.ui, self)
2116 2118 # map tag name to tag type
2117 2119 tagtypes = {tag: b'global' for tag in alltags}
2118 2120
2119 2121 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2120 2122
2121 2123 # Build the return dicts. Have to re-encode tag names because
2122 2124 # the tags module always uses UTF-8 (in order not to lose info
2123 2125 # writing to the cache), but the rest of Mercurial wants them in
2124 2126 # local encoding.
2125 2127 tags = {}
2126 2128 for (name, (node, hist)) in alltags.items():
2127 2129 if node != self.nullid:
2128 2130 tags[encoding.tolocal(name)] = node
2129 2131 tags[b'tip'] = self.changelog.tip()
2130 2132 tagtypes = {
2131 2133 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2132 2134 }
2133 2135 return (tags, tagtypes)
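# Shape of the returned pair, with hypothetical tag names and placeholder
# node values (illustrative only)::
#
#   tags     == {b'tip': <binary node>, b'v1.0': <binary node>, ...}
#   tagtypes == {b'v1.0': b'global', b'some-local-tag': b'local'}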
2134 2136
2135 2137 def tagtype(self, tagname):
2136 2138 """
2137 2139 return the type of the given tag. Result can be:
2138 2140
2139 2141 'local' : a local tag
2140 2142 'global' : a global tag
2141 2143 None : tag does not exist
2142 2144 """
2143 2145
2144 2146 return self._tagscache.tagtypes.get(tagname)
2145 2147
2146 2148 def tagslist(self):
2147 2149 '''return a list of tags ordered by revision'''
2148 2150 if not self._tagscache.tagslist:
2149 2151 l = []
2150 2152 for t, n in self.tags().items():
2151 2153 l.append((self.changelog.rev(n), t, n))
2152 2154 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2153 2155
2154 2156 return self._tagscache.tagslist
2155 2157
2156 2158 def nodetags(self, node):
2157 2159 '''return the tags associated with a node'''
2158 2160 if not self._tagscache.nodetagscache:
2159 2161 nodetagscache = {}
2160 2162 for t, n in self._tagscache.tags.items():
2161 2163 nodetagscache.setdefault(n, []).append(t)
2162 2164 for tags in nodetagscache.values():
2163 2165 tags.sort()
2164 2166 self._tagscache.nodetagscache = nodetagscache
2165 2167 return self._tagscache.nodetagscache.get(node, [])
2166 2168
2167 2169 def nodebookmarks(self, node):
2168 2170 """return the list of bookmarks pointing to the specified node"""
2169 2171 return self._bookmarks.names(node)
2170 2172
2171 2173 def branchmap(self):
2172 2174 """returns a dictionary {branch: [branchheads]} with branchheads
2173 2175 ordered by increasing revision number"""
2174 2176 return self._branchcaches[self]
2175 2177
2176 2178 @unfilteredmethod
2177 2179 def revbranchcache(self):
2178 2180 if not self._revbranchcache:
2179 2181 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2180 2182 return self._revbranchcache
2181 2183
2182 2184 def register_changeset(self, rev, changelogrevision):
2183 2185 self.revbranchcache().setdata(rev, changelogrevision)
2184 2186
2185 2187 def branchtip(self, branch, ignoremissing=False):
2186 2188 """return the tip node for a given branch
2187 2189
2188 2190 If ignoremissing is True, then this method will not raise an error.
2189 2191 This is helpful for callers that only expect None for a missing branch
2190 2192 (e.g. namespace).
2191 2193
2192 2194 """
2193 2195 try:
2194 2196 return self.branchmap().branchtip(branch)
2195 2197 except KeyError:
2196 2198 if not ignoremissing:
2197 2199 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2198 2200 else:
2199 2201 pass
2200 2202
2201 2203 def lookup(self, key):
2202 2204 node = scmutil.revsymbol(self, key).node()
2203 2205 if node is None:
2204 2206 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2205 2207 return node
2206 2208
2207 2209 def lookupbranch(self, key):
2208 2210 if self.branchmap().hasbranch(key):
2209 2211 return key
2210 2212
2211 2213 return scmutil.revsymbol(self, key).branch()
2212 2214
2213 2215 def known(self, nodes):
2214 2216 cl = self.changelog
2215 2217 get_rev = cl.index.get_rev
2216 2218 filtered = cl.filteredrevs
2217 2219 result = []
2218 2220 for n in nodes:
2219 2221 r = get_rev(n)
2220 2222 resp = not (r is None or r in filtered)
2221 2223 result.append(resp)
2222 2224 return result
2223 2225
2224 2226 def local(self):
2225 2227 return self
2226 2228
2227 2229 def publishing(self):
2228 2230 # it's safe (and desirable) to trust the publish flag unconditionally
2229 2231 # so that we don't finalize changes shared between users via ssh or nfs
2230 2232 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2231 2233
2232 2234 def cancopy(self):
2233 2235 # so statichttprepo's override of local() works
2234 2236 if not self.local():
2235 2237 return False
2236 2238 if not self.publishing():
2237 2239 return True
2238 2240 # if publishing we can't copy if there is filtered content
2239 2241 return not self.filtered(b'visible').changelog.filteredrevs
2240 2242
2241 2243 def shared(self):
2242 2244 '''the type of shared repository (None if not shared)'''
2243 2245 if self.sharedpath != self.path:
2244 2246 return b'store'
2245 2247 return None
2246 2248
2247 2249 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2248 2250 return self.vfs.reljoin(self.root, f, *insidef)
2249 2251
2250 2252 def setparents(self, p1, p2=None):
2251 2253 if p2 is None:
2252 2254 p2 = self.nullid
2253 2255 self[None].setparents(p1, p2)
2254 2256 self._quick_access_changeid_invalidate()
2255 2257
2256 2258 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2257 2259 """changeid must be a changeset revision, if specified.
2258 2260 fileid can be a file revision or node."""
2259 2261 return context.filectx(
2260 2262 self, path, changeid, fileid, changectx=changectx
2261 2263 )
2262 2264
2263 2265 def getcwd(self) -> bytes:
2264 2266 return self.dirstate.getcwd()
2265 2267
2266 2268 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2267 2269 return self.dirstate.pathto(f, cwd)
2268 2270
2269 2271 def _loadfilter(self, filter):
2270 2272 if filter not in self._filterpats:
2271 2273 l = []
2272 2274 for pat, cmd in self.ui.configitems(filter):
2273 2275 if cmd == b'!':
2274 2276 continue
2275 2277 mf = matchmod.match(self.root, b'', [pat])
2276 2278 fn = None
2277 2279 params = cmd
2278 2280 for name, filterfn in self._datafilters.items():
2279 2281 if cmd.startswith(name):
2280 2282 fn = filterfn
2281 2283 params = cmd[len(name) :].lstrip()
2282 2284 break
2283 2285 if not fn:
2284 2286 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2285 2287 fn.__name__ = 'commandfilter'
2286 2288 # Wrap old filters not supporting keyword arguments
2287 2289 if not pycompat.getargspec(fn)[2]:
2288 2290 oldfn = fn
2289 2291 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2290 2292 fn.__name__ = 'compat-' + oldfn.__name__
2291 2293 l.append((mf, fn, params))
2292 2294 self._filterpats[filter] = l
2293 2295 return self._filterpats[filter]
2294 2296
2295 2297 def _filter(self, filterpats, filename, data):
2296 2298 for mf, fn, cmd in filterpats:
2297 2299 if mf(filename):
2298 2300 self.ui.debug(
2299 2301 b"filtering %s through %s\n"
2300 2302 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2301 2303 )
2302 2304 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2303 2305 break
2304 2306
2305 2307 return data
2306 2308
2307 2309 @unfilteredpropertycache
2308 2310 def _encodefilterpats(self):
2309 2311 return self._loadfilter(b'encode')
2310 2312
2311 2313 @unfilteredpropertycache
2312 2314 def _decodefilterpats(self):
2313 2315 return self._loadfilter(b'decode')
2314 2316
2315 2317 def adddatafilter(self, name, filter):
2316 2318 self._datafilters[name] = filter
2317 2319
2318 2320 def wread(self, filename: bytes) -> bytes:
2319 2321 if self.wvfs.islink(filename):
2320 2322 data = self.wvfs.readlink(filename)
2321 2323 else:
2322 2324 data = self.wvfs.read(filename)
2323 2325 return self._filter(self._encodefilterpats, filename, data)
2324 2326
2325 2327 def wwrite(
2326 2328 self,
2327 2329 filename: bytes,
2328 2330 data: bytes,
2329 2331 flags: bytes,
2330 2332 backgroundclose=False,
2331 2333 **kwargs
2332 2334 ) -> int:
2333 2335 """write ``data`` into ``filename`` in the working directory
2334 2336
2335 2337 This returns the length of the written (maybe decoded) data.
2336 2338 """
2337 2339 data = self._filter(self._decodefilterpats, filename, data)
2338 2340 if b'l' in flags:
2339 2341 self.wvfs.symlink(data, filename)
2340 2342 else:
2341 2343 self.wvfs.write(
2342 2344 filename, data, backgroundclose=backgroundclose, **kwargs
2343 2345 )
2344 2346 if b'x' in flags:
2345 2347 self.wvfs.setflags(filename, False, True)
2346 2348 else:
2347 2349 self.wvfs.setflags(filename, False, False)
2348 2350 return len(data)
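# Illustrative calls (hypothetical filenames and data), matching the flag
# handling above: no flag for a regular file, b'l' for a symlink, b'x' for an
# executable file::
#
#   repo.wwrite(b'foo.txt', b'new content\n', b'')
#   repo.wwrite(b'some-link', b'target', b'l')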
2349 2351
2350 2352 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2351 2353 return self._filter(self._decodefilterpats, filename, data)
2352 2354
2353 2355 def currenttransaction(self):
2354 2356 """return the current transaction or None if non exists"""
2355 2357 if self._transref:
2356 2358 tr = self._transref()
2357 2359 else:
2358 2360 tr = None
2359 2361
2360 2362 if tr and tr.running():
2361 2363 return tr
2362 2364 return None
2363 2365
2364 2366 def transaction(self, desc, report=None):
2365 2367 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2366 2368 b'devel', b'check-locks'
2367 2369 ):
2368 2370 if self._currentlock(self._lockref) is None:
2369 2371 raise error.ProgrammingError(b'transaction requires locking')
2370 2372 tr = self.currenttransaction()
2371 2373 if tr is not None:
2372 2374 return tr.nest(name=desc)
2373 2375
2374 2376 # abort here if the journal already exists
2375 2377 if self.svfs.exists(b"journal"):
2376 2378 raise error.RepoError(
2377 2379 _(b"abandoned transaction found"),
2378 2380 hint=_(b"run 'hg recover' to clean up transaction"),
2379 2381 )
2380 2382
2381 2383 idbase = b"%.40f#%f" % (random.random(), time.time())
2382 2384 ha = hex(hashutil.sha1(idbase).digest())
2383 2385 txnid = b'TXN:' + ha
2384 2386 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2385 2387
2386 2388 self._writejournal(desc)
2387 2389 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2388 2390 if report:
2389 2391 rp = report
2390 2392 else:
2391 2393 rp = self.ui.warn
2392 2394 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2393 2395 # we must avoid cyclic reference between repo and transaction.
2394 2396 reporef = weakref.ref(self)
2395 2397 # Code to track tag movement
2396 2398 #
2397 2399 # Since tags are all handled as file content, it is actually quite hard
2398 2400 # to track these movements from a code perspective. So we fall back to
2399 2401 # tracking at the repository level. One could envision tracking changes
2400 2402 # to the '.hgtags' file through changegroup apply, but that fails to
2401 2403 # cope with cases where a transaction exposes new heads without a
2402 2404 # changegroup being involved (eg: phase movement).
2403 2405 #
2404 2406 # For now, we gate the feature behind a flag since this likely comes
2405 2407 # with performance impacts. The current code runs more often than needed
2406 2408 # and does not use caches as much as it could. The current focus is on
2407 2409 # the behavior of the feature so we disable it by default. The flag
2408 2410 # will be removed when we are happy with the performance impact.
2409 2411 #
2410 2412 # Once this feature is no longer experimental move the following
2411 2413 # documentation to the appropriate help section:
2412 2414 #
2413 2415 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2414 2416 # tags (new or changed or deleted tags). In addition the details of
2415 2417 # these changes are made available in a file at:
2416 2418 # ``REPOROOT/.hg/changes/tags.changes``.
2417 2419 # Make sure you check for HG_TAG_MOVED before reading that file, as it
2418 2420 # might exist from a previous transaction even if no tags were touched
2419 2421 # in this one. Changes are recorded in a line-based format::
2420 2422 #
2421 2423 # <action> <hex-node> <tag-name>\n
2422 2424 #
2423 2425 # Actions are defined as follows:
2424 2426 # "-R": tag is removed,
2425 2427 # "+A": tag is added,
2426 2428 # "-M": tag is moved (old value),
2427 2429 # "+M": tag is moved (new value),
2428 2430 tracktags = lambda x: None
2429 2431 # experimental config: experimental.hook-track-tags
2430 2432 shouldtracktags = self.ui.configbool(
2431 2433 b'experimental', b'hook-track-tags'
2432 2434 )
2433 2435 if desc != b'strip' and shouldtracktags:
2434 2436 oldheads = self.changelog.headrevs()
2435 2437
2436 2438 def tracktags(tr2):
2437 2439 repo = reporef()
2438 2440 assert repo is not None # help pytype
2439 2441 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2440 2442 newheads = repo.changelog.headrevs()
2441 2443 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2442 2444 # note: we compare lists here.
2443 2445 # As we do it only once, building a set would not be cheaper.
2444 2446 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2445 2447 if changes:
2446 2448 tr2.hookargs[b'tag_moved'] = b'1'
2447 2449 with repo.vfs(
2448 2450 b'changes/tags.changes', b'w', atomictemp=True
2449 2451 ) as changesfile:
2450 2452 # note: we do not register the file with the transaction
2451 2453 # because we need it to still exist when the transaction
2452 2454 # is closed (for txnclose hooks)
2453 2455 tagsmod.writediff(changesfile, changes)
2454 2456
2455 2457 def validate(tr2):
2456 2458 """will run pre-closing hooks"""
2457 2459 # XXX the transaction API is a bit lacking here so we take a hacky
2458 2460 # path for now
2459 2461 #
2460 2462 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2461 2463 # dict is copied before these run. In addition, we need the data
2462 2464 # available to in-memory hooks too.
2463 2465 #
2464 2466 # Moreover, we also need to make sure this runs before txnclose
2465 2467 # hooks and there is no "pending" mechanism that would execute
2466 2468 # logic only if hooks are about to run.
2467 2469 #
2468 2470 # Fixing this limitation of the transaction is also needed to track
2469 2471 # other families of changes (bookmarks, phases, obsolescence).
2470 2472 #
2471 2473 # This will have to be fixed before we remove the experimental
2472 2474 # gating.
2473 2475 tracktags(tr2)
2474 2476 repo = reporef()
2475 2477 assert repo is not None # help pytype
2476 2478
2477 2479 singleheadopt = (b'experimental', b'single-head-per-branch')
2478 2480 singlehead = repo.ui.configbool(*singleheadopt)
2479 2481 if singlehead:
2480 2482 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2481 2483 accountclosed = singleheadsub.get(
2482 2484 b"account-closed-heads", False
2483 2485 )
2484 2486 if singleheadsub.get(b"public-changes-only", False):
2485 2487 filtername = b"immutable"
2486 2488 else:
2487 2489 filtername = b"visible"
2488 2490 scmutil.enforcesinglehead(
2489 2491 repo, tr2, desc, accountclosed, filtername
2490 2492 )
2491 2493 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2492 2494 for name, (old, new) in sorted(
2493 2495 tr.changes[b'bookmarks'].items()
2494 2496 ):
2495 2497 args = tr.hookargs.copy()
2496 2498 args.update(bookmarks.preparehookargs(name, old, new))
2497 2499 repo.hook(
2498 2500 b'pretxnclose-bookmark',
2499 2501 throw=True,
2500 2502 **pycompat.strkwargs(args)
2501 2503 )
2502 2504 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2503 2505 cl = repo.unfiltered().changelog
2504 2506 for revs, (old, new) in tr.changes[b'phases']:
2505 2507 for rev in revs:
2506 2508 args = tr.hookargs.copy()
2507 2509 node = hex(cl.node(rev))
2508 2510 args.update(phases.preparehookargs(node, old, new))
2509 2511 repo.hook(
2510 2512 b'pretxnclose-phase',
2511 2513 throw=True,
2512 2514 **pycompat.strkwargs(args)
2513 2515 )
2514 2516
2515 2517 repo.hook(
2516 2518 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2517 2519 )
2518 2520
2519 2521 def releasefn(tr, success):
2520 2522 repo = reporef()
2521 2523 if repo is None:
2522 2524 # If the repo has been GC'd (and this release function is being
2523 2525 # called from transaction.__del__), there's not much we can do,
2524 2526 # so just leave the unfinished transaction there and let the
2525 2527 # user run `hg recover`.
2526 2528 return
2527 2529 if success:
2528 2530 # this should be explicitly invoked here, because
2529 2531 # in-memory changes aren't written out at closing
2530 2532 # transaction, if tr.addfilegenerator (via
2531 2533 # dirstate.write or so) isn't invoked while
2532 2534 # transaction running
2533 2535 repo.dirstate.write(None)
2534 2536 else:
2535 2537 # discard all changes (including ones already written
2536 2538 # out) in this transaction
2537 2539 narrowspec.restorebackup(self, b'journal.narrowspec')
2538 2540 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2539 2541 repo.dirstate.restorebackup(None, b'journal.dirstate')
2540 2542
2541 2543 repo.invalidate(clearfilecache=True)
2542 2544
2543 2545 tr = transaction.transaction(
2544 2546 rp,
2545 2547 self.svfs,
2546 2548 vfsmap,
2547 2549 b"journal",
2548 2550 b"undo",
2549 2551 aftertrans(renames),
2550 2552 self.store.createmode,
2551 2553 validator=validate,
2552 2554 releasefn=releasefn,
2553 2555 checkambigfiles=_cachedfiles,
2554 2556 name=desc,
2555 2557 )
2556 2558 tr.changes[b'origrepolen'] = len(self)
2557 2559 tr.changes[b'obsmarkers'] = set()
2558 2560 tr.changes[b'phases'] = []
2559 2561 tr.changes[b'bookmarks'] = {}
2560 2562
2561 2563 tr.hookargs[b'txnid'] = txnid
2562 2564 tr.hookargs[b'txnname'] = desc
2563 2565 tr.hookargs[b'changes'] = tr.changes
2564 2566 # note: writing the fncache only during finalize means that the file is
2565 2567 # outdated when running hooks. As fncache is used for streaming clones,
2566 2568 # this is not expected to break anything that happens during the hooks.
2567 2569 tr.addfinalize(b'flush-fncache', self.store.write)
2568 2570
2569 2571 def txnclosehook(tr2):
2570 2572 """To be run if transaction is successful, will schedule a hook run"""
2571 2573 # Don't reference tr2 in hook() so we don't hold a reference.
2572 2574 # This reduces memory consumption when there are multiple
2573 2575 # transactions per lock. This can likely go away if issue5045
2574 2576 # fixes the function accumulation.
2575 2577 hookargs = tr2.hookargs
2576 2578
2577 2579 def hookfunc(unused_success):
2578 2580 repo = reporef()
2579 2581 assert repo is not None # help pytype
2580 2582
2581 2583 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2582 2584 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2583 2585 for name, (old, new) in bmchanges:
2584 2586 args = tr.hookargs.copy()
2585 2587 args.update(bookmarks.preparehookargs(name, old, new))
2586 2588 repo.hook(
2587 2589 b'txnclose-bookmark',
2588 2590 throw=False,
2589 2591 **pycompat.strkwargs(args)
2590 2592 )
2591 2593
2592 2594 if hook.hashook(repo.ui, b'txnclose-phase'):
2593 2595 cl = repo.unfiltered().changelog
2594 2596 phasemv = sorted(
2595 2597 tr.changes[b'phases'], key=lambda r: r[0][0]
2596 2598 )
2597 2599 for revs, (old, new) in phasemv:
2598 2600 for rev in revs:
2599 2601 args = tr.hookargs.copy()
2600 2602 node = hex(cl.node(rev))
2601 2603 args.update(phases.preparehookargs(node, old, new))
2602 2604 repo.hook(
2603 2605 b'txnclose-phase',
2604 2606 throw=False,
2605 2607 **pycompat.strkwargs(args)
2606 2608 )
2607 2609
2608 2610 repo.hook(
2609 2611 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2610 2612 )
2611 2613
2612 2614 repo = reporef()
2613 2615 assert repo is not None # help pytype
2614 2616 repo._afterlock(hookfunc)
2615 2617
2616 2618 tr.addfinalize(b'txnclose-hook', txnclosehook)
2617 2619 # Include a leading "-" to make it happen before the transaction summary
2618 2620 # reports registered via scmutil.registersummarycallback() whose names
2619 2621 # are 00-txnreport etc. That way, the caches will be warm when the
2620 2622 # callbacks run.
2621 2623 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2622 2624
2623 2625 def txnaborthook(tr2):
2624 2626 """To be run if transaction is aborted"""
2625 2627 repo = reporef()
2626 2628 assert repo is not None # help pytype
2627 2629 repo.hook(
2628 2630 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2629 2631 )
2630 2632
2631 2633 tr.addabort(b'txnabort-hook', txnaborthook)
2632 2634 # avoid eager cache invalidation. in-memory data should be identical
2633 2635 # to stored data if transaction has no error.
2634 2636 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2635 2637 self._transref = weakref.ref(tr)
2636 2638 scmutil.registersummarycallback(self, tr, desc)
2637 2639 return tr
2638 2640
2639 2641 def _journalfiles(self):
2640 2642 first = (
2641 2643 (self.svfs, b'journal'),
2642 2644 (self.svfs, b'journal.narrowspec'),
2643 2645 (self.vfs, b'journal.narrowspec.dirstate'),
2644 2646 (self.vfs, b'journal.dirstate'),
2645 2647 )
2646 2648 middle = []
2647 2649 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2648 2650 if dirstate_data is not None:
2649 2651 middle.append((self.vfs, dirstate_data))
2650 2652 end = (
2651 2653 (self.vfs, b'journal.branch'),
2652 2654 (self.vfs, b'journal.desc'),
2653 2655 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2654 2656 (self.svfs, b'journal.phaseroots'),
2655 2657 )
2656 2658 return first + tuple(middle) + end
2657 2659
2658 2660 def undofiles(self):
2659 2661 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2660 2662
2661 2663 @unfilteredmethod
2662 2664 def _writejournal(self, desc):
2663 2665 self.dirstate.savebackup(None, b'journal.dirstate')
2664 2666 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2665 2667 narrowspec.savebackup(self, b'journal.narrowspec')
2666 2668 self.vfs.write(
2667 2669 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2668 2670 )
2669 2671 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2670 2672 bookmarksvfs = bookmarks.bookmarksvfs(self)
2671 2673 bookmarksvfs.write(
2672 2674 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2673 2675 )
2674 2676 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2675 2677
2676 2678 def recover(self):
2677 2679 with self.lock():
2678 2680 if self.svfs.exists(b"journal"):
2679 2681 self.ui.status(_(b"rolling back interrupted transaction\n"))
2680 2682 vfsmap = {
2681 2683 b'': self.svfs,
2682 2684 b'plain': self.vfs,
2683 2685 }
2684 2686 transaction.rollback(
2685 2687 self.svfs,
2686 2688 vfsmap,
2687 2689 b"journal",
2688 2690 self.ui.warn,
2689 2691 checkambigfiles=_cachedfiles,
2690 2692 )
2691 2693 self.invalidate()
2692 2694 return True
2693 2695 else:
2694 2696 self.ui.warn(_(b"no interrupted transaction available\n"))
2695 2697 return False
2696 2698
2697 2699 def rollback(self, dryrun=False, force=False):
2698 2700 wlock = lock = dsguard = None
2699 2701 try:
2700 2702 wlock = self.wlock()
2701 2703 lock = self.lock()
2702 2704 if self.svfs.exists(b"undo"):
2703 2705 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2704 2706
2705 2707 return self._rollback(dryrun, force, dsguard)
2706 2708 else:
2707 2709 self.ui.warn(_(b"no rollback information available\n"))
2708 2710 return 1
2709 2711 finally:
2710 2712 release(dsguard, lock, wlock)
2711 2713
2712 2714 @unfilteredmethod # Until we get smarter cache management
2713 2715 def _rollback(self, dryrun, force, dsguard):
2714 2716 ui = self.ui
2715 2717 try:
2716 2718 args = self.vfs.read(b'undo.desc').splitlines()
2717 2719 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2718 2720 if len(args) >= 3:
2719 2721 detail = args[2]
2720 2722 oldtip = oldlen - 1
2721 2723
2722 2724 if detail and ui.verbose:
2723 2725 msg = _(
2724 2726 b'repository tip rolled back to revision %d'
2725 2727 b' (undo %s: %s)\n'
2726 2728 ) % (oldtip, desc, detail)
2727 2729 else:
2728 2730 msg = _(
2729 2731 b'repository tip rolled back to revision %d (undo %s)\n'
2730 2732 ) % (oldtip, desc)
2731 2733 except IOError:
2732 2734 msg = _(b'rolling back unknown transaction\n')
2733 2735 desc = None
2734 2736
2735 2737 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2736 2738 raise error.Abort(
2737 2739 _(
2738 2740 b'rollback of last commit while not checked out '
2739 2741 b'may lose data'
2740 2742 ),
2741 2743 hint=_(b'use -f to force'),
2742 2744 )
2743 2745
2744 2746 ui.status(msg)
2745 2747 if dryrun:
2746 2748 return 0
2747 2749
2748 2750 parents = self.dirstate.parents()
2749 2751 self.destroying()
2750 2752 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2751 2753 transaction.rollback(
2752 2754 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2753 2755 )
2754 2756 bookmarksvfs = bookmarks.bookmarksvfs(self)
2755 2757 if bookmarksvfs.exists(b'undo.bookmarks'):
2756 2758 bookmarksvfs.rename(
2757 2759 b'undo.bookmarks', b'bookmarks', checkambig=True
2758 2760 )
2759 2761 if self.svfs.exists(b'undo.phaseroots'):
2760 2762 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2761 2763 self.invalidate()
2762 2764
2763 2765 has_node = self.changelog.index.has_node
2764 2766 parentgone = any(not has_node(p) for p in parents)
2765 2767 if parentgone:
2766 2768 # prevent dirstateguard from overwriting the already restored one
2767 2769 dsguard.close()
2768 2770
2769 2771 narrowspec.restorebackup(self, b'undo.narrowspec')
2770 2772 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2771 2773 self.dirstate.restorebackup(None, b'undo.dirstate')
2772 2774 try:
2773 2775 branch = self.vfs.read(b'undo.branch')
2774 2776 self.dirstate.setbranch(encoding.tolocal(branch))
2775 2777 except IOError:
2776 2778 ui.warn(
2777 2779 _(
2778 2780 b'named branch could not be reset: '
2779 2781 b'current branch is still \'%s\'\n'
2780 2782 )
2781 2783 % self.dirstate.branch()
2782 2784 )
2783 2785
2784 2786 parents = tuple([p.rev() for p in self[None].parents()])
2785 2787 if len(parents) > 1:
2786 2788 ui.status(
2787 2789 _(
2788 2790 b'working directory now based on '
2789 2791 b'revisions %d and %d\n'
2790 2792 )
2791 2793 % parents
2792 2794 )
2793 2795 else:
2794 2796 ui.status(
2795 2797 _(b'working directory now based on revision %d\n') % parents
2796 2798 )
2797 2799 mergestatemod.mergestate.clean(self)
2798 2800
2799 2801 # TODO: if we know which new heads may result from this rollback, pass
2800 2802 # them to destroy(), which will prevent the branchhead cache from being
2801 2803 # invalidated.
2802 2804 self.destroyed()
2803 2805 return 0
2804 2806
2805 2807 def _buildcacheupdater(self, newtransaction):
2806 2808 """called during transaction to build the callback updating cache
2807 2809
2808 2810 Lives on the repository to help extensions that might want to augment
2809 2811 this logic. For this purpose, the created transaction is passed to the
2810 2812 method.
2811 2813 """
2812 2814 # we must avoid cyclic reference between repo and transaction.
2813 2815 reporef = weakref.ref(self)
2814 2816
2815 2817 def updater(tr):
2816 2818 repo = reporef()
2817 2819 assert repo is not None # help pytype
2818 2820 repo.updatecaches(tr)
2819 2821
2820 2822 return updater
2821 2823
2822 2824 @unfilteredmethod
2823 2825 def updatecaches(self, tr=None, full=False, caches=None):
2824 2826 """warm appropriate caches
2825 2827
2826 2828 If this function is called after a transaction has closed, the transaction
2827 2829 will be available in the 'tr' argument. This can be used to selectively
2828 2830 update caches relevant to the changes in that transaction.
2829 2831
2830 2832 If 'full' is set, make sure all caches the function knows about have
2831 2833 up-to-date data. Even the ones usually loaded more lazily.
2832 2834
2833 2835 The `full` argument can take a special "post-clone" value. In this case
2834 2836 the cache warming is done after a clone, and one of the slower caches
2835 2837 might be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2836 2838 as we plan for a cleaner way to deal with this in 5.9.
2837 2839 """
2838 2840 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2839 2841 # During strip, many caches are invalid but
2840 2842 # later call to `destroyed` will refresh them.
2841 2843 return
2842 2844
2843 2845 unfi = self.unfiltered()
2844 2846
2845 2847 if full:
2846 2848 msg = (
2847 2849 "`full` argument for `repo.updatecaches` is deprecated\n"
2848 2850 "(use `caches=repository.CACHE_ALL` instead)"
2849 2851 )
2850 2852 self.ui.deprecwarn(msg, b"5.9")
2851 2853 caches = repository.CACHES_ALL
2852 2854 if full == b"post-clone":
2853 2855 caches = repository.CACHES_POST_CLONE
2854 2856 caches = repository.CACHES_ALL
2855 2857 elif caches is None:
2856 2858 caches = repository.CACHES_DEFAULT
2857 2859
2858 2860 if repository.CACHE_BRANCHMAP_SERVED in caches:
2859 2861 if tr is None or tr.changes[b'origrepolen'] < len(self):
2860 2862 # accessing the 'served' branchmap should refresh all the others,
2861 2863 self.ui.debug(b'updating the branch cache\n')
2862 2864 self.filtered(b'served').branchmap()
2863 2865 self.filtered(b'served.hidden').branchmap()
2864 2866 # flush all possibly delayed writes.
2865 2867 self._branchcaches.write_delayed(self)
2866 2868
2867 2869 if repository.CACHE_CHANGELOG_CACHE in caches:
2868 2870 self.changelog.update_caches(transaction=tr)
2869 2871
2870 2872 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2871 2873 self.manifestlog.update_caches(transaction=tr)
2872 2874
2873 2875 if repository.CACHE_REV_BRANCH in caches:
2874 2876 rbc = unfi.revbranchcache()
2875 2877 for r in unfi.changelog:
2876 2878 rbc.branchinfo(r)
2877 2879 rbc.write()
2878 2880
2879 2881 if repository.CACHE_FULL_MANIFEST in caches:
2880 2882 # ensure the working copy parents are in the manifestfulltextcache
2881 2883 for ctx in self[b'.'].parents():
2882 2884 ctx.manifest() # accessing the manifest is enough
2883 2885
2884 2886 if repository.CACHE_FILE_NODE_TAGS in caches:
2885 2887 # accessing fnode cache warms the cache
2886 2888 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2887 2889
2888 2890 if repository.CACHE_TAGS_DEFAULT in caches:
2889 2891 # accessing tags warms the cache
2890 2892 self.tags()
2891 2893 if repository.CACHE_TAGS_SERVED in caches:
2892 2894 self.filtered(b'served').tags()
2893 2895
2894 2896 if repository.CACHE_BRANCHMAP_ALL in caches:
2895 2897 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2896 2898 # so we're forcing a write to cause these caches to be warmed up
2897 2899 # even if they haven't explicitly been requested yet (if they've
2898 2900 # never been used by hg, they won't ever have been written, even if
2899 2901 # they're a subset of another kind of cache that *has* been used).
2900 2902 for filt in repoview.filtertable.keys():
2901 2903 filtered = self.filtered(filt)
2902 2904 filtered.branchmap().write(filtered)
2903 2905
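As a rough usage sketch of the cache-warming entry point above (names as used in this module; the calls themselves are illustrative, not upstream code), the deprecation message points to `caches=repository.CACHES_ALL` as the replacement for the old `full=True` form:

    # warm every cache this method knows about (successor of full=True)
    repo.updatecaches(caches=repository.CACHES_ALL)

    # outside a transaction, the default call warms repository.CACHES_DEFAULT
    repo.updatecaches()
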
2904 2906 def invalidatecaches(self):
2905 2907
2906 2908 if '_tagscache' in vars(self):
2907 2909 # can't use delattr on proxy
2908 2910 del self.__dict__['_tagscache']
2909 2911
2910 2912 self._branchcaches.clear()
2911 2913 self.invalidatevolatilesets()
2912 2914 self._sparsesignaturecache.clear()
2913 2915
2914 2916 def invalidatevolatilesets(self):
2915 2917 self.filteredrevcache.clear()
2916 2918 obsolete.clearobscaches(self)
2917 2919 self._quick_access_changeid_invalidate()
2918 2920
2919 2921 def invalidatedirstate(self):
2920 2922 """Invalidates the dirstate, causing the next call to dirstate
2921 2923 to check if it was modified since the last time it was read,
2922 2924 rereading it if it has.
2923 2925
2924 2926 This is different from dirstate.invalidate() in that it doesn't always
2925 2927 reread the dirstate. Use dirstate.invalidate() if you want to
2926 2928 explicitly read the dirstate again (i.e. restoring it to a previous
2927 2929 known good state)."""
2928 2930 if hasunfilteredcache(self, 'dirstate'):
2929 2931 for k in self.dirstate._filecache:
2930 2932 try:
2931 2933 delattr(self.dirstate, k)
2932 2934 except AttributeError:
2933 2935 pass
2934 2936 delattr(self.unfiltered(), 'dirstate')
2935 2937
2936 2938 def invalidate(self, clearfilecache=False):
2937 2939 """Invalidates both store and non-store parts other than dirstate
2938 2940
2939 2941 If a transaction is running, invalidation of store is omitted,
2940 2942 because discarding in-memory changes might cause inconsistency
2941 2943 (e.g. incomplete fncache causes unintentional failure, but
2942 2944 a redundant one doesn't).
2943 2945 """
2944 2946 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2945 2947 for k in list(self._filecache.keys()):
2946 2948 # dirstate is invalidated separately in invalidatedirstate()
2947 2949 if k == b'dirstate':
2948 2950 continue
2949 2951 if (
2950 2952 k == b'changelog'
2951 2953 and self.currenttransaction()
2952 2954 and self.changelog._delayed
2953 2955 ):
2954 2956 # The changelog object may store unwritten revisions. We don't
2955 2957 # want to lose them.
2956 2958 # TODO: Solve the problem instead of working around it.
2957 2959 continue
2958 2960
2959 2961 if clearfilecache:
2960 2962 del self._filecache[k]
2961 2963 try:
2962 2964 delattr(unfiltered, k)
2963 2965 except AttributeError:
2964 2966 pass
2965 2967 self.invalidatecaches()
2966 2968 if not self.currenttransaction():
2967 2969 # TODO: Changing contents of store outside transaction
2968 2970 # causes inconsistency. We should make in-memory store
2969 2971 # changes detectable, and abort if changed.
2970 2972 self.store.invalidatecaches()
2971 2973
2972 2974 def invalidateall(self):
2973 2975 """Fully invalidates both store and non-store parts, causing the
2974 2976 subsequent operation to reread any outside changes."""
2975 2977 # extension should hook this to invalidate its caches
2976 2978 self.invalidate()
2977 2979 self.invalidatedirstate()
2978 2980
2979 2981 @unfilteredmethod
2980 2982 def _refreshfilecachestats(self, tr):
2981 2983 """Reload stats of cached files so that they are flagged as valid"""
2982 2984 for k, ce in self._filecache.items():
2983 2985 k = pycompat.sysstr(k)
2984 2986 if k == 'dirstate' or k not in self.__dict__:
2985 2987 continue
2986 2988 ce.refresh()
2987 2989
2988 2990 def _lock(
2989 2991 self,
2990 2992 vfs,
2991 2993 lockname,
2992 2994 wait,
2993 2995 releasefn,
2994 2996 acquirefn,
2995 2997 desc,
2996 2998 ):
2997 2999 timeout = 0
2998 3000 warntimeout = 0
2999 3001 if wait:
3000 3002 timeout = self.ui.configint(b"ui", b"timeout")
3001 3003 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3002 3004 # internal config: ui.signal-safe-lock
3003 3005 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3004 3006
3005 3007 l = lockmod.trylock(
3006 3008 self.ui,
3007 3009 vfs,
3008 3010 lockname,
3009 3011 timeout,
3010 3012 warntimeout,
3011 3013 releasefn=releasefn,
3012 3014 acquirefn=acquirefn,
3013 3015 desc=desc,
3014 3016 signalsafe=signalsafe,
3015 3017 )
3016 3018 return l
3017 3019
3018 3020 def _afterlock(self, callback):
3019 3021 """add a callback to be run when the repository is fully unlocked
3020 3022
3021 3023 The callback will be executed when the outermost lock is released
3022 3024 (with wlock being higher level than 'lock')."""
3023 3025 for ref in (self._wlockref, self._lockref):
3024 3026 l = ref and ref()
3025 3027 if l and l.held:
3026 3028 l.postrelease.append(callback)
3027 3029 break
3028 3030 else: # no lock has been found.
3029 3031 callback(True)
3030 3032
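A minimal sketch of the `_afterlock` contract described above, assuming a `repo` object from this module; the callback name and message are hypothetical. The callback receives a success flag and runs once the outermost lock is released, or immediately when no lock is held:

    def _deferred(success):
        # queued on lock.postrelease, or invoked right away with True
        if success:
            repo.ui.status(b'running deferred work after unlock\n')

    repo._afterlock(_deferred)
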
3031 3033 def lock(self, wait=True):
3032 3034 """Lock the repository store (.hg/store) and return a weak reference
3033 3035 to the lock. Use this before modifying the store (e.g. committing or
3034 3036 stripping). If you are opening a transaction, get a lock as well.
3035 3037
3036 3038 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3037 3039 'wlock' first to avoid a dead-lock hazard."""
3038 3040 l = self._currentlock(self._lockref)
3039 3041 if l is not None:
3040 3042 l.lock()
3041 3043 return l
3042 3044
3043 3045 l = self._lock(
3044 3046 vfs=self.svfs,
3045 3047 lockname=b"lock",
3046 3048 wait=wait,
3047 3049 releasefn=None,
3048 3050 acquirefn=self.invalidate,
3049 3051 desc=_(b'repository %s') % self.origroot,
3050 3052 )
3051 3053 self._lockref = weakref.ref(l)
3052 3054 return l
3053 3055
3054 3056 def wlock(self, wait=True):
3055 3057 """Lock the non-store parts of the repository (everything under
3056 3058 .hg except .hg/store) and return a weak reference to the lock.
3057 3059
3058 3060 Use this before modifying files in .hg.
3059 3061
3060 3062 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3061 3063 'wlock' first to avoid a dead-lock hazard."""
3062 3064 l = self._wlockref() if self._wlockref else None
3063 3065 if l is not None and l.held:
3064 3066 l.lock()
3065 3067 return l
3066 3068
3067 3069 # We do not need to check for non-waiting lock acquisition. Such
3068 3070 # acquisition would not cause dead-lock as they would just fail.
3069 3071 if wait and (
3070 3072 self.ui.configbool(b'devel', b'all-warnings')
3071 3073 or self.ui.configbool(b'devel', b'check-locks')
3072 3074 ):
3073 3075 if self._currentlock(self._lockref) is not None:
3074 3076 self.ui.develwarn(b'"wlock" acquired after "lock"')
3075 3077
3076 3078 def unlock():
3077 3079 if self.dirstate.pendingparentchange():
3078 3080 self.dirstate.invalidate()
3079 3081 else:
3080 3082 self.dirstate.write(None)
3081 3083
3082 3084 self._filecache[b'dirstate'].refresh()
3083 3085
3084 3086 l = self._lock(
3085 3087 self.vfs,
3086 3088 b"wlock",
3087 3089 wait,
3088 3090 unlock,
3089 3091 self.invalidatedirstate,
3090 3092 _(b'working directory of %s') % self.origroot,
3091 3093 )
3092 3094 self._wlockref = weakref.ref(l)
3093 3095 return l
3094 3096
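A short sketch of the acquisition order documented above: when both locks are needed, take 'wlock' before 'lock' to avoid the dead-lock hazard (the transaction name is illustrative):

    with repo.wlock():          # non-store lock first
        with repo.lock():       # then the store lock
            with repo.transaction(b'example'):
                pass            # store mutations belong here
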
3095 3097 def _currentlock(self, lockref):
3096 3098 """Returns the lock if it's held, or None if it's not."""
3097 3099 if lockref is None:
3098 3100 return None
3099 3101 l = lockref()
3100 3102 if l is None or not l.held:
3101 3103 return None
3102 3104 return l
3103 3105
3104 3106 def currentwlock(self):
3105 3107 """Returns the wlock if it's held, or None if it's not."""
3106 3108 return self._currentlock(self._wlockref)
3107 3109
3108 3110 def checkcommitpatterns(self, wctx, match, status, fail):
3109 3111 """check for commit arguments that aren't committable"""
3110 3112 if match.isexact() or match.prefix():
3111 3113 matched = set(status.modified + status.added + status.removed)
3112 3114
3113 3115 for f in match.files():
3114 3116 f = self.dirstate.normalize(f)
3115 3117 if f == b'.' or f in matched or f in wctx.substate:
3116 3118 continue
3117 3119 if f in status.deleted:
3118 3120 fail(f, _(b'file not found!'))
3119 3121 # Is it a directory that exists or used to exist?
3120 3122 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3121 3123 d = f + b'/'
3122 3124 for mf in matched:
3123 3125 if mf.startswith(d):
3124 3126 break
3125 3127 else:
3126 3128 fail(f, _(b"no match under directory!"))
3127 3129 elif f not in self.dirstate:
3128 3130 fail(f, _(b"file not tracked!"))
3129 3131
3130 3132 @unfilteredmethod
3131 3133 def commit(
3132 3134 self,
3133 3135 text=b"",
3134 3136 user=None,
3135 3137 date=None,
3136 3138 match=None,
3137 3139 force=False,
3138 3140 editor=None,
3139 3141 extra=None,
3140 3142 ):
3141 3143 """Add a new revision to current repository.
3142 3144
3143 3145 Revision information is gathered from the working directory,
3144 3146 match can be used to filter the committed files. If editor is
3145 3147 supplied, it is called to get a commit message.
3146 3148 """
3147 3149 if extra is None:
3148 3150 extra = {}
3149 3151
3150 3152 def fail(f, msg):
3151 3153 raise error.InputError(b'%s: %s' % (f, msg))
3152 3154
3153 3155 if not match:
3154 3156 match = matchmod.always()
3155 3157
3156 3158 if not force:
3157 3159 match.bad = fail
3158 3160
3159 3161 # lock() for recent changelog (see issue4368)
3160 3162 with self.wlock(), self.lock():
3161 3163 wctx = self[None]
3162 3164 merge = len(wctx.parents()) > 1
3163 3165
3164 3166 if not force and merge and not match.always():
3165 3167 raise error.Abort(
3166 3168 _(
3167 3169 b'cannot partially commit a merge '
3168 3170 b'(do not specify files or patterns)'
3169 3171 )
3170 3172 )
3171 3173
3172 3174 status = self.status(match=match, clean=force)
3173 3175 if force:
3174 3176 status.modified.extend(
3175 3177 status.clean
3176 3178 ) # mq may commit clean files
3177 3179
3178 3180 # check subrepos
3179 3181 subs, commitsubs, newstate = subrepoutil.precommit(
3180 3182 self.ui, wctx, status, match, force=force
3181 3183 )
3182 3184
3183 3185 # make sure all explicit patterns are matched
3184 3186 if not force:
3185 3187 self.checkcommitpatterns(wctx, match, status, fail)
3186 3188
3187 3189 cctx = context.workingcommitctx(
3188 3190 self, status, text, user, date, extra
3189 3191 )
3190 3192
3191 3193 ms = mergestatemod.mergestate.read(self)
3192 3194 mergeutil.checkunresolved(ms)
3193 3195
3194 3196 # internal config: ui.allowemptycommit
3195 3197 if cctx.isempty() and not self.ui.configbool(
3196 3198 b'ui', b'allowemptycommit'
3197 3199 ):
3198 3200 self.ui.debug(b'nothing to commit, clearing merge state\n')
3199 3201 ms.reset()
3200 3202 return None
3201 3203
3202 3204 if merge and cctx.deleted():
3203 3205 raise error.Abort(_(b"cannot commit merge with missing files"))
3204 3206
3205 3207 if editor:
3206 3208 cctx._text = editor(self, cctx, subs)
3207 3209 edited = text != cctx._text
3208 3210
3209 3211 # Save commit message in case this transaction gets rolled back
3210 3212 # (e.g. by a pretxncommit hook). Leave the content alone on
3211 3213 # the assumption that the user will use the same editor again.
3212 3214 msg_path = self.savecommitmessage(cctx._text)
3213 3215
3214 3216 # commit subs and write new state
3215 3217 if subs:
3216 3218 uipathfn = scmutil.getuipathfn(self)
3217 3219 for s in sorted(commitsubs):
3218 3220 sub = wctx.sub(s)
3219 3221 self.ui.status(
3220 3222 _(b'committing subrepository %s\n')
3221 3223 % uipathfn(subrepoutil.subrelpath(sub))
3222 3224 )
3223 3225 sr = sub.commit(cctx._text, user, date)
3224 3226 newstate[s] = (newstate[s][0], sr)
3225 3227 subrepoutil.writestate(self, newstate)
3226 3228
3227 3229 p1, p2 = self.dirstate.parents()
3228 3230 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3229 3231 try:
3230 3232 self.hook(
3231 3233 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3232 3234 )
3233 3235 with self.transaction(b'commit'):
3234 3236 ret = self.commitctx(cctx, True)
3235 3237 # update bookmarks, dirstate and mergestate
3236 3238 bookmarks.update(self, [p1, p2], ret)
3237 3239 cctx.markcommitted(ret)
3238 3240 ms.reset()
3239 3241 except: # re-raises
3240 3242 if edited:
3241 3243 self.ui.write(
3242 3244 _(b'note: commit message saved in %s\n') % msg_path
3243 3245 )
3244 3246 self.ui.write(
3245 3247 _(
3246 3248 b"note: use 'hg commit --logfile "
3247 3249 b"%s --edit' to reuse it\n"
3248 3250 )
3249 3251 % msg_path
3250 3252 )
3251 3253 raise
3252 3254
3253 3255 def commithook(unused_success):
3254 3256 # hack for commands that use a temporary commit (eg: histedit)
3255 3257 # the temporary commit got stripped before hook release
3256 3258 if self.changelog.hasnode(ret):
3257 3259 self.hook(
3258 3260 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3259 3261 )
3260 3262
3261 3263 self._afterlock(commithook)
3262 3264 return ret
3263 3265
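An illustrative call into the commit entry point above (the message and user are made-up values); `commit` takes wlock and lock itself and returns the new node, or None when there is nothing to commit:

    node = repo.commit(
        text=b'example: illustrative message',
        user=b'Example User <user@example.invalid>',
    )
    if node is None:
        repo.ui.status(b'nothing to commit\n')
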
3264 3266 @unfilteredmethod
3265 3267 def commitctx(self, ctx, error=False, origctx=None):
3266 3268 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3267 3269
3268 3270 @unfilteredmethod
3269 3271 def destroying(self):
3270 3272 """Inform the repository that nodes are about to be destroyed.
3271 3273 Intended for use by strip and rollback, so there's a common
3272 3274 place for anything that has to be done before destroying history.
3273 3275
3274 3276 This is mostly useful for saving state that is in memory and waiting
3275 3277 to be flushed when the current lock is released. Because a call to
3276 3278 destroyed is imminent, the repo will be invalidated causing those
3277 3279 changes to stay in memory (waiting for the next unlock), or vanish
3278 3280 completely.
3279 3281 """
3280 3282 # When using the same lock to commit and strip, the phasecache is left
3281 3283 # dirty after committing. Then when we strip, the repo is invalidated,
3282 3284 # causing those changes to disappear.
3283 3285 if '_phasecache' in vars(self):
3284 3286 self._phasecache.write()
3285 3287
3286 3288 @unfilteredmethod
3287 3289 def destroyed(self):
3288 3290 """Inform the repository that nodes have been destroyed.
3289 3291 Intended for use by strip and rollback, so there's a common
3290 3292 place for anything that has to be done after destroying history.
3291 3293 """
3292 3294 # When one tries to:
3293 3295 # 1) destroy nodes thus calling this method (e.g. strip)
3294 3296 # 2) use phasecache somewhere (e.g. commit)
3295 3297 #
3296 3298 # then 2) will fail because the phasecache contains nodes that were
3297 3299 # removed. We can either remove phasecache from the filecache,
3298 3300 # causing it to reload next time it is accessed, or simply filter
3299 3301 # the removed nodes now and write the updated cache.
3300 3302 self._phasecache.filterunknown(self)
3301 3303 self._phasecache.write()
3302 3304
3303 3305 # refresh all repository caches
3304 3306 self.updatecaches()
3305 3307
3306 3308 # Ensure the persistent tag cache is updated. Doing it now
3307 3309 # means that the tag cache only has to worry about destroyed
3308 3310 # heads immediately after a strip/rollback. That in turn
3309 3311 # guarantees that "cachetip == currenttip" (comparing both rev
3310 3312 # and node) always means no nodes have been added or destroyed.
3311 3313
3312 3314 # XXX this is suboptimal when qrefresh'ing: we strip the current
3313 3315 # head, refresh the tag cache, then immediately add a new head.
3314 3316 # But I think doing it this way is necessary for the "instant
3315 3317 # tag cache retrieval" case to work.
3316 3318 self.invalidate()
3317 3319
3318 3320 def status(
3319 3321 self,
3320 3322 node1=b'.',
3321 3323 node2=None,
3322 3324 match=None,
3323 3325 ignored=False,
3324 3326 clean=False,
3325 3327 unknown=False,
3326 3328 listsubrepos=False,
3327 3329 ):
3328 3330 '''a convenience method that calls node1.status(node2)'''
3329 3331 return self[node1].status(
3330 3332 node2, match, ignored, clean, unknown, listsubrepos
3331 3333 )
3332 3334
3333 3335 def addpostdsstatus(self, ps):
3334 3336 """Add a callback to run within the wlock, at the point at which status
3335 3337 fixups happen.
3336 3338
3337 3339 On status completion, callback(wctx, status) will be called with the
3338 3340 wlock held, unless the dirstate has changed from underneath or the wlock
3339 3341 couldn't be grabbed.
3340 3342
3341 3343 Callbacks should not capture and use a cached copy of the dirstate --
3342 3344 it might change in the meantime. Instead, they should access the
3343 3345 dirstate via wctx.repo().dirstate.
3344 3346
3345 3347 This list is emptied out after each status run -- extensions should
3346 3348 make sure to add to this list each time dirstate.status is called.
3347 3349 Extensions should also make sure they don't call this for statuses
3348 3350 that don't involve the dirstate.
3349 3351 """
3350 3352
3351 3353 # The list is located here for uniqueness reasons -- it is actually
3352 3354 # managed by the workingctx, but that isn't unique per-repo.
3353 3355 self._postdsstatus.append(ps)
3354 3356
3355 3357 def postdsstatus(self):
3356 3358 """Used by workingctx to get the list of post-dirstate-status hooks."""
3357 3359 return self._postdsstatus
3358 3360
3359 3361 def clearpostdsstatus(self):
3360 3362 """Used by workingctx to clear post-dirstate-status hooks."""
3361 3363 del self._postdsstatus[:]
3362 3364
3363 3365 def heads(self, start=None):
3364 3366 if start is None:
3365 3367 cl = self.changelog
3366 3368 headrevs = reversed(cl.headrevs())
3367 3369 return [cl.node(rev) for rev in headrevs]
3368 3370
3369 3371 heads = self.changelog.heads(start)
3370 3372 # sort the output in rev descending order
3371 3373 return sorted(heads, key=self.changelog.rev, reverse=True)
3372 3374
3373 3375 def branchheads(self, branch=None, start=None, closed=False):
3374 3376 """return a (possibly filtered) list of heads for the given branch
3375 3377
3376 3378 Heads are returned in topological order, from newest to oldest.
3377 3379 If branch is None, use the dirstate branch.
3378 3380 If start is not None, return only heads reachable from start.
3379 3381 If closed is True, return heads that are marked as closed as well.
3380 3382 """
3381 3383 if branch is None:
3382 3384 branch = self[None].branch()
3383 3385 branches = self.branchmap()
3384 3386 if not branches.hasbranch(branch):
3385 3387 return []
3386 3388 # the cache returns heads ordered lowest to highest
3387 3389 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3388 3390 if start is not None:
3389 3391 # filter out the heads that cannot be reached from startrev
3390 3392 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3391 3393 bheads = [h for h in bheads if h in fbheads]
3392 3394 return bheads
3393 3395
3394 3396 def branches(self, nodes):
3395 3397 if not nodes:
3396 3398 nodes = [self.changelog.tip()]
3397 3399 b = []
3398 3400 for n in nodes:
3399 3401 t = n
3400 3402 while True:
3401 3403 p = self.changelog.parents(n)
3402 3404 if p[1] != self.nullid or p[0] == self.nullid:
3403 3405 b.append((t, n, p[0], p[1]))
3404 3406 break
3405 3407 n = p[0]
3406 3408 return b
3407 3409
3408 3410 def between(self, pairs):
3409 3411 r = []
3410 3412
3411 3413 for top, bottom in pairs:
3412 3414 n, l, i = top, [], 0
3413 3415 f = 1
3414 3416
3415 3417 while n != bottom and n != self.nullid:
3416 3418 p = self.changelog.parents(n)[0]
3417 3419 if i == f:
3418 3420 l.append(n)
3419 3421 f = f * 2
3420 3422 n = p
3421 3423 i += 1
3422 3424
3423 3425 r.append(l)
3424 3426
3425 3427 return r
3426 3428
3427 3429 def checkpush(self, pushop):
3428 3430 """Extensions can override this function if additional checks have
3429 3431 to be performed before pushing, or call it if they override push
3430 3432 command.
3431 3433 """
3432 3434
3433 3435 @unfilteredpropertycache
3434 3436 def prepushoutgoinghooks(self):
3435 3437 """Return util.hooks consists of a pushop with repo, remote, outgoing
3436 3438 methods, which are called before pushing changesets.
3437 3439 """
3438 3440 return util.hooks()
3439 3441
3440 3442 def pushkey(self, namespace, key, old, new):
3441 3443 try:
3442 3444 tr = self.currenttransaction()
3443 3445 hookargs = {}
3444 3446 if tr is not None:
3445 3447 hookargs.update(tr.hookargs)
3446 3448 hookargs = pycompat.strkwargs(hookargs)
3447 3449 hookargs['namespace'] = namespace
3448 3450 hookargs['key'] = key
3449 3451 hookargs['old'] = old
3450 3452 hookargs['new'] = new
3451 3453 self.hook(b'prepushkey', throw=True, **hookargs)
3452 3454 except error.HookAbort as exc:
3453 3455 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3454 3456 if exc.hint:
3455 3457 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3456 3458 return False
3457 3459 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3458 3460 ret = pushkey.push(self, namespace, key, old, new)
3459 3461
3460 3462 def runhook(unused_success):
3461 3463 self.hook(
3462 3464 b'pushkey',
3463 3465 namespace=namespace,
3464 3466 key=key,
3465 3467 old=old,
3466 3468 new=new,
3467 3469 ret=ret,
3468 3470 )
3469 3471
3470 3472 self._afterlock(runhook)
3471 3473 return ret
3472 3474
3473 3475 def listkeys(self, namespace):
3474 3476 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3475 3477 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3476 3478 values = pushkey.list(self, namespace)
3477 3479 self.hook(b'listkeys', namespace=namespace, values=values)
3478 3480 return values
3479 3481
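A rough sketch of the listkeys/pushkey pair above using the 'bookmarks' namespace; the bookmark name and `newnode` are hypothetical, and values in this namespace are hex node strings (empty for a bookmark being created):

    marks = repo.listkeys(b'bookmarks')            # {name: hex node, ...}
    old = marks.get(b'feature', b'')
    ok = repo.pushkey(b'bookmarks', b'feature', old, hex(newnode))
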
3480 3482 def debugwireargs(self, one, two, three=None, four=None, five=None):
3481 3483 '''used to test argument passing over the wire'''
3482 3484 return b"%s %s %s %s %s" % (
3483 3485 one,
3484 3486 two,
3485 3487 pycompat.bytestr(three),
3486 3488 pycompat.bytestr(four),
3487 3489 pycompat.bytestr(five),
3488 3490 )
3489 3491
3490 3492 def savecommitmessage(self, text):
3491 3493 fp = self.vfs(b'last-message.txt', b'wb')
3492 3494 try:
3493 3495 fp.write(text)
3494 3496 finally:
3495 3497 fp.close()
3496 3498 return self.pathto(fp.name[len(self.root) + 1 :])
3497 3499
3498 3500 def register_wanted_sidedata(self, category):
3499 3501 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3500 3502 # Only revlogv2 repos can want sidedata.
3501 3503 return
3502 3504 self._wanted_sidedata.add(pycompat.bytestr(category))
3503 3505
3504 3506 def register_sidedata_computer(
3505 3507 self, kind, category, keys, computer, flags, replace=False
3506 3508 ):
3507 3509 if kind not in revlogconst.ALL_KINDS:
3508 3510 msg = _(b"unexpected revlog kind '%s'.")
3509 3511 raise error.ProgrammingError(msg % kind)
3510 3512 category = pycompat.bytestr(category)
3511 3513 already_registered = category in self._sidedata_computers.get(kind, [])
3512 3514 if already_registered and not replace:
3513 3515 msg = _(
3514 3516 b"cannot register a sidedata computer twice for category '%s'."
3515 3517 )
3516 3518 raise error.ProgrammingError(msg % category)
3517 3519 if replace and not already_registered:
3518 3520 msg = _(
3519 3521 b"cannot replace a sidedata computer that isn't registered "
3520 3522 b"for category '%s'."
3521 3523 )
3522 3524 raise error.ProgrammingError(msg % category)
3523 3525 self._sidedata_computers.setdefault(kind, {})
3524 3526 self._sidedata_computers[kind][category] = (keys, computer, flags)
3525 3527
3526 3528
3527 3529 # used to avoid circular references so destructors work
3528 3530 def aftertrans(files):
3529 3531 renamefiles = [tuple(t) for t in files]
3530 3532
3531 3533 def a():
3532 3534 for vfs, src, dest in renamefiles:
3533 3535 # if src and dest refer to a same file, vfs.rename is a no-op,
3534 3536 # leaving both src and dest on disk. delete dest to make sure
3535 3537 # the rename couldn't be such a no-op.
3536 3538 vfs.tryunlink(dest)
3537 3539 try:
3538 3540 vfs.rename(src, dest)
3539 3541 except FileNotFoundError: # journal file does not yet exist
3540 3542 pass
3541 3543
3542 3544 return a
3543 3545
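A tiny sketch of the closure above, mirroring how the transaction code passes `aftertrans(renames)` when a transaction is created; the rename tuple is illustrative:

    renames = [(repo.svfs, b'journal', b'undo')]
    after = aftertrans(renames)
    after()   # unlinks each dest, then renames src to dest (a missing src is ignored)
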
3544 3546
3545 3547 def undoname(fn: bytes) -> bytes:
3546 3548 base, name = os.path.split(fn)
3547 3549 assert name.startswith(b'journal')
3548 3550 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3549 3551
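For example, the helper above maps journal file names to their undo counterparts (POSIX path separators assumed for the second case):

    undoname(b'journal.dirstate')    # -> b'undo.dirstate'
    undoname(b'.hg/store/journal')   # -> b'.hg/store/undo'
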
3550 3552
3551 3553 def instance(ui, path: bytes, create, intents=None, createopts=None):
3552 3554
3553 3555 # prevent cyclic import localrepo -> upgrade -> localrepo
3554 3556 from . import upgrade
3555 3557
3556 3558 localpath = urlutil.urllocalpath(path)
3557 3559 if create:
3558 3560 createrepository(ui, localpath, createopts=createopts)
3559 3561
3560 3562 def repo_maker():
3561 3563 return makelocalrepository(ui, localpath, intents=intents)
3562 3564
3563 3565 repo = repo_maker()
3564 3566 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3565 3567 return repo
3566 3568
3567 3569
3568 3570 def islocal(path: bytes) -> bool:
3569 3571 return True
3570 3572
3571 3573
3572 3574 def defaultcreateopts(ui, createopts=None):
3573 3575 """Populate the default creation options for a repository.
3574 3576
3575 3577 A dictionary of explicitly requested creation options can be passed
3576 3578 in. Missing keys will be populated.
3577 3579 """
3578 3580 createopts = dict(createopts or {})
3579 3581
3580 3582 if b'backend' not in createopts:
3581 3583 # experimental config: storage.new-repo-backend
3582 3584 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3583 3585
3584 3586 return createopts
3585 3587
3586 3588
3587 3589 def clone_requirements(ui, createopts, srcrepo):
3588 3590 """clone the requirements of a local repo for a local clone
3589 3591
3590 3592 The store requirements are unchanged while the working copy requirements
3591 3593 depend on the configuration
3592 3594 """
3593 3595 target_requirements = set()
3594 3596 if not srcrepo.requirements:
3595 3597 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3596 3598 # with it.
3597 3599 return target_requirements
3598 3600 createopts = defaultcreateopts(ui, createopts=createopts)
3599 3601 for r in newreporequirements(ui, createopts):
3600 3602 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3601 3603 target_requirements.add(r)
3602 3604
3603 3605 for r in srcrepo.requirements:
3604 3606 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3605 3607 target_requirements.add(r)
3606 3608 return target_requirements
3607 3609
3608 3610
3609 3611 def newreporequirements(ui, createopts):
3610 3612 """Determine the set of requirements for a new local repository.
3611 3613
3612 3614 Extensions can wrap this function to specify custom requirements for
3613 3615 new repositories.
3614 3616 """
3615 3617
3616 3618 if b'backend' not in createopts:
3617 3619 raise error.ProgrammingError(
3618 3620 b'backend key not present in createopts; '
3619 3621 b'was defaultcreateopts() called?'
3620 3622 )
3621 3623
3622 3624 if createopts[b'backend'] != b'revlogv1':
3623 3625 raise error.Abort(
3624 3626 _(
3625 3627 b'unable to determine repository requirements for '
3626 3628 b'storage backend: %s'
3627 3629 )
3628 3630 % createopts[b'backend']
3629 3631 )
3630 3632
3631 3633 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3632 3634 if ui.configbool(b'format', b'usestore'):
3633 3635 requirements.add(requirementsmod.STORE_REQUIREMENT)
3634 3636 if ui.configbool(b'format', b'usefncache'):
3635 3637 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3636 3638 if ui.configbool(b'format', b'dotencode'):
3637 3639 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3638 3640
3639 3641 compengines = ui.configlist(b'format', b'revlog-compression')
3640 3642 for compengine in compengines:
3641 3643 if compengine in util.compengines:
3642 3644 engine = util.compengines[compengine]
3643 3645 if engine.available() and engine.revlogheader():
3644 3646 break
3645 3647 else:
3646 3648 raise error.Abort(
3647 3649 _(
3648 3650 b'compression engines %s defined by '
3649 3651 b'format.revlog-compression not available'
3650 3652 )
3651 3653 % b', '.join(b'"%s"' % e for e in compengines),
3652 3654 hint=_(
3653 3655 b'run "hg debuginstall" to list available '
3654 3656 b'compression engines'
3655 3657 ),
3656 3658 )
3657 3659
3658 3660 # zlib is the historical default and doesn't need an explicit requirement.
3659 3661 if compengine == b'zstd':
3660 3662 requirements.add(b'revlog-compression-zstd')
3661 3663 elif compengine != b'zlib':
3662 3664 requirements.add(b'exp-compression-%s' % compengine)
3663 3665
3664 3666 if scmutil.gdinitconfig(ui):
3665 3667 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3666 3668 if ui.configbool(b'format', b'sparse-revlog'):
3667 3669 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3668 3670
3669 3671 # experimental config: format.use-dirstate-v2
3670 3672 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3671 3673 if ui.configbool(b'format', b'use-dirstate-v2'):
3672 3674 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3673 3675
3674 3676 # experimental config: format.exp-use-copies-side-data-changeset
3675 3677 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3676 3678 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3677 3679 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3678 3680 if ui.configbool(b'experimental', b'treemanifest'):
3679 3681 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3680 3682
3681 3683 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3682 3684 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3683 3685 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3684 3686
3685 3687 revlogv2 = ui.config(b'experimental', b'revlogv2')
3686 3688 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3687 3689 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3688 3690 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3689 3691 # experimental config: format.internal-phase
3690 3692 if ui.configbool(b'format', b'use-internal-phase'):
3691 3693 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3692 3694
3693 3695 # experimental config: format.exp-archived-phase
3694 3696 if ui.configbool(b'format', b'exp-archived-phase'):
3695 3697 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3696 3698
3697 3699 if createopts.get(b'narrowfiles'):
3698 3700 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3699 3701
3700 3702 if createopts.get(b'lfs'):
3701 3703 requirements.add(b'lfs')
3702 3704
3703 3705 if ui.configbool(b'format', b'bookmarks-in-store'):
3704 3706 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3705 3707
3706 3708 if ui.configbool(b'format', b'use-persistent-nodemap'):
3707 3709 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3708 3710
3709 3711 # if share-safe is enabled, let's create the new repository with the new
3710 3712 # requirement
3711 3713 if ui.configbool(b'format', b'use-share-safe'):
3712 3714 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3713 3715
3714 3716 # if we are creating a share-repo¹ we have to handle requirements
3715 3717 # differently.
3716 3718 #
3717 3719 # [1] (i.e. reusing the store from another repository, just having a
3718 3720 # working copy)
3719 3721 if b'sharedrepo' in createopts:
3720 3722 source_requirements = set(createopts[b'sharedrepo'].requirements)
3721 3723
3722 3724 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3723 3725 # share to an old school repository, we have to copy the
3724 3726 # requirements and hope for the best.
3725 3727 requirements = source_requirements
3726 3728 else:
3727 3729 # We have control over the working copy only, so "copy" the non
3728 3730 # working copy part over, ignoring previous logic.
3729 3731 to_drop = set()
3730 3732 for req in requirements:
3731 3733 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3732 3734 continue
3733 3735 if req in source_requirements:
3734 3736 continue
3735 3737 to_drop.add(req)
3736 3738 requirements -= to_drop
3737 3739 requirements |= source_requirements
3738 3740
3739 3741 if createopts.get(b'sharedrelative'):
3740 3742 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3741 3743 else:
3742 3744 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3743 3745
3744 3746 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3745 3747 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3746 3748 msg = _(b"ignoring unknown tracked key version: %d\n")
3747 3749 hint = _(
3748 3750 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3749 3751 )
3750 3752 if version != 1:
3751 3753 ui.warn(msg % version, hint=hint)
3752 3754 else:
3753 3755 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3754 3756
3755 3757 return requirements
3756 3758
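A short sketch of how repository creation strings these helpers together, mirroring `createrepository` below; `ui` is assumed to be an existing ui object:

    opts = defaultcreateopts(ui)                      # fills in b'backend', etc.
    reqs = newreporequirements(ui, createopts=opts)   # config -> requirement set
    reqs -= checkrequirementscompat(ui, reqs)         # drop incompatible ones
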
3757 3759
3758 3760 def checkrequirementscompat(ui, requirements):
3759 3761 """Checks compatibility of repository requirements enabled and disabled.
3760 3762
3761 3763 Returns a set of requirements which need to be dropped because dependent
3762 3764 requirements are not enabled. Also warns users about it."""
3763 3765
3764 3766 dropped = set()
3765 3767
3766 3768 if requirementsmod.STORE_REQUIREMENT not in requirements:
3767 3769 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3768 3770 ui.warn(
3769 3771 _(
3770 3772 b'ignoring enabled \'format.bookmarks-in-store\' config '
3771 3773 b'because it is incompatible with disabled '
3772 3774 b'\'format.usestore\' config\n'
3773 3775 )
3774 3776 )
3775 3777 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3776 3778
3777 3779 if (
3778 3780 requirementsmod.SHARED_REQUIREMENT in requirements
3779 3781 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3780 3782 ):
3781 3783 raise error.Abort(
3782 3784 _(
3783 3785 b"cannot create shared repository as source was created"
3784 3786 b" with 'format.usestore' config disabled"
3785 3787 )
3786 3788 )
3787 3789
3788 3790 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3789 3791 if ui.hasconfig(b'format', b'use-share-safe'):
3790 3792 msg = _(
3791 3793 b"ignoring enabled 'format.use-share-safe' config because "
3792 3794 b"it is incompatible with disabled 'format.usestore'"
3793 3795 b" config\n"
3794 3796 )
3795 3797 ui.warn(msg)
3796 3798 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3797 3799
3798 3800 return dropped
3799 3801
3800 3802
3801 3803 def filterknowncreateopts(ui, createopts):
3802 3804 """Filters a dict of repo creation options against options that are known.
3803 3805
3804 3806 Receives a dict of repo creation options and returns a dict of those
3805 3807 options that we don't know how to handle.
3806 3808
3807 3809 This function is called as part of repository creation. If the
3808 3810 returned dict contains any items, repository creation will not
3809 3811 be allowed, as it means there was a request to create a repository
3810 3812 with options not recognized by loaded code.
3811 3813
3812 3814 Extensions can wrap this function to filter out creation options
3813 3815 they know how to handle.
3814 3816 """
3815 3817 known = {
3816 3818 b'backend',
3817 3819 b'lfs',
3818 3820 b'narrowfiles',
3819 3821 b'sharedrepo',
3820 3822 b'sharedrelative',
3821 3823 b'shareditems',
3822 3824 b'shallowfilestore',
3823 3825 }
3824 3826
3825 3827 return {k: v for k, v in createopts.items() if k not in known}
3826 3828
3827 3829
3828 3830 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3829 3831 """Create a new repository in a vfs.
3830 3832
3831 3833 ``path`` path to the new repo's working directory.
3832 3834 ``createopts`` options for the new repository.
3833 3835 ``requirements`` predefined set of requirements.
3834 3836 (incompatible with ``createopts``)
3835 3837
3836 3838 The following keys for ``createopts`` are recognized:
3837 3839
3838 3840 backend
3839 3841 The storage backend to use.
3840 3842 lfs
3841 3843 Repository will be created with ``lfs`` requirement. The lfs extension
3842 3844 will automatically be loaded when the repository is accessed.
3843 3845 narrowfiles
3844 3846 Set up repository to support narrow file storage.
3845 3847 sharedrepo
3846 3848 Repository object from which storage should be shared.
3847 3849 sharedrelative
3848 3850 Boolean indicating if the path to the shared repo should be
3849 3851 stored as relative. By default, the pointer to the "parent" repo
3850 3852 is stored as an absolute path.
3851 3853 shareditems
3852 3854 Set of items to share to the new repository (in addition to storage).
3853 3855 shallowfilestore
3854 3856 Indicates that storage for files should be shallow (not all ancestor
3855 3857 revisions are known).
3856 3858 """
3857 3859
3858 3860 if requirements is not None:
3859 3861 if createopts is not None:
3860 3862 msg = b'cannot specify both createopts and requirements'
3861 3863 raise error.ProgrammingError(msg)
3862 3864 createopts = {}
3863 3865 else:
3864 3866 createopts = defaultcreateopts(ui, createopts=createopts)
3865 3867
3866 3868 unknownopts = filterknowncreateopts(ui, createopts)
3867 3869
3868 3870 if not isinstance(unknownopts, dict):
3869 3871 raise error.ProgrammingError(
3870 3872 b'filterknowncreateopts() did not return a dict'
3871 3873 )
3872 3874
3873 3875 if unknownopts:
3874 3876 raise error.Abort(
3875 3877 _(
3876 3878 b'unable to create repository because of unknown '
3877 3879 b'creation option: %s'
3878 3880 )
3879 3881 % b', '.join(sorted(unknownopts)),
3880 3882 hint=_(b'is a required extension not loaded?'),
3881 3883 )
3882 3884
3883 3885 requirements = newreporequirements(ui, createopts=createopts)
3884 3886 requirements -= checkrequirementscompat(ui, requirements)
3885 3887
3886 3888 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3887 3889
3888 3890 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3889 3891 if hgvfs.exists():
3890 3892 raise error.RepoError(_(b'repository %s already exists') % path)
3891 3893
3892 3894 if b'sharedrepo' in createopts:
3893 3895 sharedpath = createopts[b'sharedrepo'].sharedpath
3894 3896
3895 3897 if createopts.get(b'sharedrelative'):
3896 3898 try:
3897 3899 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3898 3900 sharedpath = util.pconvert(sharedpath)
3899 3901 except (IOError, ValueError) as e:
3900 3902 # ValueError is raised on Windows if the drive letters differ
3901 3903 # on each path.
3902 3904 raise error.Abort(
3903 3905 _(b'cannot calculate relative path'),
3904 3906 hint=stringutil.forcebytestr(e),
3905 3907 )
3906 3908
3907 3909 if not wdirvfs.exists():
3908 3910 wdirvfs.makedirs()
3909 3911
3910 3912 hgvfs.makedir(notindexed=True)
3911 3913 if b'sharedrepo' not in createopts:
3912 3914 hgvfs.mkdir(b'cache')
3913 3915 hgvfs.mkdir(b'wcache')
3914 3916
3915 3917 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3916 3918 if has_store and b'sharedrepo' not in createopts:
3917 3919 hgvfs.mkdir(b'store')
3918 3920
3919 3921 # We create an invalid changelog outside the store so very old
3920 3922 # Mercurial versions (which didn't know about the requirements
3921 3923 # file) encounter an error on reading the changelog. This
3922 3924 # effectively locks out old clients and prevents them from
3923 3925 # mucking with a repo in an unknown format.
3924 3926 #
3925 3927 # The revlog header has version 65535, which won't be recognized by
3926 3928 # such old clients.
3927 3929 hgvfs.append(
3928 3930 b'00changelog.i',
3929 3931 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3930 3932 b'layout',
3931 3933 )
3932 3934
3933 3935 # Filter the requirements into working copy and store ones
3934 3936 wcreq, storereq = scmutil.filterrequirements(requirements)
3935 3937 # write working copy ones
3936 3938 scmutil.writerequires(hgvfs, wcreq)
3937 3939 # If there are store requirements and the current repository
3938 3940 # is not a shared one, write stored requirements
3939 3941 # For new shared repository, we don't need to write the store
3940 3942 # requirements as they are already present in store requires
3941 3943 if storereq and b'sharedrepo' not in createopts:
3942 3944 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3943 3945 scmutil.writerequires(storevfs, storereq)
3944 3946
3945 3947 # Write out file telling readers where to find the shared store.
3946 3948 if b'sharedrepo' in createopts:
3947 3949 hgvfs.write(b'sharedpath', sharedpath)
3948 3950
3949 3951 if createopts.get(b'shareditems'):
3950 3952 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3951 3953 hgvfs.write(b'shared', shared)
3952 3954
3953 3955
3954 3956 def poisonrepository(repo):
3955 3957 """Poison a repository instance so it can no longer be used."""
3956 3958 # Perform any cleanup on the instance.
3957 3959 repo.close()
3958 3960
3959 3961 # Our strategy is to replace the type of the object with one that
3960 3962 # has all attribute lookups result in error.
3961 3963 #
3962 3964 # But we have to allow the close() method because some constructors
3963 3965 # of repos call close() on repo references.
3964 3966 class poisonedrepository:
3965 3967 def __getattribute__(self, item):
3966 3968 if item == 'close':
3967 3969 return object.__getattribute__(self, item)
3968 3970
3969 3971 raise error.ProgrammingError(
3970 3972 b'repo instances should not be used after unshare'
3971 3973 )
3972 3974
3973 3975 def close(self):
3974 3976 pass
3975 3977
3976 3978 # We may have a repoview, which intercepts __setattr__. So be sure
3977 3979 # we operate at the lowest level possible.
3978 3980 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,3358 +1,3364 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 # coding: utf8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Storage back-end for Mercurial.
10 10
11 11 This provides efficient delta storage with O(1) retrieve and append
12 12 and O(changes) merge between branches.
13 13 """
14 14
15 15
16 16 import binascii
17 17 import collections
18 18 import contextlib
19 19 import io
20 20 import os
21 21 import struct
22 22 import zlib
23 23
24 24 # import stuff from node for others to import from revlog
25 25 from .node import (
26 26 bin,
27 27 hex,
28 28 nullrev,
29 29 sha1nodeconstants,
30 30 short,
31 31 wdirrev,
32 32 )
33 33 from .i18n import _
34 34 from .pycompat import getattr
35 35 from .revlogutils.constants import (
36 36 ALL_KINDS,
37 37 CHANGELOGV2,
38 38 COMP_MODE_DEFAULT,
39 39 COMP_MODE_INLINE,
40 40 COMP_MODE_PLAIN,
41 41 ENTRY_RANK,
42 42 FEATURES_BY_VERSION,
43 43 FLAG_GENERALDELTA,
44 44 FLAG_INLINE_DATA,
45 45 INDEX_HEADER,
46 46 KIND_CHANGELOG,
47 47 KIND_FILELOG,
48 48 RANK_UNKNOWN,
49 49 REVLOGV0,
50 50 REVLOGV1,
51 51 REVLOGV1_FLAGS,
52 52 REVLOGV2,
53 53 REVLOGV2_FLAGS,
54 54 REVLOG_DEFAULT_FLAGS,
55 55 REVLOG_DEFAULT_FORMAT,
56 56 REVLOG_DEFAULT_VERSION,
57 57 SUPPORTED_FLAGS,
58 58 )
59 59 from .revlogutils.flagutil import (
60 60 REVIDX_DEFAULT_FLAGS,
61 61 REVIDX_ELLIPSIS,
62 62 REVIDX_EXTSTORED,
63 63 REVIDX_FLAGS_ORDER,
64 64 REVIDX_HASCOPIESINFO,
65 65 REVIDX_ISCENSORED,
66 66 REVIDX_RAWTEXT_CHANGING_FLAGS,
67 67 )
68 68 from .thirdparty import attr
69 69 from . import (
70 70 ancestor,
71 71 dagop,
72 72 error,
73 73 mdiff,
74 74 policy,
75 75 pycompat,
76 76 revlogutils,
77 77 templatefilters,
78 78 util,
79 79 )
80 80 from .interfaces import (
81 81 repository,
82 82 util as interfaceutil,
83 83 )
84 84 from .revlogutils import (
85 85 deltas as deltautil,
86 86 docket as docketutil,
87 87 flagutil,
88 88 nodemap as nodemaputil,
89 89 randomaccessfile,
90 90 revlogv0,
91 91 rewrite,
92 92 sidedata as sidedatautil,
93 93 )
94 94 from .utils import (
95 95 storageutil,
96 96 stringutil,
97 97 )
98 98
99 99 # blanked usage of all the names to prevent pyflakes constraints
100 100 # We need these names available in the module for extensions.
101 101
102 102 REVLOGV0
103 103 REVLOGV1
104 104 REVLOGV2
105 105 CHANGELOGV2
106 106 FLAG_INLINE_DATA
107 107 FLAG_GENERALDELTA
108 108 REVLOG_DEFAULT_FLAGS
109 109 REVLOG_DEFAULT_FORMAT
110 110 REVLOG_DEFAULT_VERSION
111 111 REVLOGV1_FLAGS
112 112 REVLOGV2_FLAGS
113 113 REVIDX_ISCENSORED
114 114 REVIDX_ELLIPSIS
115 115 REVIDX_HASCOPIESINFO
116 116 REVIDX_EXTSTORED
117 117 REVIDX_DEFAULT_FLAGS
118 118 REVIDX_FLAGS_ORDER
119 119 REVIDX_RAWTEXT_CHANGING_FLAGS
120 120
121 121 parsers = policy.importmod('parsers')
122 122 rustancestor = policy.importrust('ancestor')
123 123 rustdagop = policy.importrust('dagop')
124 124 rustrevlog = policy.importrust('revlog')
125 125
126 126 # Aliased for performance.
127 127 _zlibdecompress = zlib.decompress
128 128
129 129 # max size of revlog with inline data
130 130 _maxinline = 131072
131 131
132 132 # Flag processors for REVIDX_ELLIPSIS.
133 133 def ellipsisreadprocessor(rl, text):
134 134 return text, False
135 135
136 136
137 137 def ellipsiswriteprocessor(rl, text):
138 138 return text, False
139 139
140 140
141 141 def ellipsisrawprocessor(rl, text):
142 142 return False
143 143
144 144
145 145 ellipsisprocessor = (
146 146 ellipsisreadprocessor,
147 147 ellipsiswriteprocessor,
148 148 ellipsisrawprocessor,
149 149 )
150 150
151 151
152 152 def _verify_revision(rl, skipflags, state, node):
153 153 """Verify the integrity of the given revlog ``node`` while providing a hook
154 154 point for extensions to influence the operation."""
155 155 if skipflags:
156 156 state[b'skipread'].add(node)
157 157 else:
158 158 # Side-effect: read content and verify hash.
159 159 rl.revision(node)
160 160
161 161
162 162 # True if a fast implementation for persistent-nodemap is available
163 163 #
164 164 # We also consider we have a "fast" implementation in "pure" python because
165 165 # people using pure don't really have performance considerations (and a
166 166 # wheelbarrow of other slowness sources)
167 167 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
168 168 parsers, 'BaseIndexObject'
169 169 )
170 170
171 171
172 172 @interfaceutil.implementer(repository.irevisiondelta)
173 173 @attr.s(slots=True)
174 174 class revlogrevisiondelta:
175 175 node = attr.ib()
176 176 p1node = attr.ib()
177 177 p2node = attr.ib()
178 178 basenode = attr.ib()
179 179 flags = attr.ib()
180 180 baserevisionsize = attr.ib()
181 181 revision = attr.ib()
182 182 delta = attr.ib()
183 183 sidedata = attr.ib()
184 184 protocol_flags = attr.ib()
185 185 linknode = attr.ib(default=None)
186 186
187 187
188 188 @interfaceutil.implementer(repository.iverifyproblem)
189 189 @attr.s(frozen=True)
190 190 class revlogproblem:
191 191 warning = attr.ib(default=None)
192 192 error = attr.ib(default=None)
193 193 node = attr.ib(default=None)
194 194
195 195
196 196 def parse_index_v1(data, inline):
197 197 # call the C implementation to parse the index data
198 198 index, cache = parsers.parse_index2(data, inline)
199 199 return index, cache
200 200
201 201
202 202 def parse_index_v2(data, inline):
203 203 # call the C implementation to parse the index data
204 204 index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
205 205 return index, cache
206 206
207 207
208 208 def parse_index_cl_v2(data, inline):
209 209 # call the C implementation to parse the index data
210 210 index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
211 211 return index, cache
212 212
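# Each ``parse_index_*`` helper above returns an ``(index, cache)`` pair;
# ``cache`` is expected to be either None or an ``(offset, data)`` tuple used
# to prime the raw-chunk cache when index and data share a file (inline
# revlogs).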
213 213
214 214 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
215 215
216 216 def parse_index_v1_nodemap(data, inline):
217 217 index, cache = parsers.parse_index_devel_nodemap(data, inline)
218 218 return index, cache
219 219
220 220
221 221 else:
222 222 parse_index_v1_nodemap = None
223 223
224 224
225 225 def parse_index_v1_mixed(data, inline):
226 226 index, cache = parse_index_v1(data, inline)
227 227 return rustrevlog.MixedIndex(index), cache
228 228
229 229
230 230 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
231 231 # signed integer)
232 232 _maxentrysize = 0x7FFFFFFF
233 233
234 234 FILE_TOO_SHORT_MSG = _(
235 235 b'cannot read from revlog %s;'
236 236 b' expected %d bytes from offset %d, data size is %d'
237 237 )
238 238
239 239 hexdigits = b'0123456789abcdefABCDEF'
240 240
241 241
242 242 class revlog:
243 243 """
244 244 the underlying revision storage object
245 245
246 246 A revlog consists of two parts, an index and the revision data.
247 247
248 248 The index is a file with a fixed record size containing
249 249 information on each revision, including its nodeid (hash), the
250 250 nodeids of its parents, the position and offset of its data within
251 251 the data file, and the revision it's based on. Finally, each entry
252 252 contains a linkrev entry that can serve as a pointer to external
253 253 data.
254 254
255 255 The revision data itself is a linear collection of data chunks.
256 256 Each chunk represents a revision and is usually represented as a
257 257 delta against the previous chunk. To bound lookup time, runs of
258 258 deltas are limited to about 2 times the length of the original
259 259 version data. This makes retrieval of a version proportional to
260 260 its size, or O(1) relative to the number of revisions.
261 261
262 262 Both pieces of the revlog are written to in an append-only
263 263 fashion, which means we never need to rewrite a file to insert or
264 264 remove data, and can use some simple techniques to avoid the need
265 265 for locking while reading.
266 266
267 267 If checkambig, indexfile is opened with checkambig=True at
268 268 writing, to avoid file stat ambiguity.
269 269
270 270 If mmaplargeindex is True, and an mmapindexthreshold is set, the
271 271 index will be mmapped rather than read if it is larger than the
272 272 configured threshold.
273 273
274 274 If censorable is True, the revlog can have censored revisions.
275 275
276 276 If `upperboundcomp` is not None, this is the expected maximal gain from
277 277 compression for the data content.
278 278
279 279 `concurrencychecker` is an optional function that receives 3 arguments: a
280 280 file handle, a filename, and an expected position. It should check whether
281 281 the current position in the file handle is valid, and log/warn/fail (by
282 282 raising).
283 283
284 284 See mercurial/revlogutils/constants.py for details about the content of an
285 285 index entry.
286 286 """
287 287
288 288 _flagserrorclass = error.RevlogError
289 289
290 290 def __init__(
291 291 self,
292 292 opener,
293 293 target,
294 294 radix,
295 295 postfix=None, # only exists for `tmpcensored` now
296 296 checkambig=False,
297 297 mmaplargeindex=False,
298 298 censorable=False,
299 299 upperboundcomp=None,
300 300 persistentnodemap=False,
301 301 concurrencychecker=None,
302 302 trypending=False,
303 303 canonical_parent_order=True,
304 304 ):
305 305 """
306 306 create a revlog object
307 307
308 308 opener is a function that abstracts the file opening operation
309 309 and can be used to implement COW semantics or the like.
310 310
311 311 `target`: a (KIND, ID) tuple that identifies the content stored in
312 312 this revlog. It helps the rest of the code understand what the revlog
313 313 is about without having to resort to heuristics and index filename
314 314 analysis. Note that this must be reliably set by normal code, but
315 315 test, debug, or performance measurement code might not set it to an
316 316 accurate value.
317 317 """
318 318 self.upperboundcomp = upperboundcomp
319 319
320 320 self.radix = radix
321 321
322 322 self._docket_file = None
323 323 self._indexfile = None
324 324 self._datafile = None
325 325 self._sidedatafile = None
326 326 self._nodemap_file = None
327 327 self.postfix = postfix
328 328 self._trypending = trypending
329 329 self.opener = opener
330 330 if persistentnodemap:
331 331 self._nodemap_file = nodemaputil.get_nodemap_file(self)
332 332
333 333 assert target[0] in ALL_KINDS
334 334 assert len(target) == 2
335 335 self.target = target
336 336 # When True, indexfile is opened with checkambig=True at writing, to
337 337 # avoid file stat ambiguity.
338 338 self._checkambig = checkambig
339 339 self._mmaplargeindex = mmaplargeindex
340 340 self._censorable = censorable
341 341 # 3-tuple of (node, rev, text) for a raw revision.
342 342 self._revisioncache = None
343 343 # Maps rev to chain base rev.
344 344 self._chainbasecache = util.lrucachedict(100)
345 345 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
346 346 self._chunkcache = (0, b'')
347 347 # How much data to read and cache into the raw revlog data cache.
348 348 self._chunkcachesize = 65536
349 349 self._maxchainlen = None
350 350 self._deltabothparents = True
351 351 self._candidate_group_chunk_size = 0
352 352 self._debug_delta = False
353 353 self.index = None
354 354 self._docket = None
355 355 self._nodemap_docket = None
356 356 # Mapping of partial identifiers to full nodes.
357 357 self._pcache = {}
358 358 # Mapping of revision integer to full node.
359 359 self._compengine = b'zlib'
360 360 self._compengineopts = {}
361 361 self._maxdeltachainspan = -1
362 362 self._withsparseread = False
363 363 self._sparserevlog = False
364 364 self.hassidedata = False
365 365 self._srdensitythreshold = 0.50
366 366 self._srmingapsize = 262144
367 367
368 # other optional features
369
370 # might remove rank configuration once the computation has no impact
371 self._compute_rank = False
372
368 373 # Make copy of flag processors so each revlog instance can support
369 374 # custom flags.
370 375 self._flagprocessors = dict(flagutil.flagprocessors)
371 376
372 377 # 3-tuple of file handles being used for active writing.
373 378 self._writinghandles = None
374 379 # prevent nesting of addgroup
375 380 self._adding_group = None
376 381
377 382 self._loadindex()
378 383
379 384 self._concurrencychecker = concurrencychecker
380 385
381 386 # parent order is supposed to be semantically irrelevant, so we
382 387 # normally resort parents to ensure that the first parent is non-null,
383 388 # if there is a non-null parent at all.
384 389 # filelog abuses the parent order as flag to mark some instances of
385 390 # meta-encoded files, so allow it to disable this behavior.
386 391 self.canonical_parent_order = canonical_parent_order
387 392
388 393 def _init_opts(self):
389 394 """process options (from above/config) to setup associated default revlog mode
390 395
391 396 These values might be affected when actually reading the on-disk information.
392 397
393 398 The relevant values are returned for use in _loadindex().
394 399
395 400 * newversionflags:
396 401 version header to use if we need to create a new revlog
397 402
398 403 * mmapindexthreshold:
399 404 minimal index size at which to start using mmap
400 405
401 406 * force_nodemap:
402 407 force the usage of a "development" version of the nodemap code
403 408 """
404 409 mmapindexthreshold = None
405 410 opts = self.opener.options
406 411
407 412 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
408 413 new_header = CHANGELOGV2
414 self._compute_rank = opts.get(b'changelogv2.compute-rank', True)
409 415 elif b'revlogv2' in opts:
410 416 new_header = REVLOGV2
411 417 elif b'revlogv1' in opts:
412 418 new_header = REVLOGV1 | FLAG_INLINE_DATA
413 419 if b'generaldelta' in opts:
414 420 new_header |= FLAG_GENERALDELTA
415 421 elif b'revlogv0' in self.opener.options:
416 422 new_header = REVLOGV0
417 423 else:
418 424 new_header = REVLOG_DEFAULT_VERSION
419 425
420 426 if b'chunkcachesize' in opts:
421 427 self._chunkcachesize = opts[b'chunkcachesize']
422 428 if b'maxchainlen' in opts:
423 429 self._maxchainlen = opts[b'maxchainlen']
424 430 if b'deltabothparents' in opts:
425 431 self._deltabothparents = opts[b'deltabothparents']
426 432 dps_cgds = opts.get(b'delta-parent-search.candidate-group-chunk-size')
427 433 if dps_cgds:
428 434 self._candidate_group_chunk_size = dps_cgds
429 435 self._lazydelta = bool(opts.get(b'lazydelta', True))
430 436 self._lazydeltabase = False
431 437 if self._lazydelta:
432 438 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
433 439 if b'debug-delta' in opts:
434 440 self._debug_delta = opts[b'debug-delta']
435 441 if b'compengine' in opts:
436 442 self._compengine = opts[b'compengine']
437 443 if b'zlib.level' in opts:
438 444 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
439 445 if b'zstd.level' in opts:
440 446 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
441 447 if b'maxdeltachainspan' in opts:
442 448 self._maxdeltachainspan = opts[b'maxdeltachainspan']
443 449 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
444 450 mmapindexthreshold = opts[b'mmapindexthreshold']
445 451 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
446 452 withsparseread = bool(opts.get(b'with-sparse-read', False))
447 453 # sparse-revlog forces sparse-read
448 454 self._withsparseread = self._sparserevlog or withsparseread
449 455 if b'sparse-read-density-threshold' in opts:
450 456 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
451 457 if b'sparse-read-min-gap-size' in opts:
452 458 self._srmingapsize = opts[b'sparse-read-min-gap-size']
453 459 if opts.get(b'enableellipsis'):
454 460 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
455 461
456 462 # revlog v0 doesn't have flag processors
457 463 for flag, processor in opts.get(b'flagprocessors', {}).items():
458 464 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
459 465
460 466 if self._chunkcachesize <= 0:
461 467 raise error.RevlogError(
462 468 _(b'revlog chunk cache size %r is not greater than 0')
463 469 % self._chunkcachesize
464 470 )
465 471 elif self._chunkcachesize & (self._chunkcachesize - 1):
466 472 raise error.RevlogError(
467 473 _(b'revlog chunk cache size %r is not a power of 2')
468 474 % self._chunkcachesize
469 475 )
470 476 force_nodemap = opts.get(b'devel-force-nodemap', False)
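# As a concrete illustration: with ``revlogv1`` and ``generaldelta`` both
# present in the opener options, ``new_header`` is
# ``REVLOGV1 | FLAG_INLINE_DATA | FLAG_GENERALDELTA``; the format version
# lives in the low 16 bits and the feature flags in the high 16 bits,
# matching the ``header & 0xFFFF`` / ``header & ~0xFFFF`` split performed in
# ``_loadindex``.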
471 477 return new_header, mmapindexthreshold, force_nodemap
472 478
473 479 def _get_data(self, filepath, mmap_threshold, size=None):
474 480 """return a file content with or without mmap
475 481
476 482 If the file is missing return the empty string"""
477 483 try:
478 484 with self.opener(filepath) as fp:
479 485 if mmap_threshold is not None:
480 486 file_size = self.opener.fstat(fp).st_size
481 487 if file_size >= mmap_threshold:
482 488 if size is not None:
483 489 # avoid potential mmap crash
484 490 size = min(file_size, size)
485 491 # TODO: should .close() to release resources without
486 492 # relying on Python GC
487 493 if size is None:
488 494 return util.buffer(util.mmapread(fp))
489 495 else:
490 496 return util.buffer(util.mmapread(fp, size))
491 497 if size is None:
492 498 return fp.read()
493 499 else:
494 500 return fp.read(size)
495 501 except FileNotFoundError:
496 502 return b''
497 503
498 504 def _loadindex(self, docket=None):
499 505
500 506 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
501 507
502 508 if self.postfix is not None:
503 509 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
504 510 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
505 511 entry_point = b'%s.i.a' % self.radix
506 512 else:
507 513 entry_point = b'%s.i' % self.radix
508 514
509 515 if docket is not None:
510 516 self._docket = docket
511 517 self._docket_file = entry_point
512 518 else:
513 519 self._initempty = True
514 520 entry_data = self._get_data(entry_point, mmapindexthreshold)
515 521 if len(entry_data) > 0:
516 522 header = INDEX_HEADER.unpack(entry_data[:4])[0]
517 523 self._initempty = False
518 524 else:
519 525 header = new_header
520 526
521 527 self._format_flags = header & ~0xFFFF
522 528 self._format_version = header & 0xFFFF
523 529
524 530 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
525 531 if supported_flags is None:
526 532 msg = _(b'unknown version (%d) in revlog %s')
527 533 msg %= (self._format_version, self.display_id)
528 534 raise error.RevlogError(msg)
529 535 elif self._format_flags & ~supported_flags:
530 536 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
531 537 display_flag = self._format_flags >> 16
532 538 msg %= (display_flag, self._format_version, self.display_id)
533 539 raise error.RevlogError(msg)
534 540
535 541 features = FEATURES_BY_VERSION[self._format_version]
536 542 self._inline = features[b'inline'](self._format_flags)
537 543 self._generaldelta = features[b'generaldelta'](self._format_flags)
538 544 self.hassidedata = features[b'sidedata']
539 545
540 546 if not features[b'docket']:
541 547 self._indexfile = entry_point
542 548 index_data = entry_data
543 549 else:
544 550 self._docket_file = entry_point
545 551 if self._initempty:
546 552 self._docket = docketutil.default_docket(self, header)
547 553 else:
548 554 self._docket = docketutil.parse_docket(
549 555 self, entry_data, use_pending=self._trypending
550 556 )
551 557
552 558 if self._docket is not None:
553 559 self._indexfile = self._docket.index_filepath()
554 560 index_data = b''
555 561 index_size = self._docket.index_end
556 562 if index_size > 0:
557 563 index_data = self._get_data(
558 564 self._indexfile, mmapindexthreshold, size=index_size
559 565 )
560 566 if len(index_data) < index_size:
561 567 msg = _(b'too few index data for %s: got %d, expected %d')
562 568 msg %= (self.display_id, len(index_data), index_size)
563 569 raise error.RevlogError(msg)
564 570
565 571 self._inline = False
566 572 # generaldelta implied by version 2 revlogs.
567 573 self._generaldelta = True
568 574 # the logic for persistent nodemap will be dealt with within the
569 575 # main docket, so disable it for now.
570 576 self._nodemap_file = None
571 577
572 578 if self._docket is not None:
573 579 self._datafile = self._docket.data_filepath()
574 580 self._sidedatafile = self._docket.sidedata_filepath()
575 581 elif self.postfix is None:
576 582 self._datafile = b'%s.d' % self.radix
577 583 else:
578 584 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
579 585
580 586 self.nodeconstants = sha1nodeconstants
581 587 self.nullid = self.nodeconstants.nullid
582 588
583 589 # sparse-revlog can't be on without general-delta (issue6056)
584 590 if not self._generaldelta:
585 591 self._sparserevlog = False
586 592
587 593 self._storedeltachains = True
588 594
589 595 devel_nodemap = (
590 596 self._nodemap_file
591 597 and force_nodemap
592 598 and parse_index_v1_nodemap is not None
593 599 )
594 600
595 601 use_rust_index = False
596 602 if rustrevlog is not None:
597 603 if self._nodemap_file is not None:
598 604 use_rust_index = True
599 605 else:
600 606 use_rust_index = self.opener.options.get(b'rust.index')
601 607
602 608 self._parse_index = parse_index_v1
603 609 if self._format_version == REVLOGV0:
604 610 self._parse_index = revlogv0.parse_index_v0
605 611 elif self._format_version == REVLOGV2:
606 612 self._parse_index = parse_index_v2
607 613 elif self._format_version == CHANGELOGV2:
608 614 self._parse_index = parse_index_cl_v2
609 615 elif devel_nodemap:
610 616 self._parse_index = parse_index_v1_nodemap
611 617 elif use_rust_index:
612 618 self._parse_index = parse_index_v1_mixed
613 619 try:
614 620 d = self._parse_index(index_data, self._inline)
615 621 index, chunkcache = d
616 622 use_nodemap = (
617 623 not self._inline
618 624 and self._nodemap_file is not None
619 625 and util.safehasattr(index, 'update_nodemap_data')
620 626 )
621 627 if use_nodemap:
622 628 nodemap_data = nodemaputil.persisted_data(self)
623 629 if nodemap_data is not None:
624 630 docket = nodemap_data[0]
625 631 if (
626 632 len(d[0]) > docket.tip_rev
627 633 and d[0][docket.tip_rev][7] == docket.tip_node
628 634 ):
629 635 # no changelog tampering
630 636 self._nodemap_docket = docket
631 637 index.update_nodemap_data(*nodemap_data)
632 638 except (ValueError, IndexError):
633 639 raise error.RevlogError(
634 640 _(b"index %s is corrupted") % self.display_id
635 641 )
636 642 self.index = index
637 643 self._segmentfile = randomaccessfile.randomaccessfile(
638 644 self.opener,
639 645 (self._indexfile if self._inline else self._datafile),
640 646 self._chunkcachesize,
641 647 chunkcache,
642 648 )
643 649 self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
644 650 self.opener,
645 651 self._sidedatafile,
646 652 self._chunkcachesize,
647 653 )
648 654 # revnum -> (chain-length, sum-delta-length)
649 655 self._chaininfocache = util.lrucachedict(500)
650 656 # revlog header -> revlog compressor
651 657 self._decompressors = {}
652 658
653 659 @util.propertycache
654 660 def revlog_kind(self):
655 661 return self.target[0]
656 662
657 663 @util.propertycache
658 664 def display_id(self):
659 665 """The public facing "ID" of the revlog that we use in message"""
660 666 if self.revlog_kind == KIND_FILELOG:
661 667 # Reference the file without the "data/" prefix, so it is familiar
662 668 # to the user.
663 669 return self.target[1]
664 670 else:
665 671 return self.radix
666 672
667 673 def _get_decompressor(self, t):
668 674 try:
669 675 compressor = self._decompressors[t]
670 676 except KeyError:
671 677 try:
672 678 engine = util.compengines.forrevlogheader(t)
673 679 compressor = engine.revlogcompressor(self._compengineopts)
674 680 self._decompressors[t] = compressor
675 681 except KeyError:
676 682 raise error.RevlogError(
677 683 _(b'unknown compression type %s') % binascii.hexlify(t)
678 684 )
679 685 return compressor
680 686
681 687 @util.propertycache
682 688 def _compressor(self):
683 689 engine = util.compengines[self._compengine]
684 690 return engine.revlogcompressor(self._compengineopts)
685 691
686 692 @util.propertycache
687 693 def _decompressor(self):
688 694 """the default decompressor"""
689 695 if self._docket is None:
690 696 return None
691 697 t = self._docket.default_compression_header
692 698 c = self._get_decompressor(t)
693 699 return c.decompress
694 700
695 701 def _indexfp(self):
696 702 """file object for the revlog's index file"""
697 703 return self.opener(self._indexfile, mode=b"r")
698 704
699 705 def __index_write_fp(self):
700 706 # You should not use this directly; use `_writing` instead
701 707 try:
702 708 f = self.opener(
703 709 self._indexfile, mode=b"r+", checkambig=self._checkambig
704 710 )
705 711 if self._docket is None:
706 712 f.seek(0, os.SEEK_END)
707 713 else:
708 714 f.seek(self._docket.index_end, os.SEEK_SET)
709 715 return f
710 716 except FileNotFoundError:
711 717 return self.opener(
712 718 self._indexfile, mode=b"w+", checkambig=self._checkambig
713 719 )
714 720
715 721 def __index_new_fp(self):
716 722 # You should not use this unless you are upgrading from an inline revlog
717 723 return self.opener(
718 724 self._indexfile,
719 725 mode=b"w",
720 726 checkambig=self._checkambig,
721 727 atomictemp=True,
722 728 )
723 729
724 730 def _datafp(self, mode=b'r'):
725 731 """file object for the revlog's data file"""
726 732 return self.opener(self._datafile, mode=mode)
727 733
728 734 @contextlib.contextmanager
729 735 def _sidedatareadfp(self):
730 736 """file object suitable to read sidedata"""
731 737 if self._writinghandles:
732 738 yield self._writinghandles[2]
733 739 else:
734 740 with self.opener(self._sidedatafile) as fp:
735 741 yield fp
736 742
737 743 def tiprev(self):
738 744 return len(self.index) - 1
739 745
740 746 def tip(self):
741 747 return self.node(self.tiprev())
742 748
743 749 def __contains__(self, rev):
744 750 return 0 <= rev < len(self)
745 751
746 752 def __len__(self):
747 753 return len(self.index)
748 754
749 755 def __iter__(self):
750 756 return iter(range(len(self)))
751 757
752 758 def revs(self, start=0, stop=None):
753 759 """iterate over all rev in this revlog (from start to stop)"""
754 760 return storageutil.iterrevs(len(self), start=start, stop=stop)
755 761
756 762 def hasnode(self, node):
757 763 try:
758 764 self.rev(node)
759 765 return True
760 766 except KeyError:
761 767 return False
762 768
763 769 def candelta(self, baserev, rev):
764 770 """whether two revisions (baserev, rev) can be delta-ed or not"""
765 771 # Disable delta if either rev requires a content-changing flag
766 772 # processor (ex. LFS). This is because such flag processor can alter
767 773 # the rawtext content that the delta will be based on, and two clients
768 774 # could have a same revlog node with different flags (i.e. different
769 775 # rawtext contents) and the delta could be incompatible.
770 776 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
771 777 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
772 778 ):
773 779 return False
774 780 return True
775 781
776 782 def update_caches(self, transaction):
777 783 if self._nodemap_file is not None:
778 784 if transaction is None:
779 785 nodemaputil.update_persistent_nodemap(self)
780 786 else:
781 787 nodemaputil.setup_persistent_nodemap(transaction, self)
782 788
783 789 def clearcaches(self):
784 790 self._revisioncache = None
785 791 self._chainbasecache.clear()
786 792 self._segmentfile.clear_cache()
787 793 self._segmentfile_sidedata.clear_cache()
788 794 self._pcache = {}
789 795 self._nodemap_docket = None
790 796 self.index.clearcaches()
791 797 # The Python code is the one responsible for validating the docket, so we
792 798 # end up having to refresh it here.
793 799 use_nodemap = (
794 800 not self._inline
795 801 and self._nodemap_file is not None
796 802 and util.safehasattr(self.index, 'update_nodemap_data')
797 803 )
798 804 if use_nodemap:
799 805 nodemap_data = nodemaputil.persisted_data(self)
800 806 if nodemap_data is not None:
801 807 self._nodemap_docket = nodemap_data[0]
802 808 self.index.update_nodemap_data(*nodemap_data)
803 809
804 810 def rev(self, node):
805 811 try:
806 812 return self.index.rev(node)
807 813 except TypeError:
808 814 raise
809 815 except error.RevlogError:
810 816 # parsers.c radix tree lookup failed
811 817 if (
812 818 node == self.nodeconstants.wdirid
813 819 or node in self.nodeconstants.wdirfilenodeids
814 820 ):
815 821 raise error.WdirUnsupported
816 822 raise error.LookupError(node, self.display_id, _(b'no node'))
817 823
818 824 # Accessors for index entries.
819 825
820 826 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
821 827 # are flags.
822 828 def start(self, rev):
823 829 return int(self.index[rev][0] >> 16)
824 830
825 831 def sidedata_cut_off(self, rev):
826 832 sd_cut_off = self.index[rev][8]
827 833 if sd_cut_off != 0:
828 834 return sd_cut_off
829 835 # This is some annoying dance, because entries without sidedata
830 836 # currently use 0 as their offset. (instead of previous-offset +
831 837 # previous-size)
832 838 #
833 839 # We should reconsider this sidedata → 0 sidedata_offset policy.
834 840 # In the meantime, we need this.
835 841 while 0 <= rev:
836 842 e = self.index[rev]
837 843 if e[9] != 0:
838 844 return e[8] + e[9]
839 845 rev -= 1
840 846 return 0
841 847
842 848 def flags(self, rev):
843 849 return self.index[rev][0] & 0xFFFF
844 850
845 851 def length(self, rev):
846 852 return self.index[rev][1]
847 853
848 854 def sidedata_length(self, rev):
849 855 if not self.hassidedata:
850 856 return 0
851 857 return self.index[rev][9]
852 858
853 859 def rawsize(self, rev):
854 860 """return the length of the uncompressed text for a given revision"""
855 861 l = self.index[rev][2]
856 862 if l >= 0:
857 863 return l
858 864
859 865 t = self.rawdata(rev)
860 866 return len(t)
861 867
862 868 def size(self, rev):
863 869 """length of non-raw text (processed by a "read" flag processor)"""
864 870 # fast path: if no "read" flag processor could change the content,
865 871 # size is rawsize. note: ELLIPSIS is known to not change the content.
866 872 flags = self.flags(rev)
867 873 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
868 874 return self.rawsize(rev)
869 875
870 876 return len(self.revision(rev))
871 877
872 878 def fast_rank(self, rev):
873 879 """Return the rank of a revision if already known, or None otherwise.
874 880
875 881 The rank of a revision is the size of the sub-graph it defines as a
876 882 head. Equivalently, the rank of a revision `r` is the size of the set
877 883 `ancestors(r)`, `r` included.
878 884
879 885 This method returns the rank retrieved from the revlog in constant
880 886 time. It makes no attempt at computing unknown values for versions of
881 887 the revlog which do not persist the rank.
882 888 """
883 889 rank = self.index[rev][ENTRY_RANK]
884 890 if self._format_version != CHANGELOGV2 or rank == RANK_UNKNOWN:
885 891 return None
886 892 if rev == nullrev:
887 893 return 0 # convention
888 894 return rank
889 895
890 896 def chainbase(self, rev):
891 897 base = self._chainbasecache.get(rev)
892 898 if base is not None:
893 899 return base
894 900
895 901 index = self.index
896 902 iterrev = rev
897 903 base = index[iterrev][3]
898 904 while base != iterrev:
899 905 iterrev = base
900 906 base = index[iterrev][3]
901 907
902 908 self._chainbasecache[rev] = base
903 909 return base
904 910
905 911 def linkrev(self, rev):
906 912 return self.index[rev][4]
907 913
908 914 def parentrevs(self, rev):
909 915 try:
910 916 entry = self.index[rev]
911 917 except IndexError:
912 918 if rev == wdirrev:
913 919 raise error.WdirUnsupported
914 920 raise
915 921
916 922 if self.canonical_parent_order and entry[5] == nullrev:
917 923 return entry[6], entry[5]
918 924 else:
919 925 return entry[5], entry[6]
920 926
921 927 # fast parentrevs(rev) where rev isn't filtered
922 928 _uncheckedparentrevs = parentrevs
923 929
924 930 def node(self, rev):
925 931 try:
926 932 return self.index[rev][7]
927 933 except IndexError:
928 934 if rev == wdirrev:
929 935 raise error.WdirUnsupported
930 936 raise
931 937
932 938 # Derived from index values.
933 939
934 940 def end(self, rev):
935 941 return self.start(rev) + self.length(rev)
936 942
937 943 def parents(self, node):
938 944 i = self.index
939 945 d = i[self.rev(node)]
940 946 # inline node() to avoid function call overhead
941 947 if self.canonical_parent_order and d[5] == self.nullid:
942 948 return i[d[6]][7], i[d[5]][7]
943 949 else:
944 950 return i[d[5]][7], i[d[6]][7]
945 951
946 952 def chainlen(self, rev):
947 953 return self._chaininfo(rev)[0]
948 954
949 955 def _chaininfo(self, rev):
950 956 chaininfocache = self._chaininfocache
951 957 if rev in chaininfocache:
952 958 return chaininfocache[rev]
953 959 index = self.index
954 960 generaldelta = self._generaldelta
955 961 iterrev = rev
956 962 e = index[iterrev]
957 963 clen = 0
958 964 compresseddeltalen = 0
959 965 while iterrev != e[3]:
960 966 clen += 1
961 967 compresseddeltalen += e[1]
962 968 if generaldelta:
963 969 iterrev = e[3]
964 970 else:
965 971 iterrev -= 1
966 972 if iterrev in chaininfocache:
967 973 t = chaininfocache[iterrev]
968 974 clen += t[0]
969 975 compresseddeltalen += t[1]
970 976 break
971 977 e = index[iterrev]
972 978 else:
973 979 # Add text length of base since decompressing that also takes
974 980 # work. For cache hits the length is already included.
975 981 compresseddeltalen += e[1]
976 982 r = (clen, compresseddeltalen)
977 983 chaininfocache[rev] = r
978 984 return r
979 985
980 986 def _deltachain(self, rev, stoprev=None):
981 987 """Obtain the delta chain for a revision.
982 988
983 989 ``stoprev`` specifies a revision to stop at. If not specified, we
984 990 stop at the base of the chain.
985 991
986 992 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
987 993 revs in ascending order and ``stopped`` is a bool indicating whether
988 994 ``stoprev`` was hit.
989 995 """
990 996 # Try C implementation.
991 997 try:
992 998 return self.index.deltachain(rev, stoprev, self._generaldelta)
993 999 except AttributeError:
994 1000 pass
995 1001
996 1002 chain = []
997 1003
998 1004 # Alias to prevent attribute lookup in tight loop.
999 1005 index = self.index
1000 1006 generaldelta = self._generaldelta
1001 1007
1002 1008 iterrev = rev
1003 1009 e = index[iterrev]
1004 1010 while iterrev != e[3] and iterrev != stoprev:
1005 1011 chain.append(iterrev)
1006 1012 if generaldelta:
1007 1013 iterrev = e[3]
1008 1014 else:
1009 1015 iterrev -= 1
1010 1016 e = index[iterrev]
1011 1017
1012 1018 if iterrev == stoprev:
1013 1019 stopped = True
1014 1020 else:
1015 1021 chain.append(iterrev)
1016 1022 stopped = False
1017 1023
1018 1024 chain.reverse()
1019 1025 return chain, stopped
1020 1026
1021 1027 def ancestors(self, revs, stoprev=0, inclusive=False):
1022 1028 """Generate the ancestors of 'revs' in reverse revision order.
1023 1029 Does not generate revs lower than stoprev.
1024 1030
1025 1031 See the documentation for ancestor.lazyancestors for more details."""
1026 1032
1027 1033 # first, make sure start revisions aren't filtered
1028 1034 revs = list(revs)
1029 1035 checkrev = self.node
1030 1036 for r in revs:
1031 1037 checkrev(r)
1032 1038 # and we're sure ancestors aren't filtered as well
1033 1039
1034 1040 if rustancestor is not None and self.index.rust_ext_compat:
1035 1041 lazyancestors = rustancestor.LazyAncestors
1036 1042 arg = self.index
1037 1043 else:
1038 1044 lazyancestors = ancestor.lazyancestors
1039 1045 arg = self._uncheckedparentrevs
1040 1046 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1041 1047
1042 1048 def descendants(self, revs):
1043 1049 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1044 1050
1045 1051 def findcommonmissing(self, common=None, heads=None):
1046 1052 """Return a tuple of the ancestors of common and the ancestors of heads
1047 1053 that are not ancestors of common. In revset terminology, we return the
1048 1054 tuple:
1049 1055
1050 1056 ::common, (::heads) - (::common)
1051 1057
1052 1058 The list is sorted by revision number, meaning it is
1053 1059 topologically sorted.
1054 1060
1055 1061 'heads' and 'common' are both lists of node IDs. If heads is
1056 1062 not supplied, uses all of the revlog's heads. If common is not
1057 1063 supplied, uses nullid."""
1058 1064 if common is None:
1059 1065 common = [self.nullid]
1060 1066 if heads is None:
1061 1067 heads = self.heads()
1062 1068
1063 1069 common = [self.rev(n) for n in common]
1064 1070 heads = [self.rev(n) for n in heads]
1065 1071
1066 1072 # we want the ancestors, but inclusive
1067 1073 class lazyset:
1068 1074 def __init__(self, lazyvalues):
1069 1075 self.addedvalues = set()
1070 1076 self.lazyvalues = lazyvalues
1071 1077
1072 1078 def __contains__(self, value):
1073 1079 return value in self.addedvalues or value in self.lazyvalues
1074 1080
1075 1081 def __iter__(self):
1076 1082 added = self.addedvalues
1077 1083 for r in added:
1078 1084 yield r
1079 1085 for r in self.lazyvalues:
1080 1086 if r not in added:
1081 1087 yield r
1082 1088
1083 1089 def add(self, value):
1084 1090 self.addedvalues.add(value)
1085 1091
1086 1092 def update(self, values):
1087 1093 self.addedvalues.update(values)
1088 1094
1089 1095 has = lazyset(self.ancestors(common))
1090 1096 has.add(nullrev)
1091 1097 has.update(common)
1092 1098
1093 1099 # take all ancestors from heads that aren't in has
1094 1100 missing = set()
1095 1101 visit = collections.deque(r for r in heads if r not in has)
1096 1102 while visit:
1097 1103 r = visit.popleft()
1098 1104 if r in missing:
1099 1105 continue
1100 1106 else:
1101 1107 missing.add(r)
1102 1108 for p in self.parentrevs(r):
1103 1109 if p not in has:
1104 1110 visit.append(p)
1105 1111 missing = list(missing)
1106 1112 missing.sort()
1107 1113 return has, [self.node(miss) for miss in missing]
1108 1114
1109 1115 def incrementalmissingrevs(self, common=None):
1110 1116 """Return an object that can be used to incrementally compute the
1111 1117 revision numbers of the ancestors of arbitrary sets that are not
1112 1118 ancestors of common. This is an ancestor.incrementalmissingancestors
1113 1119 object.
1114 1120
1115 1121 'common' is a list of revision numbers. If common is not supplied, uses
1116 1122 nullrev.
1117 1123 """
1118 1124 if common is None:
1119 1125 common = [nullrev]
1120 1126
1121 1127 if rustancestor is not None and self.index.rust_ext_compat:
1122 1128 return rustancestor.MissingAncestors(self.index, common)
1123 1129 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1124 1130
1125 1131 def findmissingrevs(self, common=None, heads=None):
1126 1132 """Return the revision numbers of the ancestors of heads that
1127 1133 are not ancestors of common.
1128 1134
1129 1135 More specifically, return a list of revision numbers corresponding to
1130 1136 nodes N such that every N satisfies the following constraints:
1131 1137
1132 1138 1. N is an ancestor of some node in 'heads'
1133 1139 2. N is not an ancestor of any node in 'common'
1134 1140
1135 1141 The list is sorted by revision number, meaning it is
1136 1142 topologically sorted.
1137 1143
1138 1144 'heads' and 'common' are both lists of revision numbers. If heads is
1139 1145 not supplied, uses all of the revlog's heads. If common is not
1140 1146 supplied, uses nullid."""
1141 1147 if common is None:
1142 1148 common = [nullrev]
1143 1149 if heads is None:
1144 1150 heads = self.headrevs()
1145 1151
1146 1152 inc = self.incrementalmissingrevs(common=common)
1147 1153 return inc.missingancestors(heads)
1148 1154
1149 1155 def findmissing(self, common=None, heads=None):
1150 1156 """Return the ancestors of heads that are not ancestors of common.
1151 1157
1152 1158 More specifically, return a list of nodes N such that every N
1153 1159 satisfies the following constraints:
1154 1160
1155 1161 1. N is an ancestor of some node in 'heads'
1156 1162 2. N is not an ancestor of any node in 'common'
1157 1163
1158 1164 The list is sorted by revision number, meaning it is
1159 1165 topologically sorted.
1160 1166
1161 1167 'heads' and 'common' are both lists of node IDs. If heads is
1162 1168 not supplied, uses all of the revlog's heads. If common is not
1163 1169 supplied, uses nullid."""
1164 1170 if common is None:
1165 1171 common = [self.nullid]
1166 1172 if heads is None:
1167 1173 heads = self.heads()
1168 1174
1169 1175 common = [self.rev(n) for n in common]
1170 1176 heads = [self.rev(n) for n in heads]
1171 1177
1172 1178 inc = self.incrementalmissingrevs(common=common)
1173 1179 return [self.node(r) for r in inc.missingancestors(heads)]
1174 1180
1175 1181 def nodesbetween(self, roots=None, heads=None):
1176 1182 """Return a topological path from 'roots' to 'heads'.
1177 1183
1178 1184 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1179 1185 topologically sorted list of all nodes N that satisfy both of
1180 1186 these constraints:
1181 1187
1182 1188 1. N is a descendant of some node in 'roots'
1183 1189 2. N is an ancestor of some node in 'heads'
1184 1190
1185 1191 Every node is considered to be both a descendant and an ancestor
1186 1192 of itself, so every reachable node in 'roots' and 'heads' will be
1187 1193 included in 'nodes'.
1188 1194
1189 1195 'outroots' is the list of reachable nodes in 'roots', i.e., the
1190 1196 subset of 'roots' that is returned in 'nodes'. Likewise,
1191 1197 'outheads' is the subset of 'heads' that is also in 'nodes'.
1192 1198
1193 1199 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1194 1200 unspecified, uses nullid as the only root. If 'heads' is
1195 1201 unspecified, uses list of all of the revlog's heads."""
1196 1202 nonodes = ([], [], [])
1197 1203 if roots is not None:
1198 1204 roots = list(roots)
1199 1205 if not roots:
1200 1206 return nonodes
1201 1207 lowestrev = min([self.rev(n) for n in roots])
1202 1208 else:
1203 1209 roots = [self.nullid] # Everybody's a descendant of nullid
1204 1210 lowestrev = nullrev
1205 1211 if (lowestrev == nullrev) and (heads is None):
1206 1212 # We want _all_ the nodes!
1207 1213 return (
1208 1214 [self.node(r) for r in self],
1209 1215 [self.nullid],
1210 1216 list(self.heads()),
1211 1217 )
1212 1218 if heads is None:
1213 1219 # All nodes are ancestors, so the latest ancestor is the last
1214 1220 # node.
1215 1221 highestrev = len(self) - 1
1216 1222 # Set ancestors to None to signal that every node is an ancestor.
1217 1223 ancestors = None
1218 1224 # Set heads to an empty dictionary for later discovery of heads
1219 1225 heads = {}
1220 1226 else:
1221 1227 heads = list(heads)
1222 1228 if not heads:
1223 1229 return nonodes
1224 1230 ancestors = set()
1225 1231 # Turn heads into a dictionary so we can remove 'fake' heads.
1226 1232 # Also, later we will be using it to filter out the heads we can't
1227 1233 # find from roots.
1228 1234 heads = dict.fromkeys(heads, False)
1229 1235 # Start at the top and keep marking parents until we're done.
1230 1236 nodestotag = set(heads)
1231 1237 # Remember where the top was so we can use it as a limit later.
1232 1238 highestrev = max([self.rev(n) for n in nodestotag])
1233 1239 while nodestotag:
1234 1240 # grab a node to tag
1235 1241 n = nodestotag.pop()
1236 1242 # Never tag nullid
1237 1243 if n == self.nullid:
1238 1244 continue
1239 1245 # A node's revision number represents its place in a
1240 1246 # topologically sorted list of nodes.
1241 1247 r = self.rev(n)
1242 1248 if r >= lowestrev:
1243 1249 if n not in ancestors:
1244 1250 # If we are possibly a descendant of one of the roots
1245 1251 # and we haven't already been marked as an ancestor
1246 1252 ancestors.add(n) # Mark as ancestor
1247 1253 # Add non-nullid parents to list of nodes to tag.
1248 1254 nodestotag.update(
1249 1255 [p for p in self.parents(n) if p != self.nullid]
1250 1256 )
1251 1257 elif n in heads: # We've seen it before, is it a fake head?
1252 1258 # So it is, real heads should not be the ancestors of
1253 1259 # any other heads.
1254 1260 heads.pop(n)
1255 1261 if not ancestors:
1256 1262 return nonodes
1257 1263 # Now that we have our set of ancestors, we want to remove any
1258 1264 # roots that are not ancestors.
1259 1265
1260 1266 # If one of the roots was nullid, everything is included anyway.
1261 1267 if lowestrev > nullrev:
1262 1268 # But, since we weren't, let's recompute the lowest rev to not
1263 1269 # include roots that aren't ancestors.
1264 1270
1265 1271 # Filter out roots that aren't ancestors of heads
1266 1272 roots = [root for root in roots if root in ancestors]
1267 1273 # Recompute the lowest revision
1268 1274 if roots:
1269 1275 lowestrev = min([self.rev(root) for root in roots])
1270 1276 else:
1271 1277 # No more roots? Return empty list
1272 1278 return nonodes
1273 1279 else:
1274 1280 # We are descending from nullid, and don't need to care about
1275 1281 # any other roots.
1276 1282 lowestrev = nullrev
1277 1283 roots = [self.nullid]
1278 1284 # Transform our roots list into a set.
1279 1285 descendants = set(roots)
1280 1286 # Also, keep the original roots so we can filter out roots that aren't
1281 1287 # 'real' roots (i.e. are descended from other roots).
1282 1288 roots = descendants.copy()
1283 1289 # Our topologically sorted list of output nodes.
1284 1290 orderedout = []
1285 1291 # Don't start at nullid since we don't want nullid in our output list,
1286 1292 # and if nullid shows up in descendants, empty parents will look like
1287 1293 # they're descendants.
1288 1294 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1289 1295 n = self.node(r)
1290 1296 isdescendant = False
1291 1297 if lowestrev == nullrev: # Everybody is a descendant of nullid
1292 1298 isdescendant = True
1293 1299 elif n in descendants:
1294 1300 # n is already a descendant
1295 1301 isdescendant = True
1296 1302 # This check only needs to be done here because all the roots
1297 1303 # will start being marked as descendants before the loop.
1298 1304 if n in roots:
1299 1305 # If n was a root, check if it's a 'real' root.
1300 1306 p = tuple(self.parents(n))
1301 1307 # If any of its parents are descendants, it's not a root.
1302 1308 if (p[0] in descendants) or (p[1] in descendants):
1303 1309 roots.remove(n)
1304 1310 else:
1305 1311 p = tuple(self.parents(n))
1306 1312 # A node is a descendant if either of its parents is a
1307 1313 # descendant. (We seeded the descendants set with the roots
1308 1314 # up there, remember?)
1309 1315 if (p[0] in descendants) or (p[1] in descendants):
1310 1316 descendants.add(n)
1311 1317 isdescendant = True
1312 1318 if isdescendant and ((ancestors is None) or (n in ancestors)):
1313 1319 # Only include nodes that are both descendants and ancestors.
1314 1320 orderedout.append(n)
1315 1321 if (ancestors is not None) and (n in heads):
1316 1322 # We're trying to figure out which heads are reachable
1317 1323 # from roots.
1318 1324 # Mark this head as having been reached
1319 1325 heads[n] = True
1320 1326 elif ancestors is None:
1321 1327 # Otherwise, we're trying to discover the heads.
1322 1328 # Assume this is a head because if it isn't, the next step
1323 1329 # will eventually remove it.
1324 1330 heads[n] = True
1325 1331 # But, obviously its parents aren't.
1326 1332 for p in self.parents(n):
1327 1333 heads.pop(p, None)
1328 1334 heads = [head for head, flag in heads.items() if flag]
1329 1335 roots = list(roots)
1330 1336 assert orderedout
1331 1337 assert roots
1332 1338 assert heads
1333 1339 return (orderedout, roots, heads)
1334 1340
1335 1341 def headrevs(self, revs=None):
1336 1342 if revs is None:
1337 1343 try:
1338 1344 return self.index.headrevs()
1339 1345 except AttributeError:
1340 1346 return self._headrevs()
1341 1347 if rustdagop is not None and self.index.rust_ext_compat:
1342 1348 return rustdagop.headrevs(self.index, revs)
1343 1349 return dagop.headrevs(revs, self._uncheckedparentrevs)
1344 1350
1345 1351 def computephases(self, roots):
1346 1352 return self.index.computephasesmapsets(roots)
1347 1353
1348 1354 def _headrevs(self):
1349 1355 count = len(self)
1350 1356 if not count:
1351 1357 return [nullrev]
1352 1358 # we won't iter over filtered rev so nobody is a head at start
1353 1359 ishead = [0] * (count + 1)
1354 1360 index = self.index
1355 1361 for r in self:
1356 1362 ishead[r] = 1 # I may be a head
1357 1363 e = index[r]
1358 1364 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1359 1365 return [r for r, val in enumerate(ishead) if val]
1360 1366
1361 1367 def heads(self, start=None, stop=None):
1362 1368 """return the list of all nodes that have no children
1363 1369
1364 1370 if start is specified, only heads that are descendants of
1365 1371 start will be returned
1366 1372 if stop is specified, it will consider all the revs from stop
1367 1373 as if they had no children
1368 1374 """
1369 1375 if start is None and stop is None:
1370 1376 if not len(self):
1371 1377 return [self.nullid]
1372 1378 return [self.node(r) for r in self.headrevs()]
1373 1379
1374 1380 if start is None:
1375 1381 start = nullrev
1376 1382 else:
1377 1383 start = self.rev(start)
1378 1384
1379 1385 stoprevs = {self.rev(n) for n in stop or []}
1380 1386
1381 1387 revs = dagop.headrevssubset(
1382 1388 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1383 1389 )
1384 1390
1385 1391 return [self.node(rev) for rev in revs]
1386 1392
1387 1393 def children(self, node):
1388 1394 """find the children of a given node"""
1389 1395 c = []
1390 1396 p = self.rev(node)
1391 1397 for r in self.revs(start=p + 1):
1392 1398 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1393 1399 if prevs:
1394 1400 for pr in prevs:
1395 1401 if pr == p:
1396 1402 c.append(self.node(r))
1397 1403 elif p == nullrev:
1398 1404 c.append(self.node(r))
1399 1405 return c
1400 1406
1401 1407 def commonancestorsheads(self, a, b):
1402 1408 """calculate all the heads of the common ancestors of nodes a and b"""
1403 1409 a, b = self.rev(a), self.rev(b)
1404 1410 ancs = self._commonancestorsheads(a, b)
1405 1411 return pycompat.maplist(self.node, ancs)
1406 1412
1407 1413 def _commonancestorsheads(self, *revs):
1408 1414 """calculate all the heads of the common ancestors of revs"""
1409 1415 try:
1410 1416 ancs = self.index.commonancestorsheads(*revs)
1411 1417 except (AttributeError, OverflowError): # C implementation failed
1412 1418 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1413 1419 return ancs
1414 1420
1415 1421 def isancestor(self, a, b):
1416 1422 """return True if node a is an ancestor of node b
1417 1423
1418 1424 A revision is considered an ancestor of itself."""
1419 1425 a, b = self.rev(a), self.rev(b)
1420 1426 return self.isancestorrev(a, b)
1421 1427
1422 1428 def isancestorrev(self, a, b):
1423 1429 """return True if revision a is an ancestor of revision b
1424 1430
1425 1431 A revision is considered an ancestor of itself.
1426 1432
1427 1433 The implementation of this is trivial but the use of
1428 1434 reachableroots is not."""
1429 1435 if a == nullrev:
1430 1436 return True
1431 1437 elif a == b:
1432 1438 return True
1433 1439 elif a > b:
1434 1440 return False
1435 1441 return bool(self.reachableroots(a, [b], [a], includepath=False))
1436 1442
1437 1443 def reachableroots(self, minroot, heads, roots, includepath=False):
1438 1444 """return (heads(::(<roots> and <roots>::<heads>)))
1439 1445
1440 1446 If includepath is True, return (<roots>::<heads>)."""
1441 1447 try:
1442 1448 return self.index.reachableroots2(
1443 1449 minroot, heads, roots, includepath
1444 1450 )
1445 1451 except AttributeError:
1446 1452 return dagop._reachablerootspure(
1447 1453 self.parentrevs, minroot, roots, heads, includepath
1448 1454 )
1449 1455
1450 1456 def ancestor(self, a, b):
1451 1457 """calculate the "best" common ancestor of nodes a and b"""
1452 1458
1453 1459 a, b = self.rev(a), self.rev(b)
1454 1460 try:
1455 1461 ancs = self.index.ancestors(a, b)
1456 1462 except (AttributeError, OverflowError):
1457 1463 ancs = ancestor.ancestors(self.parentrevs, a, b)
1458 1464 if ancs:
1459 1465 # choose a consistent winner when there's a tie
1460 1466 return min(map(self.node, ancs))
1461 1467 return self.nullid
1462 1468
1463 1469 def _match(self, id):
1464 1470 if isinstance(id, int):
1465 1471 # rev
1466 1472 return self.node(id)
1467 1473 if len(id) == self.nodeconstants.nodelen:
1468 1474 # possibly a binary node
1469 1475 # odds of a binary node being all hex in ASCII are 1 in 10**25
1470 1476 try:
1471 1477 node = id
1472 1478 self.rev(node) # quick search the index
1473 1479 return node
1474 1480 except error.LookupError:
1475 1481 pass # may be partial hex id
1476 1482 try:
1477 1483 # str(rev)
1478 1484 rev = int(id)
1479 1485 if b"%d" % rev != id:
1480 1486 raise ValueError
1481 1487 if rev < 0:
1482 1488 rev = len(self) + rev
1483 1489 if rev < 0 or rev >= len(self):
1484 1490 raise ValueError
1485 1491 return self.node(rev)
1486 1492 except (ValueError, OverflowError):
1487 1493 pass
1488 1494 if len(id) == 2 * self.nodeconstants.nodelen:
1489 1495 try:
1490 1496 # a full hex nodeid?
1491 1497 node = bin(id)
1492 1498 self.rev(node)
1493 1499 return node
1494 1500 except (binascii.Error, error.LookupError):
1495 1501 pass
1496 1502
1497 1503 def _partialmatch(self, id):
1498 1504 # we don't care about wdirfilenodeids as they should always be full hashes
1499 1505 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1500 1506 ambiguous = False
1501 1507 try:
1502 1508 partial = self.index.partialmatch(id)
1503 1509 if partial and self.hasnode(partial):
1504 1510 if maybewdir:
1505 1511 # single 'ff...' match in radix tree, ambiguous with wdir
1506 1512 ambiguous = True
1507 1513 else:
1508 1514 return partial
1509 1515 elif maybewdir:
1510 1516 # no 'ff...' match in radix tree, wdir identified
1511 1517 raise error.WdirUnsupported
1512 1518 else:
1513 1519 return None
1514 1520 except error.RevlogError:
1515 1521 # parsers.c radix tree lookup gave multiple matches
1516 1522 # fast path: for unfiltered changelog, radix tree is accurate
1517 1523 if not getattr(self, 'filteredrevs', None):
1518 1524 ambiguous = True
1519 1525 # fall through to slow path that filters hidden revisions
1520 1526 except (AttributeError, ValueError):
1521 1527 # we are pure python, or key is not hex
1522 1528 pass
1523 1529 if ambiguous:
1524 1530 raise error.AmbiguousPrefixLookupError(
1525 1531 id, self.display_id, _(b'ambiguous identifier')
1526 1532 )
1527 1533
1528 1534 if id in self._pcache:
1529 1535 return self._pcache[id]
1530 1536
1531 1537 if len(id) <= 40:
1532 1538 # hex(node)[:...]
1533 1539 l = len(id) // 2 * 2 # grab an even number of digits
1534 1540 try:
1535 1541 # we're dropping the last digit, so let's check that it's hex,
1536 1542 # to avoid the expensive computation below if it's not
1537 1543 if len(id) % 2 > 0:
1538 1544 if not (id[-1] in hexdigits):
1539 1545 return None
1540 1546 prefix = bin(id[:l])
1541 1547 except binascii.Error:
1542 1548 pass
1543 1549 else:
1544 1550 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1545 1551 nl = [
1546 1552 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1547 1553 ]
1548 1554 if self.nodeconstants.nullhex.startswith(id):
1549 1555 nl.append(self.nullid)
1550 1556 if len(nl) > 0:
1551 1557 if len(nl) == 1 and not maybewdir:
1552 1558 self._pcache[id] = nl[0]
1553 1559 return nl[0]
1554 1560 raise error.AmbiguousPrefixLookupError(
1555 1561 id, self.display_id, _(b'ambiguous identifier')
1556 1562 )
1557 1563 if maybewdir:
1558 1564 raise error.WdirUnsupported
1559 1565 return None
1560 1566
1561 1567 def lookup(self, id):
1562 1568 """locate a node based on:
1563 1569 - revision number or str(revision number)
1564 1570 - nodeid or subset of hex nodeid
1565 1571 """
1566 1572 n = self._match(id)
1567 1573 if n is not None:
1568 1574 return n
1569 1575 n = self._partialmatch(id)
1570 1576 if n:
1571 1577 return n
1572 1578
1573 1579 raise error.LookupError(id, self.display_id, _(b'no match found'))
1574 1580
1575 1581 def shortest(self, node, minlength=1):
1576 1582 """Find the shortest unambiguous prefix that matches node."""
1577 1583
1578 1584 def isvalid(prefix):
1579 1585 try:
1580 1586 matchednode = self._partialmatch(prefix)
1581 1587 except error.AmbiguousPrefixLookupError:
1582 1588 return False
1583 1589 except error.WdirUnsupported:
1584 1590 # single 'ff...' match
1585 1591 return True
1586 1592 if matchednode is None:
1587 1593 raise error.LookupError(node, self.display_id, _(b'no node'))
1588 1594 return True
1589 1595
1590 1596 def maybewdir(prefix):
1591 1597 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1592 1598
1593 1599 hexnode = hex(node)
1594 1600
1595 1601 def disambiguate(hexnode, minlength):
1596 1602 """Disambiguate against wdirid."""
1597 1603 for length in range(minlength, len(hexnode) + 1):
1598 1604 prefix = hexnode[:length]
1599 1605 if not maybewdir(prefix):
1600 1606 return prefix
1601 1607
1602 1608 if not getattr(self, 'filteredrevs', None):
1603 1609 try:
1604 1610 length = max(self.index.shortest(node), minlength)
1605 1611 return disambiguate(hexnode, length)
1606 1612 except error.RevlogError:
1607 1613 if node != self.nodeconstants.wdirid:
1608 1614 raise error.LookupError(
1609 1615 node, self.display_id, _(b'no node')
1610 1616 )
1611 1617 except AttributeError:
1612 1618 # Fall through to pure code
1613 1619 pass
1614 1620
1615 1621 if node == self.nodeconstants.wdirid:
1616 1622 for length in range(minlength, len(hexnode) + 1):
1617 1623 prefix = hexnode[:length]
1618 1624 if isvalid(prefix):
1619 1625 return prefix
1620 1626
1621 1627 for length in range(minlength, len(hexnode) + 1):
1622 1628 prefix = hexnode[:length]
1623 1629 if isvalid(prefix):
1624 1630 return disambiguate(hexnode, length)
1625 1631
1626 1632 def cmp(self, node, text):
1627 1633 """compare text with a given file revision
1628 1634
1629 1635 returns True if text is different than what is stored.
1630 1636 """
1631 1637 p1, p2 = self.parents(node)
1632 1638 return storageutil.hashrevisionsha1(text, p1, p2) != node
1633 1639
1634 1640 def _getsegmentforrevs(self, startrev, endrev, df=None):
1635 1641 """Obtain a segment of raw data corresponding to a range of revisions.
1636 1642
1637 1643 Accepts the start and end revisions and an optional already-open
1638 1644 file handle to be used for reading. If the file handle is read, its
1639 1645 seek position will not be preserved.
1640 1646
1641 1647 Requests for data may be satisfied by a cache.
1642 1648
1643 1649 Returns a 2-tuple of (offset, data) for the requested range of
1644 1650 revisions. Offset is the integer offset from the beginning of the
1645 1651 revlog and data is a str or buffer of the raw byte data.
1646 1652
1647 1653 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1648 1654 to determine where each revision's data begins and ends.
1649 1655 """
1650 1656 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1651 1657 # (functions are expensive).
1652 1658 index = self.index
1653 1659 istart = index[startrev]
1654 1660 start = int(istart[0] >> 16)
1655 1661 if startrev == endrev:
1656 1662 end = start + istart[1]
1657 1663 else:
1658 1664 iend = index[endrev]
1659 1665 end = int(iend[0] >> 16) + iend[1]
1660 1666
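# For inline revlogs the data chunks are interleaved with the index entries
# inside the ``.i`` file, so the physical offset of revision ``r`` is shifted
# by ``(r + 1) * entry_size`` (one entry for each of revisions ``0..r``);
# that is what the adjustment below accounts for.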
1661 1667 if self._inline:
1662 1668 start += (startrev + 1) * self.index.entry_size
1663 1669 end += (endrev + 1) * self.index.entry_size
1664 1670 length = end - start
1665 1671
1666 1672 return start, self._segmentfile.read_chunk(start, length, df)
1667 1673
1668 1674 def _chunk(self, rev, df=None):
1669 1675 """Obtain a single decompressed chunk for a revision.
1670 1676
1671 1677 Accepts an integer revision and an optional already-open file handle
1672 1678 to be used for reading. If used, the seek position of the file will not
1673 1679 be preserved.
1674 1680
1675 1681 Returns a str holding uncompressed data for the requested revision.
1676 1682 """
1677 1683 compression_mode = self.index[rev][10]
1678 1684 data = self._getsegmentforrevs(rev, rev, df=df)[1]
1679 1685 if compression_mode == COMP_MODE_PLAIN:
1680 1686 return data
1681 1687 elif compression_mode == COMP_MODE_DEFAULT:
1682 1688 return self._decompressor(data)
1683 1689 elif compression_mode == COMP_MODE_INLINE:
1684 1690 return self.decompress(data)
1685 1691 else:
1686 1692 msg = b'unknown compression mode %d'
1687 1693 msg %= compression_mode
1688 1694 raise error.RevlogError(msg)
1689 1695
1690 1696 def _chunks(self, revs, df=None, targetsize=None):
1691 1697 """Obtain decompressed chunks for the specified revisions.
1692 1698
1693 1699 Accepts an iterable of numeric revisions that are assumed to be in
1694 1700 ascending order. Also accepts an optional already-open file handle
1695 1701 to be used for reading. If used, the seek position of the file will
1696 1702 not be preserved.
1697 1703
1698 1704 This function is similar to calling ``self._chunk()`` multiple times,
1699 1705 but is faster.
1700 1706
1701 1707 Returns a list with decompressed data for each requested revision.
1702 1708 """
1703 1709 if not revs:
1704 1710 return []
1705 1711 start = self.start
1706 1712 length = self.length
1707 1713 inline = self._inline
1708 1714 iosize = self.index.entry_size
1709 1715 buffer = util.buffer
1710 1716
1711 1717 l = []
1712 1718 ladd = l.append
1713 1719
1714 1720 if not self._withsparseread:
1715 1721 slicedchunks = (revs,)
1716 1722 else:
1717 1723 slicedchunks = deltautil.slicechunk(
1718 1724 self, revs, targetsize=targetsize
1719 1725 )
1720 1726
1721 1727 for revschunk in slicedchunks:
1722 1728 firstrev = revschunk[0]
1723 1729 # Skip trailing revisions with empty diff
1724 1730 for lastrev in revschunk[::-1]:
1725 1731 if length(lastrev) != 0:
1726 1732 break
1727 1733
1728 1734 try:
1729 1735 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1730 1736 except OverflowError:
1731 1737 # issue4215 - we can't cache a run of chunks greater than
1732 1738 # 2G on Windows
1733 1739 return [self._chunk(rev, df=df) for rev in revschunk]
1734 1740
1735 1741 decomp = self.decompress
1736 1742 # self._decompressor might be None, but will not be used in that case
1737 1743 def_decomp = self._decompressor
1738 1744 for rev in revschunk:
1739 1745 chunkstart = start(rev)
1740 1746 if inline:
1741 1747 chunkstart += (rev + 1) * iosize
1742 1748 chunklength = length(rev)
1743 1749 comp_mode = self.index[rev][10]
1744 1750 c = buffer(data, chunkstart - offset, chunklength)
1745 1751 if comp_mode == COMP_MODE_PLAIN:
1746 1752 ladd(c)
1747 1753 elif comp_mode == COMP_MODE_INLINE:
1748 1754 ladd(decomp(c))
1749 1755 elif comp_mode == COMP_MODE_DEFAULT:
1750 1756 ladd(def_decomp(c))
1751 1757 else:
1752 1758 msg = b'unknown compression mode %d'
1753 1759 msg %= comp_mode
1754 1760 raise error.RevlogError(msg)
1755 1761
1756 1762 return l
1757 1763
1758 1764 def deltaparent(self, rev):
1759 1765 """return deltaparent of the given revision"""
1760 1766 base = self.index[rev][3]
1761 1767 if base == rev:
1762 1768 return nullrev
1763 1769 elif self._generaldelta:
1764 1770 return base
1765 1771 else:
1766 1772 return rev - 1
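# Illustrative toy (not part of revlog): resolving a delta chain from the
# per-revision delta-base information, mirroring deltaparent() above.
# `bases[r]` plays the role of index[r][3]; with generaldelta the stored
# base *is* the delta parent, otherwise deltas are implicitly against r - 1.
nullrev = -1

def toy_deltaparent(bases, rev, generaldelta=True):
    base = bases[rev]
    if base == rev:  # stored as full text (base points at itself)
        return nullrev
    return base if generaldelta else rev - 1

def toy_deltachain(bases, rev, generaldelta=True):
    """Return the revisions to patch together to rebuild `rev`, oldest first."""
    chain = []
    while rev != nullrev:
        chain.append(rev)
        rev = toy_deltaparent(bases, rev, generaldelta)
    return chain[::-1]

# rev 0 is a full text, revs 1 and 2 delta against their predecessor,
# rev 3 deltas against rev 1:
assert toy_deltachain([0, 0, 1, 1], 3) == [0, 1, 3]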
1767 1773
1768 1774 def issnapshot(self, rev):
1769 1775 """tells whether rev is a snapshot"""
1770 1776 if not self._sparserevlog:
1771 1777 return self.deltaparent(rev) == nullrev
1772 1778 elif util.safehasattr(self.index, b'issnapshot'):
1773 1779 # directly assign the method to cache the testing and access
1774 1780 self.issnapshot = self.index.issnapshot
1775 1781 return self.issnapshot(rev)
1776 1782 if rev == nullrev:
1777 1783 return True
1778 1784 entry = self.index[rev]
1779 1785 base = entry[3]
1780 1786 if base == rev:
1781 1787 return True
1782 1788 if base == nullrev:
1783 1789 return True
1784 1790 p1 = entry[5]
1785 1791 while self.length(p1) == 0:
1786 1792 b = self.deltaparent(p1)
1787 1793 if b == p1:
1788 1794 break
1789 1795 p1 = b
1790 1796 p2 = entry[6]
1791 1797 while self.length(p2) == 0:
1792 1798 b = self.deltaparent(p2)
1793 1799 if b == p2:
1794 1800 break
1795 1801 p2 = b
1796 1802 if base == p1 or base == p2:
1797 1803 return False
1798 1804 return self.issnapshot(base)
1799 1805
1800 1806 def snapshotdepth(self, rev):
1801 1807 """number of snapshot in the chain before this one"""
1802 1808 if not self.issnapshot(rev):
1803 1809 raise error.ProgrammingError(b'revision %d not a snapshot')
1804 1810 return len(self._deltachain(rev)[0]) - 1
1805 1811
1806 1812 def revdiff(self, rev1, rev2):
1807 1813 """return or calculate a delta between two revisions
1808 1814
1809 1815 The delta calculated is in binary form and is intended to be written to
1810 1816 revlog data directly. So this function needs raw revision data.
1811 1817 """
1812 1818 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1813 1819 return bytes(self._chunk(rev2))
1814 1820
1815 1821 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1816 1822
1817 1823 def revision(self, nodeorrev, _df=None):
1818 1824 """return an uncompressed revision of a given node or revision
1819 1825 number.
1820 1826
1821 1827 _df - an existing file handle to read from. (internal-only)
1822 1828 """
1823 1829 return self._revisiondata(nodeorrev, _df)
1824 1830
1825 1831 def sidedata(self, nodeorrev, _df=None):
1826 1832 """a map of extra data related to the changeset but not part of the hash
1827 1833
1828 1834 This function currently returns a dictionary. However, a more advanced
1829 1835 mapping object will likely be used in the future for more
1830 1836 efficient/lazy code.
1831 1837 """
1832 1838 # deal with <nodeorrev> argument type
1833 1839 if isinstance(nodeorrev, int):
1834 1840 rev = nodeorrev
1835 1841 else:
1836 1842 rev = self.rev(nodeorrev)
1837 1843 return self._sidedata(rev)
1838 1844
1839 1845 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1840 1846 # deal with <nodeorrev> argument type
1841 1847 if isinstance(nodeorrev, int):
1842 1848 rev = nodeorrev
1843 1849 node = self.node(rev)
1844 1850 else:
1845 1851 node = nodeorrev
1846 1852 rev = None
1847 1853
1848 1854 # fast path the special `nullid` rev
1849 1855 if node == self.nullid:
1850 1856 return b""
1851 1857
1852 1858 # ``rawtext`` is the text as stored inside the revlog. Might be the
1853 1859 # revision or might need to be processed to retrieve the revision.
1854 1860 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1855 1861
1856 1862 if raw and validated:
1857 1863 # if we don't want to process the raw text and the raw
1858 1864 # text is cached, we can exit early.
1859 1865 return rawtext
1860 1866 if rev is None:
1861 1867 rev = self.rev(node)
1862 1868 # the revlog's flag for this revision
1863 1869 # (usually alter its state or content)
1864 1870 flags = self.flags(rev)
1865 1871
1866 1872 if validated and flags == REVIDX_DEFAULT_FLAGS:
1867 1873 # no extra flags set, no flag processor runs, text = rawtext
1868 1874 return rawtext
1869 1875
1870 1876 if raw:
1871 1877 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1872 1878 text = rawtext
1873 1879 else:
1874 1880 r = flagutil.processflagsread(self, rawtext, flags)
1875 1881 text, validatehash = r
1876 1882 if validatehash:
1877 1883 self.checkhash(text, node, rev=rev)
1878 1884 if not validated:
1879 1885 self._revisioncache = (node, rev, rawtext)
1880 1886
1881 1887 return text
1882 1888
1883 1889 def _rawtext(self, node, rev, _df=None):
1884 1890 """return the possibly unvalidated rawtext for a revision
1885 1891
1886 1892 returns (rev, rawtext, validated)
1887 1893 """
1888 1894
1889 1895 # revision in the cache (could be useful to apply delta)
1890 1896 cachedrev = None
1891 1897 # An intermediate text to apply deltas to
1892 1898 basetext = None
1893 1899
1894 1900 # Check if we have the entry in cache
1895 1901 # The cache entry looks like (node, rev, rawtext)
1896 1902 if self._revisioncache:
1897 1903 if self._revisioncache[0] == node:
1898 1904 return (rev, self._revisioncache[2], True)
1899 1905 cachedrev = self._revisioncache[1]
1900 1906
1901 1907 if rev is None:
1902 1908 rev = self.rev(node)
1903 1909
1904 1910 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1905 1911 if stopped:
1906 1912 basetext = self._revisioncache[2]
1907 1913
1908 1914 # drop cache to save memory, the caller is expected to
1909 1915 # update self._revisioncache after validating the text
1910 1916 self._revisioncache = None
1911 1917
1912 1918 targetsize = None
1913 1919 rawsize = self.index[rev][2]
1914 1920 if 0 <= rawsize:
1915 1921 targetsize = 4 * rawsize
1916 1922
1917 1923 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1918 1924 if basetext is None:
1919 1925 basetext = bytes(bins[0])
1920 1926 bins = bins[1:]
1921 1927
1922 1928 rawtext = mdiff.patches(basetext, bins)
1923 1929 del basetext # let us have a chance to free memory early
1924 1930 return (rev, rawtext, False)
1925 1931
1926 1932 def _sidedata(self, rev):
1927 1933 """Return the sidedata for a given revision number."""
1928 1934 index_entry = self.index[rev]
1929 1935 sidedata_offset = index_entry[8]
1930 1936 sidedata_size = index_entry[9]
1931 1937
1932 1938 if self._inline:
1933 1939 sidedata_offset += self.index.entry_size * (1 + rev)
1934 1940 if sidedata_size == 0:
1935 1941 return {}
1936 1942
1937 1943 if self._docket.sidedata_end < sidedata_offset + sidedata_size:
1938 1944 filename = self._sidedatafile
1939 1945 end = self._docket.sidedata_end
1940 1946 offset = sidedata_offset
1941 1947 length = sidedata_size
1942 1948 m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
1943 1949 raise error.RevlogError(m)
1944 1950
1945 1951 comp_segment = self._segmentfile_sidedata.read_chunk(
1946 1952 sidedata_offset, sidedata_size
1947 1953 )
1948 1954
1949 1955 comp = self.index[rev][11]
1950 1956 if comp == COMP_MODE_PLAIN:
1951 1957 segment = comp_segment
1952 1958 elif comp == COMP_MODE_DEFAULT:
1953 1959 segment = self._decompressor(comp_segment)
1954 1960 elif comp == COMP_MODE_INLINE:
1955 1961 segment = self.decompress(comp_segment)
1956 1962 else:
1957 1963 msg = b'unknown compression mode %d'
1958 1964 msg %= comp
1959 1965 raise error.RevlogError(msg)
1960 1966
1961 1967 sidedata = sidedatautil.deserialize_sidedata(segment)
1962 1968 return sidedata
1963 1969
1964 1970 def rawdata(self, nodeorrev, _df=None):
1965 1971 """return an uncompressed raw data of a given node or revision number.
1966 1972
1967 1973 _df - an existing file handle to read from. (internal-only)
1968 1974 """
1969 1975 return self._revisiondata(nodeorrev, _df, raw=True)
1970 1976
1971 1977 def hash(self, text, p1, p2):
1972 1978 """Compute a node hash.
1973 1979
1974 1980 Available as a function so that subclasses can replace the hash
1975 1981 as needed.
1976 1982 """
1977 1983 return storageutil.hashrevisionsha1(text, p1, p2)
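# Illustrative toy (not part of revlog): a standalone re-implementation of
# the classic SHA-1 node hash that storageutil.hashrevisionsha1() computes;
# the two parent nodes are hashed in sorted order, followed by the text.
import hashlib

def toy_hashrevision(text, p1, p2):
    s = hashlib.sha1(min(p1, p2))
    s.update(max(p1, p2))
    s.update(text)
    return s.digest()

nullid_20 = b'\0' * 20  # null parent for a 20-byte (SHA-1) revlog
root_node = toy_hashrevision(b'hello\n', nullid_20, nullid_20)
assert len(root_node) == 20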
1978 1984
1979 1985 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1980 1986 """Check node hash integrity.
1981 1987
1982 1988 Available as a function so that subclasses can extend hash mismatch
1983 1989 behaviors as needed.
1984 1990 """
1985 1991 try:
1986 1992 if p1 is None and p2 is None:
1987 1993 p1, p2 = self.parents(node)
1988 1994 if node != self.hash(text, p1, p2):
1989 1995 # Clear the revision cache on hash failure. The revision cache
1990 1996 # only stores the raw revision and clearing the cache does have
1991 1997 # the side-effect that we won't have a cache hit when the raw
1992 1998 # revision data is accessed. But this case should be rare and
1993 1999 # it is extra work to teach the cache about the hash
1994 2000 # verification state.
1995 2001 if self._revisioncache and self._revisioncache[0] == node:
1996 2002 self._revisioncache = None
1997 2003
1998 2004 revornode = rev
1999 2005 if revornode is None:
2000 2006 revornode = templatefilters.short(hex(node))
2001 2007 raise error.RevlogError(
2002 2008 _(b"integrity check failed on %s:%s")
2003 2009 % (self.display_id, pycompat.bytestr(revornode))
2004 2010 )
2005 2011 except error.RevlogError:
2006 2012 if self._censorable and storageutil.iscensoredtext(text):
2007 2013 raise error.CensoredNodeError(self.display_id, node, text)
2008 2014 raise
2009 2015
2010 2016 def _enforceinlinesize(self, tr):
2011 2017 """Check if the revlog is too big for inline and convert if so.
2012 2018
2013 2019 This should be called after revisions are added to the revlog. If the
2014 2020 revlog has grown too large to be an inline revlog, it will convert it
2015 2021 to use multiple index and data files.
2016 2022 """
2017 2023 tiprev = len(self) - 1
2018 2024 total_size = self.start(tiprev) + self.length(tiprev)
2019 2025 if not self._inline or total_size < _maxinline:
2020 2026 return
2021 2027
2022 2028 troffset = tr.findoffset(self._indexfile)
2023 2029 if troffset is None:
2024 2030 raise error.RevlogError(
2025 2031 _(b"%s not found in the transaction") % self._indexfile
2026 2032 )
2027 2033 trindex = None
2028 2034 tr.add(self._datafile, 0)
2029 2035
2030 2036 existing_handles = False
2031 2037 if self._writinghandles is not None:
2032 2038 existing_handles = True
2033 2039 fp = self._writinghandles[0]
2034 2040 fp.flush()
2035 2041 fp.close()
2036 2042 # We can't use the cached file handle after close(). So prevent
2037 2043 # its usage.
2038 2044 self._writinghandles = None
2039 2045 self._segmentfile.writing_handle = None
2040 2046 # No need to deal with sidedata writing handle as it is only
2041 2047 # relevant with revlog-v2 which is never inline, not reaching
2042 2048 # this code
2043 2049
2044 2050 new_dfh = self._datafp(b'w+')
2045 2051 new_dfh.truncate(0) # drop any potentially existing data
2046 2052 try:
2047 2053 with self._indexfp() as read_ifh:
2048 2054 for r in self:
2049 2055 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2050 2056 if (
2051 2057 trindex is None
2052 2058 and troffset
2053 2059 <= self.start(r) + r * self.index.entry_size
2054 2060 ):
2055 2061 trindex = r
2056 2062 new_dfh.flush()
2057 2063
2058 2064 if trindex is None:
2059 2065 trindex = 0
2060 2066
2061 2067 with self.__index_new_fp() as fp:
2062 2068 self._format_flags &= ~FLAG_INLINE_DATA
2063 2069 self._inline = False
2064 2070 for i in self:
2065 2071 e = self.index.entry_binary(i)
2066 2072 if i == 0 and self._docket is None:
2067 2073 header = self._format_flags | self._format_version
2068 2074 header = self.index.pack_header(header)
2069 2075 e = header + e
2070 2076 fp.write(e)
2071 2077 if self._docket is not None:
2072 2078 self._docket.index_end = fp.tell()
2073 2079
2074 2080 # There is a small transactional race here. If the rename of
2075 2081 # the index fails, we should remove the datafile. It is more
2076 2082 # important to ensure that the data file is not truncated
2077 2083 # when the index is replaced as otherwise data is lost.
2078 2084 tr.replace(self._datafile, self.start(trindex))
2079 2085
2080 2086 # the temp file replace the real index when we exit the context
2081 2087 # manager
2082 2088
2083 2089 tr.replace(self._indexfile, trindex * self.index.entry_size)
2084 2090 nodemaputil.setup_persistent_nodemap(tr, self)
2085 2091 self._segmentfile = randomaccessfile.randomaccessfile(
2086 2092 self.opener,
2087 2093 self._datafile,
2088 2094 self._chunkcachesize,
2089 2095 )
2090 2096
2091 2097 if existing_handles:
2092 2098 # switched from inline to conventional reopen the index
2093 2099 ifh = self.__index_write_fp()
2094 2100 self._writinghandles = (ifh, new_dfh, None)
2095 2101 self._segmentfile.writing_handle = new_dfh
2096 2102 new_dfh = None
2097 2103 # No need to deal with sidedata writing handle as it is only
2098 2104 # relevant with revlog-v2 which is never inline, not reaching
2099 2105 # this code
2100 2106 finally:
2101 2107 if new_dfh is not None:
2102 2108 new_dfh.close()
2103 2109
2104 2110 def _nodeduplicatecallback(self, transaction, node):
2105 2111 """called when trying to add a node already stored."""
2106 2112
2107 2113 @contextlib.contextmanager
2108 2114 def reading(self):
2109 2115 """Context manager that keeps data and sidedata files open for reading"""
2110 2116 with self._segmentfile.reading():
2111 2117 with self._segmentfile_sidedata.reading():
2112 2118 yield
2113 2119
2114 2120 @contextlib.contextmanager
2115 2121 def _writing(self, transaction):
2116 2122 if self._trypending:
2117 2123 msg = b'try to write in a `trypending` revlog: %s'
2118 2124 msg %= self.display_id
2119 2125 raise error.ProgrammingError(msg)
2120 2126 if self._writinghandles is not None:
2121 2127 yield
2122 2128 else:
2123 2129 ifh = dfh = sdfh = None
2124 2130 try:
2125 2131 r = len(self)
2126 2132 # opening the data file.
2127 2133 dsize = 0
2128 2134 if r:
2129 2135 dsize = self.end(r - 1)
2130 2136 dfh = None
2131 2137 if not self._inline:
2132 2138 try:
2133 2139 dfh = self._datafp(b"r+")
2134 2140 if self._docket is None:
2135 2141 dfh.seek(0, os.SEEK_END)
2136 2142 else:
2137 2143 dfh.seek(self._docket.data_end, os.SEEK_SET)
2138 2144 except FileNotFoundError:
2139 2145 dfh = self._datafp(b"w+")
2140 2146 transaction.add(self._datafile, dsize)
2141 2147 if self._sidedatafile is not None:
2142 2148 # revlog-v2 does not inline, help Pytype
2143 2149 assert dfh is not None
2144 2150 try:
2145 2151 sdfh = self.opener(self._sidedatafile, mode=b"r+")
2146 2152 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2147 2153 except FileNotFoundError:
2148 2154 sdfh = self.opener(self._sidedatafile, mode=b"w+")
2149 2155 transaction.add(
2150 2156 self._sidedatafile, self._docket.sidedata_end
2151 2157 )
2152 2158
2153 2159 # opening the index file.
2154 2160 isize = r * self.index.entry_size
2155 2161 ifh = self.__index_write_fp()
2156 2162 if self._inline:
2157 2163 transaction.add(self._indexfile, dsize + isize)
2158 2164 else:
2159 2165 transaction.add(self._indexfile, isize)
2160 2166 # exposing all file handle for writing.
2161 2167 self._writinghandles = (ifh, dfh, sdfh)
2162 2168 self._segmentfile.writing_handle = ifh if self._inline else dfh
2163 2169 self._segmentfile_sidedata.writing_handle = sdfh
2164 2170 yield
2165 2171 if self._docket is not None:
2166 2172 self._write_docket(transaction)
2167 2173 finally:
2168 2174 self._writinghandles = None
2169 2175 self._segmentfile.writing_handle = None
2170 2176 self._segmentfile_sidedata.writing_handle = None
2171 2177 if dfh is not None:
2172 2178 dfh.close()
2173 2179 if sdfh is not None:
2174 2180 sdfh.close()
2175 2181 # closing the index file last to avoid exposing references to
2176 2182 # potentially unflushed data content.
2177 2183 if ifh is not None:
2178 2184 ifh.close()
2179 2185
2180 2186 def _write_docket(self, transaction):
2181 2187 """write the current docket on disk
2182 2188
2183 2189 Exists as a method to help the changelog implement its transaction logic.
2184 2190 
2185 2191 We could also imagine using the same transaction logic for all revlogs
2186 2192 since dockets are cheap."""
2187 2193 self._docket.write(transaction)
2188 2194
2189 2195 def addrevision(
2190 2196 self,
2191 2197 text,
2192 2198 transaction,
2193 2199 link,
2194 2200 p1,
2195 2201 p2,
2196 2202 cachedelta=None,
2197 2203 node=None,
2198 2204 flags=REVIDX_DEFAULT_FLAGS,
2199 2205 deltacomputer=None,
2200 2206 sidedata=None,
2201 2207 ):
2202 2208 """add a revision to the log
2203 2209
2204 2210 text - the revision data to add
2205 2211 transaction - the transaction object used for rollback
2206 2212 link - the linkrev data to add
2207 2213 p1, p2 - the parent nodeids of the revision
2208 2214 cachedelta - an optional precomputed delta
2209 2215 node - nodeid of revision; typically node is not specified, and it is
2210 2216 computed by default as hash(text, p1, p2), however subclasses might
2211 2217 use different hashing method (and override checkhash() in such case)
2212 2218 flags - the known flags to set on the revision
2213 2219 deltacomputer - an optional deltacomputer instance shared between
2214 2220 multiple calls
2215 2221 """
2216 2222 if link == nullrev:
2217 2223 raise error.RevlogError(
2218 2224 _(b"attempted to add linkrev -1 to %s") % self.display_id
2219 2225 )
2220 2226
2221 2227 if sidedata is None:
2222 2228 sidedata = {}
2223 2229 elif sidedata and not self.hassidedata:
2224 2230 raise error.ProgrammingError(
2225 2231 _(b"trying to add sidedata to a revlog who don't support them")
2226 2232 )
2227 2233
2228 2234 if flags:
2229 2235 node = node or self.hash(text, p1, p2)
2230 2236
2231 2237 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2232 2238
2233 2239 # If the flag processor modifies the revision data, ignore any provided
2234 2240 # cachedelta.
2235 2241 if rawtext != text:
2236 2242 cachedelta = None
2237 2243
2238 2244 if len(rawtext) > _maxentrysize:
2239 2245 raise error.RevlogError(
2240 2246 _(
2241 2247 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2242 2248 )
2243 2249 % (self.display_id, len(rawtext))
2244 2250 )
2245 2251
2246 2252 node = node or self.hash(rawtext, p1, p2)
2247 2253 rev = self.index.get_rev(node)
2248 2254 if rev is not None:
2249 2255 return rev
2250 2256
2251 2257 if validatehash:
2252 2258 self.checkhash(rawtext, node, p1=p1, p2=p2)
2253 2259
2254 2260 return self.addrawrevision(
2255 2261 rawtext,
2256 2262 transaction,
2257 2263 link,
2258 2264 p1,
2259 2265 p2,
2260 2266 node,
2261 2267 flags,
2262 2268 cachedelta=cachedelta,
2263 2269 deltacomputer=deltacomputer,
2264 2270 sidedata=sidedata,
2265 2271 )
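# Minimal usage sketch for addrevision() (placeholder names, not defined
# here: `rl` is a revlog opened for writing, `tr` an active transaction,
# `linkrev` the changelog revision this entry belongs to):
#
#     text = b'new file content\n'
#     rev = rl.addrevision(text, tr, linkrev, rl.nullid, rl.nullid)
#     node = rl.node(rev)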
2266 2272
2267 2273 def addrawrevision(
2268 2274 self,
2269 2275 rawtext,
2270 2276 transaction,
2271 2277 link,
2272 2278 p1,
2273 2279 p2,
2274 2280 node,
2275 2281 flags,
2276 2282 cachedelta=None,
2277 2283 deltacomputer=None,
2278 2284 sidedata=None,
2279 2285 ):
2280 2286 """add a raw revision with known flags, node and parents
2281 2287 useful when reusing a revision not stored in this revlog (ex: received
2282 2288 over wire, or read from an external bundle).
2283 2289 """
2284 2290 with self._writing(transaction):
2285 2291 return self._addrevision(
2286 2292 node,
2287 2293 rawtext,
2288 2294 transaction,
2289 2295 link,
2290 2296 p1,
2291 2297 p2,
2292 2298 flags,
2293 2299 cachedelta,
2294 2300 deltacomputer=deltacomputer,
2295 2301 sidedata=sidedata,
2296 2302 )
2297 2303
2298 2304 def compress(self, data):
2299 2305 """Generate a possibly-compressed representation of data."""
2300 2306 if not data:
2301 2307 return b'', data
2302 2308
2303 2309 compressed = self._compressor.compress(data)
2304 2310
2305 2311 if compressed:
2306 2312 # The revlog compressor added the header in the returned data.
2307 2313 return b'', compressed
2308 2314
2309 2315 if data[0:1] == b'\0':
2310 2316 return b'', data
2311 2317 return b'u', data
2312 2318
2313 2319 def decompress(self, data):
2314 2320 """Decompress a revlog chunk.
2315 2321
2316 2322 The chunk is expected to begin with a header identifying the
2317 2323 format type so it can be routed to an appropriate decompressor.
2318 2324 """
2319 2325 if not data:
2320 2326 return data
2321 2327
2322 2328 # Revlogs are read much more frequently than they are written and many
2323 2329 # chunks only take microseconds to decompress, so performance is
2324 2330 # important here.
2325 2331 #
2326 2332 # We can make a few assumptions about revlogs:
2327 2333 #
2328 2334 # 1) the majority of chunks will be compressed (as opposed to inline
2329 2335 # raw data).
2330 2336 # 2) decompressing *any* data will likely be at least 10x slower than
2331 2337 # returning raw inline data.
2332 2338 # 3) we want to prioritize common and officially supported compression
2333 2339 # engines
2334 2340 #
2335 2341 # It follows that we want to optimize for "decompress compressed data
2336 2342 # when encoded with common and officially supported compression engines"
2337 2343 # case over "raw data" and "data encoded by less common or non-official
2338 2344 # compression engines." That is why we have the inline lookup first
2339 2345 # followed by the compengines lookup.
2340 2346 #
2341 2347 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2342 2348 # compressed chunks. And this matters for changelog and manifest reads.
2343 2349 t = data[0:1]
2344 2350
2345 2351 if t == b'x':
2346 2352 try:
2347 2353 return _zlibdecompress(data)
2348 2354 except zlib.error as e:
2349 2355 raise error.RevlogError(
2350 2356 _(b'revlog decompress error: %s')
2351 2357 % stringutil.forcebytestr(e)
2352 2358 )
2353 2359 # '\0' is more common than 'u' so it goes first.
2354 2360 elif t == b'\0':
2355 2361 return data
2356 2362 elif t == b'u':
2357 2363 return util.buffer(data, 1)
2358 2364
2359 2365 compressor = self._get_decompressor(t)
2360 2366
2361 2367 return compressor.decompress(data)
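# Illustrative toy (not part of revlog): the chunk header convention that
# compress()/decompress() above rely on for the historical zlib engine --
# 'x' is the first byte of a zlib stream, 'u' marks data stored
# uncompressed, and an empty chunk or a leading '\0' byte is used as-is.
import zlib

def toy_compress(data):
    if not data:
        return data
    comp = zlib.compress(data)
    if len(comp) < len(data):
        return comp                # zlib streams start with b'x'
    if data[0:1] == b'\0':
        return data                # already self-marking
    return b'u' + data             # stored uncompressed, flagged with 'u'

def toy_decompress(chunk):
    if not chunk:
        return chunk
    t = chunk[0:1]
    if t == b'x':
        return zlib.decompress(chunk)
    if t == b'\0':
        return chunk
    if t == b'u':
        return chunk[1:]
    raise ValueError('unknown chunk header: %r' % t)

payload = b'some revision text\n' * 8
assert toy_decompress(toy_compress(payload)) == payload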
2362 2368
2363 2369 def _addrevision(
2364 2370 self,
2365 2371 node,
2366 2372 rawtext,
2367 2373 transaction,
2368 2374 link,
2369 2375 p1,
2370 2376 p2,
2371 2377 flags,
2372 2378 cachedelta,
2373 2379 alwayscache=False,
2374 2380 deltacomputer=None,
2375 2381 sidedata=None,
2376 2382 ):
2377 2383 """internal function to add revisions to the log
2378 2384
2379 2385 see addrevision for argument descriptions.
2380 2386
2381 2387 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2382 2388
2383 2389 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2384 2390 be used.
2385 2391
2386 2392 invariants:
2387 2393 - rawtext is optional (can be None); if not set, cachedelta must be set.
2388 2394 if both are set, they must correspond to each other.
2389 2395 """
2390 2396 if node == self.nullid:
2391 2397 raise error.RevlogError(
2392 2398 _(b"%s: attempt to add null revision") % self.display_id
2393 2399 )
2394 2400 if (
2395 2401 node == self.nodeconstants.wdirid
2396 2402 or node in self.nodeconstants.wdirfilenodeids
2397 2403 ):
2398 2404 raise error.RevlogError(
2399 2405 _(b"%s: attempt to add wdir revision") % self.display_id
2400 2406 )
2401 2407 if self._writinghandles is None:
2402 2408 msg = b'adding revision outside `revlog._writing` context'
2403 2409 raise error.ProgrammingError(msg)
2404 2410
2405 2411 if self._inline:
2406 2412 fh = self._writinghandles[0]
2407 2413 else:
2408 2414 fh = self._writinghandles[1]
2409 2415
2410 2416 btext = [rawtext]
2411 2417
2412 2418 curr = len(self)
2413 2419 prev = curr - 1
2414 2420
2415 2421 offset = self._get_data_offset(prev)
2416 2422
2417 2423 if self._concurrencychecker:
2418 2424 ifh, dfh, sdfh = self._writinghandles
2419 2425 # XXX no checking for the sidedata file
2420 2426 if self._inline:
2421 2427 # offset is "as if" it were in the .d file, so we need to add on
2422 2428 # the size of the entry metadata.
2423 2429 self._concurrencychecker(
2424 2430 ifh, self._indexfile, offset + curr * self.index.entry_size
2425 2431 )
2426 2432 else:
2427 2433 # Entries in the .i are a consistent size.
2428 2434 self._concurrencychecker(
2429 2435 ifh, self._indexfile, curr * self.index.entry_size
2430 2436 )
2431 2437 self._concurrencychecker(dfh, self._datafile, offset)
2432 2438
2433 2439 p1r, p2r = self.rev(p1), self.rev(p2)
2434 2440
2435 2441 # full versions are inserted when the needed deltas
2436 2442 # become comparable to the uncompressed text
2437 2443 if rawtext is None:
2438 2444 # need rawtext size, before changed by flag processors, which is
2439 2445 # the non-raw size. use revlog explicitly to avoid filelog's extra
2440 2446 # logic that might remove metadata size.
2441 2447 textlen = mdiff.patchedsize(
2442 2448 revlog.size(self, cachedelta[0]), cachedelta[1]
2443 2449 )
2444 2450 else:
2445 2451 textlen = len(rawtext)
2446 2452
2447 2453 if deltacomputer is None:
2448 2454 write_debug = None
2449 2455 if self._debug_delta:
2450 2456 write_debug = transaction._report
2451 2457 deltacomputer = deltautil.deltacomputer(
2452 2458 self, write_debug=write_debug
2453 2459 )
2454 2460
2455 2461 revinfo = revlogutils.revisioninfo(
2456 2462 node,
2457 2463 p1,
2458 2464 p2,
2459 2465 btext,
2460 2466 textlen,
2461 2467 cachedelta,
2462 2468 flags,
2463 2469 )
2464 2470
2465 2471 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2466 2472
2467 2473 compression_mode = COMP_MODE_INLINE
2468 2474 if self._docket is not None:
2469 2475 default_comp = self._docket.default_compression_header
2470 2476 r = deltautil.delta_compression(default_comp, deltainfo)
2471 2477 compression_mode, deltainfo = r
2472 2478
2473 2479 sidedata_compression_mode = COMP_MODE_INLINE
2474 2480 if sidedata and self.hassidedata:
2475 2481 sidedata_compression_mode = COMP_MODE_PLAIN
2476 2482 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2477 2483 sidedata_offset = self._docket.sidedata_end
2478 2484 h, comp_sidedata = self.compress(serialized_sidedata)
2479 2485 if (
2480 2486 h != b'u'
2481 2487 and comp_sidedata[0:1] != b'\0'
2482 2488 and len(comp_sidedata) < len(serialized_sidedata)
2483 2489 ):
2484 2490 assert not h
2485 2491 if (
2486 2492 comp_sidedata[0:1]
2487 2493 == self._docket.default_compression_header
2488 2494 ):
2489 2495 sidedata_compression_mode = COMP_MODE_DEFAULT
2490 2496 serialized_sidedata = comp_sidedata
2491 2497 else:
2492 2498 sidedata_compression_mode = COMP_MODE_INLINE
2493 2499 serialized_sidedata = comp_sidedata
2494 2500 else:
2495 2501 serialized_sidedata = b""
2496 2502 # Don't store the offset if the sidedata is empty, that way
2497 2503 # we can easily detect empty sidedata, and it will be no different
2498 2504 # from sidedata we add manually.
2499 2505 sidedata_offset = 0
2500 2506
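# The "rank" computed below, when enabled, is the size of the revision's
# ancestor set, the revision itself included: 1 for a root, the parent's
# rank plus one for a linear revision, and for a merge the rank of the
# higher-ranked parent plus the ancestors only reachable from the other
# parent (the findmissingrevs() call).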
2501 2507 rank = RANK_UNKNOWN
2502 if self._format_version == CHANGELOGV2:
2508 if self._compute_rank:
2503 2509 if (p1r, p2r) == (nullrev, nullrev):
2504 2510 rank = 1
2505 2511 elif p1r != nullrev and p2r == nullrev:
2506 2512 rank = 1 + self.fast_rank(p1r)
2507 2513 elif p1r == nullrev and p2r != nullrev:
2508 2514 rank = 1 + self.fast_rank(p2r)
2509 2515 else: # merge node
2510 2516 if rustdagop is not None and self.index.rust_ext_compat:
2511 2517 rank = rustdagop.rank(self.index, p1r, p2r)
2512 2518 else:
2513 2519 pmin, pmax = sorted((p1r, p2r))
2514 2520 rank = 1 + self.fast_rank(pmax)
2515 2521 rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))
2516 2522
2517 2523 e = revlogutils.entry(
2518 2524 flags=flags,
2519 2525 data_offset=offset,
2520 2526 data_compressed_length=deltainfo.deltalen,
2521 2527 data_uncompressed_length=textlen,
2522 2528 data_compression_mode=compression_mode,
2523 2529 data_delta_base=deltainfo.base,
2524 2530 link_rev=link,
2525 2531 parent_rev_1=p1r,
2526 2532 parent_rev_2=p2r,
2527 2533 node_id=node,
2528 2534 sidedata_offset=sidedata_offset,
2529 2535 sidedata_compressed_length=len(serialized_sidedata),
2530 2536 sidedata_compression_mode=sidedata_compression_mode,
2531 2537 rank=rank,
2532 2538 )
2533 2539
2534 2540 self.index.append(e)
2535 2541 entry = self.index.entry_binary(curr)
2536 2542 if curr == 0 and self._docket is None:
2537 2543 header = self._format_flags | self._format_version
2538 2544 header = self.index.pack_header(header)
2539 2545 entry = header + entry
2540 2546 self._writeentry(
2541 2547 transaction,
2542 2548 entry,
2543 2549 deltainfo.data,
2544 2550 link,
2545 2551 offset,
2546 2552 serialized_sidedata,
2547 2553 sidedata_offset,
2548 2554 )
2549 2555
2550 2556 rawtext = btext[0]
2551 2557
2552 2558 if alwayscache and rawtext is None:
2553 2559 rawtext = deltacomputer.buildtext(revinfo, fh)
2554 2560
2555 2561 if type(rawtext) == bytes: # only accept immutable objects
2556 2562 self._revisioncache = (node, curr, rawtext)
2557 2563 self._chainbasecache[curr] = deltainfo.chainbase
2558 2564 return curr
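# Illustrative toy (not part of revlog): the rank value stored by
# _addrevision above, i.e. the number of revisions in a revision's
# ancestor set, itself included, here recomputed naively from a parents
# table.
nullrev = -1

def toy_rank(parents, rev):
    """parents maps rev -> (p1, p2); rank = len(ancestors(rev) | {rev})."""
    seen, stack = set(), [rev]
    while stack:
        r = stack.pop()
        if r == nullrev or r in seen:
            continue
        seen.add(r)
        stack.extend(parents[r])
    return len(seen)

# a small DAG: revs 1 and 2 both descend from 0, rev 3 merges 1 and 2
parents = {0: (nullrev, nullrev), 1: (0, nullrev), 2: (0, nullrev), 3: (1, 2)}
assert [toy_rank(parents, r) for r in range(4)] == [1, 2, 2, 4]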
2559 2565
2560 2566 def _get_data_offset(self, prev):
2561 2567 """Returns the current offset in the (in-transaction) data file.
2562 2568 Versions < 2 of the revlog can get this in O(1), revlog v2 needs a docket
2563 2569 file to store that information: since sidedata can be rewritten to the
2564 2570 end of the data file within a transaction, you can have cases where, for
2565 2571 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2566 2572 to `n - 1`'s sidedata being written after `n`'s data.
2567 2573
2568 2574 TODO cache this in a docket file before getting out of experimental."""
2569 2575 if self._docket is None:
2570 2576 return self.end(prev)
2571 2577 else:
2572 2578 return self._docket.data_end
2573 2579
2574 2580 def _writeentry(
2575 2581 self, transaction, entry, data, link, offset, sidedata, sidedata_offset
2576 2582 ):
2577 2583 # Files opened in a+ mode have inconsistent behavior on various
2578 2584 # platforms. Windows requires that a file positioning call be made
2579 2585 # when the file handle transitions between reads and writes. See
2580 2586 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2581 2587 # platforms, Python or the platform itself can be buggy. Some versions
2582 2588 # of Solaris have been observed to not append at the end of the file
2583 2589 # if the file was seeked to before the end. See issue4943 for more.
2584 2590 #
2585 2591 # We work around this issue by inserting a seek() before writing.
2586 2592 # Note: This is likely not necessary on Python 3. However, because
2587 2593 # the file handle is reused for reads and may be seeked there, we need
2588 2594 # to be careful before changing this.
2589 2595 if self._writinghandles is None:
2590 2596 msg = b'adding revision outside `revlog._writing` context'
2591 2597 raise error.ProgrammingError(msg)
2592 2598 ifh, dfh, sdfh = self._writinghandles
2593 2599 if self._docket is None:
2594 2600 ifh.seek(0, os.SEEK_END)
2595 2601 else:
2596 2602 ifh.seek(self._docket.index_end, os.SEEK_SET)
2597 2603 if dfh:
2598 2604 if self._docket is None:
2599 2605 dfh.seek(0, os.SEEK_END)
2600 2606 else:
2601 2607 dfh.seek(self._docket.data_end, os.SEEK_SET)
2602 2608 if sdfh:
2603 2609 sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2604 2610
2605 2611 curr = len(self) - 1
2606 2612 if not self._inline:
2607 2613 transaction.add(self._datafile, offset)
2608 2614 if self._sidedatafile:
2609 2615 transaction.add(self._sidedatafile, sidedata_offset)
2610 2616 transaction.add(self._indexfile, curr * len(entry))
2611 2617 if data[0]:
2612 2618 dfh.write(data[0])
2613 2619 dfh.write(data[1])
2614 2620 if sidedata:
2615 2621 sdfh.write(sidedata)
2616 2622 ifh.write(entry)
2617 2623 else:
2618 2624 offset += curr * self.index.entry_size
2619 2625 transaction.add(self._indexfile, offset)
2620 2626 ifh.write(entry)
2621 2627 ifh.write(data[0])
2622 2628 ifh.write(data[1])
2623 2629 assert not sidedata
2624 2630 self._enforceinlinesize(transaction)
2625 2631 if self._docket is not None:
2626 2632 # revlog-v2 always has 3 writing handles, help Pytype
2627 2633 wh1 = self._writinghandles[0]
2628 2634 wh2 = self._writinghandles[1]
2629 2635 wh3 = self._writinghandles[2]
2630 2636 assert wh1 is not None
2631 2637 assert wh2 is not None
2632 2638 assert wh3 is not None
2633 2639 self._docket.index_end = wh1.tell()
2634 2640 self._docket.data_end = wh2.tell()
2635 2641 self._docket.sidedata_end = wh3.tell()
2636 2642
2637 2643 nodemaputil.setup_persistent_nodemap(transaction, self)
2638 2644
2639 2645 def addgroup(
2640 2646 self,
2641 2647 deltas,
2642 2648 linkmapper,
2643 2649 transaction,
2644 2650 alwayscache=False,
2645 2651 addrevisioncb=None,
2646 2652 duplicaterevisioncb=None,
2647 2653 debug_info=None,
2648 2654 ):
2649 2655 """
2650 2656 add a delta group
2651 2657
2652 2658 given a set of deltas, add them to the revision log. the
2653 2659 first delta is against its parent, which should be in our
2654 2660 log, the rest are against the previous delta.
2655 2661
2656 2662 If ``addrevisioncb`` is defined, it will be called with arguments of
2657 2663 this revlog and the node that was added.
2658 2664 """
2659 2665
2660 2666 if self._adding_group:
2661 2667 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2662 2668
2663 2669 self._adding_group = True
2664 2670 empty = True
2665 2671 try:
2666 2672 with self._writing(transaction):
2667 2673 write_debug = None
2668 2674 if self._debug_delta:
2669 2675 write_debug = transaction._report
2670 2676 deltacomputer = deltautil.deltacomputer(
2671 2677 self,
2672 2678 write_debug=write_debug,
2673 2679 debug_info=debug_info,
2674 2680 )
2675 2681 # loop through our set of deltas
2676 2682 for data in deltas:
2677 2683 (
2678 2684 node,
2679 2685 p1,
2680 2686 p2,
2681 2687 linknode,
2682 2688 deltabase,
2683 2689 delta,
2684 2690 flags,
2685 2691 sidedata,
2686 2692 ) = data
2687 2693 link = linkmapper(linknode)
2688 2694 flags = flags or REVIDX_DEFAULT_FLAGS
2689 2695
2690 2696 rev = self.index.get_rev(node)
2691 2697 if rev is not None:
2692 2698 # this can happen if two branches make the same change
2693 2699 self._nodeduplicatecallback(transaction, rev)
2694 2700 if duplicaterevisioncb:
2695 2701 duplicaterevisioncb(self, rev)
2696 2702 empty = False
2697 2703 continue
2698 2704
2699 2705 for p in (p1, p2):
2700 2706 if not self.index.has_node(p):
2701 2707 raise error.LookupError(
2702 2708 p, self.radix, _(b'unknown parent')
2703 2709 )
2704 2710
2705 2711 if not self.index.has_node(deltabase):
2706 2712 raise error.LookupError(
2707 2713 deltabase, self.display_id, _(b'unknown delta base')
2708 2714 )
2709 2715
2710 2716 baserev = self.rev(deltabase)
2711 2717
2712 2718 if baserev != nullrev and self.iscensored(baserev):
2713 2719 # if base is censored, delta must be full replacement in a
2714 2720 # single patch operation
2715 2721 hlen = struct.calcsize(b">lll")
2716 2722 oldlen = self.rawsize(baserev)
2717 2723 newlen = len(delta) - hlen
2718 2724 if delta[:hlen] != mdiff.replacediffheader(
2719 2725 oldlen, newlen
2720 2726 ):
2721 2727 raise error.CensoredBaseError(
2722 2728 self.display_id, self.node(baserev)
2723 2729 )
2724 2730
2725 2731 if not flags and self._peek_iscensored(baserev, delta):
2726 2732 flags |= REVIDX_ISCENSORED
2727 2733
2728 2734 # We assume consumers of addrevisioncb will want to retrieve
2729 2735 # the added revision, which will require a call to
2730 2736 # revision(). revision() will fast path if there is a cache
2731 2737 # hit. So, we tell _addrevision() to always cache in this case.
2732 2738 # We're only using addgroup() in the context of changegroup
2733 2739 # generation so the revision data can always be handled as raw
2734 2740 # by the flagprocessor.
2735 2741 rev = self._addrevision(
2736 2742 node,
2737 2743 None,
2738 2744 transaction,
2739 2745 link,
2740 2746 p1,
2741 2747 p2,
2742 2748 flags,
2743 2749 (baserev, delta),
2744 2750 alwayscache=alwayscache,
2745 2751 deltacomputer=deltacomputer,
2746 2752 sidedata=sidedata,
2747 2753 )
2748 2754
2749 2755 if addrevisioncb:
2750 2756 addrevisioncb(self, rev)
2751 2757 empty = False
2752 2758 finally:
2753 2759 self._adding_group = False
2754 2760 return not empty
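# Illustrative toy (not part of revlog): the shape of delta that the
# censored-base check above expects -- a single full-replacement hunk,
# i.e. a 12-byte ">lll" header (start, end, new length) covering the whole
# old text, followed by the new content; this is what
# mdiff.replacediffheader() describes.
import struct

def toy_fullreplace_delta(oldtext, newtext):
    return struct.pack(b'>lll', 0, len(oldtext), len(newtext)) + newtext

def toy_apply_delta(oldtext, delta):
    """Apply a single-hunk delta of the form produced above."""
    start, end, length = struct.unpack(b'>lll', delta[:12])
    newdata = delta[12:12 + length]
    return oldtext[:start] + newdata + oldtext[end:]

old = b'censored payload\n'
new = b'tombstone\n'
assert toy_apply_delta(old, toy_fullreplace_delta(old, new)) == new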
2755 2761
2756 2762 def iscensored(self, rev):
2757 2763 """Check if a file revision is censored."""
2758 2764 if not self._censorable:
2759 2765 return False
2760 2766
2761 2767 return self.flags(rev) & REVIDX_ISCENSORED
2762 2768
2763 2769 def _peek_iscensored(self, baserev, delta):
2764 2770 """Quickly check if a delta produces a censored revision."""
2765 2771 if not self._censorable:
2766 2772 return False
2767 2773
2768 2774 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2769 2775
2770 2776 def getstrippoint(self, minlink):
2771 2777 """find the minimum rev that must be stripped to strip the linkrev
2772 2778
2773 2779 Returns a tuple containing the minimum rev and a set of all revs that
2774 2780 have linkrevs that will be broken by this strip.
2775 2781 """
2776 2782 return storageutil.resolvestripinfo(
2777 2783 minlink,
2778 2784 len(self) - 1,
2779 2785 self.headrevs(),
2780 2786 self.linkrev,
2781 2787 self.parentrevs,
2782 2788 )
2783 2789
2784 2790 def strip(self, minlink, transaction):
2785 2791 """truncate the revlog on the first revision with a linkrev >= minlink
2786 2792
2787 2793 This function is called when we're stripping revision minlink and
2788 2794 its descendants from the repository.
2789 2795
2790 2796 We have to remove all revisions with linkrev >= minlink, because
2791 2797 the equivalent changelog revisions will be renumbered after the
2792 2798 strip.
2793 2799
2794 2800 So we truncate the revlog on the first of these revisions, and
2795 2801 trust that the caller has saved the revisions that shouldn't be
2796 2802 removed and that it'll re-add them after this truncation.
2797 2803 """
2798 2804 if len(self) == 0:
2799 2805 return
2800 2806
2801 2807 rev, _ = self.getstrippoint(minlink)
2802 2808 if rev == len(self):
2803 2809 return
2804 2810
2805 2811 # first truncate the files on disk
2806 2812 data_end = self.start(rev)
2807 2813 if not self._inline:
2808 2814 transaction.add(self._datafile, data_end)
2809 2815 end = rev * self.index.entry_size
2810 2816 else:
2811 2817 end = data_end + (rev * self.index.entry_size)
2812 2818
2813 2819 if self._sidedatafile:
2814 2820 sidedata_end = self.sidedata_cut_off(rev)
2815 2821 transaction.add(self._sidedatafile, sidedata_end)
2816 2822
2817 2823 transaction.add(self._indexfile, end)
2818 2824 if self._docket is not None:
2819 2825 # XXX we could leverage the docket while stripping. However it is
2820 2826 # not powerful enough at the time of this comment
2821 2827 self._docket.index_end = end
2822 2828 self._docket.data_end = data_end
2823 2829 self._docket.sidedata_end = sidedata_end
2824 2830 self._docket.write(transaction, stripping=True)
2825 2831
2826 2832 # then reset internal state in memory to forget those revisions
2827 2833 self._revisioncache = None
2828 2834 self._chaininfocache = util.lrucachedict(500)
2829 2835 self._segmentfile.clear_cache()
2830 2836 self._segmentfile_sidedata.clear_cache()
2831 2837
2832 2838 del self.index[rev:-1]
2833 2839
2834 2840 def checksize(self):
2835 2841 """Check size of index and data files
2836 2842
2837 2843 return a (dd, di) tuple.
2838 2844 - dd: extra bytes for the "data" file
2839 2845 - di: extra bytes for the "index" file
2840 2846
2841 2847 A healthy revlog will return (0, 0).
2842 2848 """
2843 2849 expected = 0
2844 2850 if len(self):
2845 2851 expected = max(0, self.end(len(self) - 1))
2846 2852
2847 2853 try:
2848 2854 with self._datafp() as f:
2849 2855 f.seek(0, io.SEEK_END)
2850 2856 actual = f.tell()
2851 2857 dd = actual - expected
2852 2858 except FileNotFoundError:
2853 2859 dd = 0
2854 2860
2855 2861 try:
2856 2862 f = self.opener(self._indexfile)
2857 2863 f.seek(0, io.SEEK_END)
2858 2864 actual = f.tell()
2859 2865 f.close()
2860 2866 s = self.index.entry_size
2861 2867 i = max(0, actual // s)
2862 2868 di = actual - (i * s)
2863 2869 if self._inline:
2864 2870 databytes = 0
2865 2871 for r in self:
2866 2872 databytes += max(0, self.length(r))
2867 2873 dd = 0
2868 2874 di = actual - len(self) * s - databytes
2869 2875 except FileNotFoundError:
2870 2876 di = 0
2871 2877
2872 2878 return (dd, di)
2873 2879
2874 2880 def files(self):
2875 2881 res = [self._indexfile]
2876 2882 if self._docket_file is None:
2877 2883 if not self._inline:
2878 2884 res.append(self._datafile)
2879 2885 else:
2880 2886 res.append(self._docket_file)
2881 2887 res.extend(self._docket.old_index_filepaths(include_empty=False))
2882 2888 if self._docket.data_end:
2883 2889 res.append(self._datafile)
2884 2890 res.extend(self._docket.old_data_filepaths(include_empty=False))
2885 2891 if self._docket.sidedata_end:
2886 2892 res.append(self._sidedatafile)
2887 2893 res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
2888 2894 return res
2889 2895
2890 2896 def emitrevisions(
2891 2897 self,
2892 2898 nodes,
2893 2899 nodesorder=None,
2894 2900 revisiondata=False,
2895 2901 assumehaveparentrevisions=False,
2896 2902 deltamode=repository.CG_DELTAMODE_STD,
2897 2903 sidedata_helpers=None,
2898 2904 debug_info=None,
2899 2905 ):
2900 2906 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2901 2907 raise error.ProgrammingError(
2902 2908 b'unhandled value for nodesorder: %s' % nodesorder
2903 2909 )
2904 2910
2905 2911 if nodesorder is None and not self._generaldelta:
2906 2912 nodesorder = b'storage'
2907 2913
2908 2914 if (
2909 2915 not self._storedeltachains
2910 2916 and deltamode != repository.CG_DELTAMODE_PREV
2911 2917 ):
2912 2918 deltamode = repository.CG_DELTAMODE_FULL
2913 2919
2914 2920 return storageutil.emitrevisions(
2915 2921 self,
2916 2922 nodes,
2917 2923 nodesorder,
2918 2924 revlogrevisiondelta,
2919 2925 deltaparentfn=self.deltaparent,
2920 2926 candeltafn=self.candelta,
2921 2927 rawsizefn=self.rawsize,
2922 2928 revdifffn=self.revdiff,
2923 2929 flagsfn=self.flags,
2924 2930 deltamode=deltamode,
2925 2931 revisiondata=revisiondata,
2926 2932 assumehaveparentrevisions=assumehaveparentrevisions,
2927 2933 sidedata_helpers=sidedata_helpers,
2928 2934 debug_info=debug_info,
2929 2935 )
2930 2936
2931 2937 DELTAREUSEALWAYS = b'always'
2932 2938 DELTAREUSESAMEREVS = b'samerevs'
2933 2939 DELTAREUSENEVER = b'never'
2934 2940
2935 2941 DELTAREUSEFULLADD = b'fulladd'
2936 2942
2937 2943 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2938 2944
2939 2945 def clone(
2940 2946 self,
2941 2947 tr,
2942 2948 destrevlog,
2943 2949 addrevisioncb=None,
2944 2950 deltareuse=DELTAREUSESAMEREVS,
2945 2951 forcedeltabothparents=None,
2946 2952 sidedata_helpers=None,
2947 2953 ):
2948 2954 """Copy this revlog to another, possibly with format changes.
2949 2955
2950 2956 The destination revlog will contain the same revisions and nodes.
2951 2957 However, it may not be bit-for-bit identical due to e.g. delta encoding
2952 2958 differences.
2953 2959
2954 2960 The ``deltareuse`` argument controls how deltas from the existing revlog
2955 2961 are preserved in the destination revlog. The argument can have the
2956 2962 following values:
2957 2963
2958 2964 DELTAREUSEALWAYS
2959 2965 Deltas will always be reused (if possible), even if the destination
2960 2966 revlog would not select the same revisions for the delta. This is the
2961 2967 fastest mode of operation.
2962 2968 DELTAREUSESAMEREVS
2963 2969 Deltas will be reused if the destination revlog would pick the same
2964 2970 revisions for the delta. This mode strikes a balance between speed
2965 2971 and optimization.
2966 2972 DELTAREUSENEVER
2967 2973 Deltas will never be reused. This is the slowest mode of execution.
2968 2974 This mode can be used to recompute deltas (e.g. if the diff/delta
2969 2975 algorithm changes).
2970 2976 DELTAREUSEFULLADD
2971 2977 Revisions will be re-added as if they were new content. This is
2972 2978 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2973 2979 e.g. large file detection and handling.
2974 2980
2975 2981 Delta computation can be slow, so the choice of delta reuse policy can
2976 2982 significantly affect run time.
2977 2983
2978 2984 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2979 2985 two extremes. Deltas will be reused if they are appropriate. But if the
2980 2986 delta could choose a better revision, it will do so. This means if you
2981 2987 are converting a non-generaldelta revlog to a generaldelta revlog,
2982 2988 deltas will be recomputed if the delta's parent isn't a parent of the
2983 2989 revision.
2984 2990
2985 2991 In addition to the delta policy, the ``forcedeltabothparents``
2986 2992 argument controls whether to force compute deltas against both parents
2987 2993 for merges. By default, the destination revlog's current setting is used.
2988 2994
2989 2995 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2990 2996 `sidedata_helpers`.
2991 2997 """
2992 2998 if deltareuse not in self.DELTAREUSEALL:
2993 2999 raise ValueError(
2994 3000 _(b'value for deltareuse invalid: %s') % deltareuse
2995 3001 )
2996 3002
2997 3003 if len(destrevlog):
2998 3004 raise ValueError(_(b'destination revlog is not empty'))
2999 3005
3000 3006 if getattr(self, 'filteredrevs', None):
3001 3007 raise ValueError(_(b'source revlog has filtered revisions'))
3002 3008 if getattr(destrevlog, 'filteredrevs', None):
3003 3009 raise ValueError(_(b'destination revlog has filtered revisions'))
3004 3010
3005 3011 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
3006 3012 # if possible.
3007 3013 oldlazydelta = destrevlog._lazydelta
3008 3014 oldlazydeltabase = destrevlog._lazydeltabase
3009 3015 oldamd = destrevlog._deltabothparents
3010 3016
3011 3017 try:
3012 3018 if deltareuse == self.DELTAREUSEALWAYS:
3013 3019 destrevlog._lazydeltabase = True
3014 3020 destrevlog._lazydelta = True
3015 3021 elif deltareuse == self.DELTAREUSESAMEREVS:
3016 3022 destrevlog._lazydeltabase = False
3017 3023 destrevlog._lazydelta = True
3018 3024 elif deltareuse == self.DELTAREUSENEVER:
3019 3025 destrevlog._lazydeltabase = False
3020 3026 destrevlog._lazydelta = False
3021 3027
3022 3028 destrevlog._deltabothparents = forcedeltabothparents or oldamd
3023 3029
3024 3030 self._clone(
3025 3031 tr,
3026 3032 destrevlog,
3027 3033 addrevisioncb,
3028 3034 deltareuse,
3029 3035 forcedeltabothparents,
3030 3036 sidedata_helpers,
3031 3037 )
3032 3038
3033 3039 finally:
3034 3040 destrevlog._lazydelta = oldlazydelta
3035 3041 destrevlog._lazydeltabase = oldlazydeltabase
3036 3042 destrevlog._deltabothparents = oldamd
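# Minimal usage sketch for clone() (placeholder names, not defined here:
# `src` and `dest` are already-constructed revlog instances, e.g. during a
# repository upgrade, and `tr` is an active transaction):
#
#     src.clone(tr, dest, deltareuse=src.DELTAREUSESAMEREVS)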
3037 3043
3038 3044 def _clone(
3039 3045 self,
3040 3046 tr,
3041 3047 destrevlog,
3042 3048 addrevisioncb,
3043 3049 deltareuse,
3044 3050 forcedeltabothparents,
3045 3051 sidedata_helpers,
3046 3052 ):
3047 3053 """perform the core duty of `revlog.clone` after parameter processing"""
3048 3054 write_debug = None
3049 3055 if self._debug_delta:
3050 3056 write_debug = tr._report
3051 3057 deltacomputer = deltautil.deltacomputer(
3052 3058 destrevlog,
3053 3059 write_debug=write_debug,
3054 3060 )
3055 3061 index = self.index
3056 3062 for rev in self:
3057 3063 entry = index[rev]
3058 3064
3059 3065 # Some classes override linkrev to take filtered revs into
3060 3066 # account. Use raw entry from index.
3061 3067 flags = entry[0] & 0xFFFF
3062 3068 linkrev = entry[4]
3063 3069 p1 = index[entry[5]][7]
3064 3070 p2 = index[entry[6]][7]
3065 3071 node = entry[7]
3066 3072
3067 3073 # (Possibly) reuse the delta from the revlog if allowed and
3068 3074 # the revlog chunk is a delta.
3069 3075 cachedelta = None
3070 3076 rawtext = None
3071 3077 if deltareuse == self.DELTAREUSEFULLADD:
3072 3078 text = self._revisiondata(rev)
3073 3079 sidedata = self.sidedata(rev)
3074 3080
3075 3081 if sidedata_helpers is not None:
3076 3082 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3077 3083 self, sidedata_helpers, sidedata, rev
3078 3084 )
3079 3085 flags = flags | new_flags[0] & ~new_flags[1]
3080 3086
3081 3087 destrevlog.addrevision(
3082 3088 text,
3083 3089 tr,
3084 3090 linkrev,
3085 3091 p1,
3086 3092 p2,
3087 3093 cachedelta=cachedelta,
3088 3094 node=node,
3089 3095 flags=flags,
3090 3096 deltacomputer=deltacomputer,
3091 3097 sidedata=sidedata,
3092 3098 )
3093 3099 else:
3094 3100 if destrevlog._lazydelta:
3095 3101 dp = self.deltaparent(rev)
3096 3102 if dp != nullrev:
3097 3103 cachedelta = (dp, bytes(self._chunk(rev)))
3098 3104
3099 3105 sidedata = None
3100 3106 if not cachedelta:
3101 3107 rawtext = self._revisiondata(rev)
3102 3108 sidedata = self.sidedata(rev)
3103 3109 if sidedata is None:
3104 3110 sidedata = self.sidedata(rev)
3105 3111
3106 3112 if sidedata_helpers is not None:
3107 3113 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3108 3114 self, sidedata_helpers, sidedata, rev
3109 3115 )
3110 3116 flags = flags | new_flags[0] & ~new_flags[1]
3111 3117
3112 3118 with destrevlog._writing(tr):
3113 3119 destrevlog._addrevision(
3114 3120 node,
3115 3121 rawtext,
3116 3122 tr,
3117 3123 linkrev,
3118 3124 p1,
3119 3125 p2,
3120 3126 flags,
3121 3127 cachedelta,
3122 3128 deltacomputer=deltacomputer,
3123 3129 sidedata=sidedata,
3124 3130 )
3125 3131
3126 3132 if addrevisioncb:
3127 3133 addrevisioncb(self, rev, node)
3128 3134
3129 3135 def censorrevision(self, tr, censornode, tombstone=b''):
3130 3136 if self._format_version == REVLOGV0:
3131 3137 raise error.RevlogError(
3132 3138 _(b'cannot censor with version %d revlogs')
3133 3139 % self._format_version
3134 3140 )
3135 3141 elif self._format_version == REVLOGV1:
3136 3142 rewrite.v1_censor(self, tr, censornode, tombstone)
3137 3143 else:
3138 3144 rewrite.v2_censor(self, tr, censornode, tombstone)
3139 3145
3140 3146 def verifyintegrity(self, state):
3141 3147 """Verifies the integrity of the revlog.
3142 3148
3143 3149 Yields ``revlogproblem`` instances describing problems that are
3144 3150 found.
3145 3151 """
3146 3152 dd, di = self.checksize()
3147 3153 if dd:
3148 3154 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3149 3155 if di:
3150 3156 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3151 3157
3152 3158 version = self._format_version
3153 3159
3154 3160 # The verifier tells us what version revlog we should be.
3155 3161 if version != state[b'expectedversion']:
3156 3162 yield revlogproblem(
3157 3163 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3158 3164 % (self.display_id, version, state[b'expectedversion'])
3159 3165 )
3160 3166
3161 3167 state[b'skipread'] = set()
3162 3168 state[b'safe_renamed'] = set()
3163 3169
3164 3170 for rev in self:
3165 3171 node = self.node(rev)
3166 3172
3167 3173 # Verify contents. 4 cases to care about:
3168 3174 #
3169 3175 # common: the most common case
3170 3176 # rename: with a rename
3171 3177 # meta: file content starts with b'\1\n', the metadata
3172 3178 # header defined in filelog.py, but without a rename
3173 3179 # ext: content stored externally
3174 3180 #
3175 3181 # More formally, their differences are shown below:
3176 3182 #
3177 3183 # | common | rename | meta | ext
3178 3184 # -------------------------------------------------------
3179 3185 # flags() | 0 | 0 | 0 | not 0
3180 3186 # renamed() | False | True | False | ?
3181 3187 # rawtext[0:2]=='\1\n'| False | True | True | ?
3182 3188 #
3183 3189 # "rawtext" means the raw text stored in revlog data, which
3184 3190 # could be retrieved by "rawdata(rev)". "text"
3185 3191 # mentioned below is "revision(rev)".
3186 3192 #
3187 3193 # There are 3 different lengths stored physically:
3188 3194 # 1. L1: rawsize, stored in revlog index
3189 3195 # 2. L2: len(rawtext), stored in revlog data
3190 3196 # 3. L3: len(text), stored in revlog data if flags==0, or
3191 3197 # possibly somewhere else if flags!=0
3192 3198 #
3193 3199 # L1 should be equal to L2. L3 could be different from them.
3194 3200 # "text" may or may not affect commit hash depending on flag
3195 3201 # processors (see flagutil.addflagprocessor).
3196 3202 #
3197 3203 # | common | rename | meta | ext
3198 3204 # -------------------------------------------------
3199 3205 # rawsize() | L1 | L1 | L1 | L1
3200 3206 # size() | L1 | L2-LM | L1(*) | L1 (?)
3201 3207 # len(rawtext) | L2 | L2 | L2 | L2
3202 3208 # len(text) | L2 | L2 | L2 | L3
3203 3209 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3204 3210 #
3205 3211 # LM: length of metadata, depending on rawtext
3206 3212 # (*): not ideal, see comment in filelog.size
3207 3213 # (?): could be "- len(meta)" if the resolved content has
3208 3214 # rename metadata
3209 3215 #
3210 3216 # Checks needed to be done:
3211 3217 # 1. length check: L1 == L2, in all cases.
3212 3218 # 2. hash check: depending on flag processor, we may need to
3213 3219 # use either "text" (external), or "rawtext" (in revlog).
3214 3220
3215 3221 try:
3216 3222 skipflags = state.get(b'skipflags', 0)
3217 3223 if skipflags:
3218 3224 skipflags &= self.flags(rev)
3219 3225
3220 3226 _verify_revision(self, skipflags, state, node)
3221 3227
3222 3228 l1 = self.rawsize(rev)
3223 3229 l2 = len(self.rawdata(node))
3224 3230
3225 3231 if l1 != l2:
3226 3232 yield revlogproblem(
3227 3233 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3228 3234 node=node,
3229 3235 )
3230 3236
3231 3237 except error.CensoredNodeError:
3232 3238 if state[b'erroroncensored']:
3233 3239 yield revlogproblem(
3234 3240 error=_(b'censored file data'), node=node
3235 3241 )
3236 3242 state[b'skipread'].add(node)
3237 3243 except Exception as e:
3238 3244 yield revlogproblem(
3239 3245 error=_(b'unpacking %s: %s')
3240 3246 % (short(node), stringutil.forcebytestr(e)),
3241 3247 node=node,
3242 3248 )
3243 3249 state[b'skipread'].add(node)
3244 3250
3245 3251 def storageinfo(
3246 3252 self,
3247 3253 exclusivefiles=False,
3248 3254 sharedfiles=False,
3249 3255 revisionscount=False,
3250 3256 trackedsize=False,
3251 3257 storedsize=False,
3252 3258 ):
3253 3259 d = {}
3254 3260
3255 3261 if exclusivefiles:
3256 3262 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3257 3263 if not self._inline:
3258 3264 d[b'exclusivefiles'].append((self.opener, self._datafile))
3259 3265
3260 3266 if sharedfiles:
3261 3267 d[b'sharedfiles'] = []
3262 3268
3263 3269 if revisionscount:
3264 3270 d[b'revisionscount'] = len(self)
3265 3271
3266 3272 if trackedsize:
3267 3273 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3268 3274
3269 3275 if storedsize:
3270 3276 d[b'storedsize'] = sum(
3271 3277 self.opener.stat(path).st_size for path in self.files()
3272 3278 )
3273 3279
3274 3280 return d
3275 3281
3276 3282 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3277 3283 if not self.hassidedata:
3278 3284 return
3279 3285 # revlog formats with sidedata support do not support inline
3280 3286 assert not self._inline
3281 3287 if not helpers[1] and not helpers[2]:
3282 3288 # Nothing to generate or remove
3283 3289 return
3284 3290
3285 3291 new_entries = []
3286 3292 # append the new sidedata
3287 3293 with self._writing(transaction):
3288 3294 ifh, dfh, sdfh = self._writinghandles
3289 3295 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
3290 3296
3291 3297 current_offset = sdfh.tell()
3292 3298 for rev in range(startrev, endrev + 1):
3293 3299 entry = self.index[rev]
3294 3300 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3295 3301 store=self,
3296 3302 sidedata_helpers=helpers,
3297 3303 sidedata={},
3298 3304 rev=rev,
3299 3305 )
3300 3306
3301 3307 serialized_sidedata = sidedatautil.serialize_sidedata(
3302 3308 new_sidedata
3303 3309 )
3304 3310
3305 3311 sidedata_compression_mode = COMP_MODE_INLINE
3306 3312 if serialized_sidedata and self.hassidedata:
3307 3313 sidedata_compression_mode = COMP_MODE_PLAIN
3308 3314 h, comp_sidedata = self.compress(serialized_sidedata)
3309 3315 if (
3310 3316 h != b'u'
3311 3317 and comp_sidedata[0] != b'\0'
3312 3318 and len(comp_sidedata) < len(serialized_sidedata)
3313 3319 ):
3314 3320 assert not h
3315 3321 if (
3316 3322 comp_sidedata[0]
3317 3323 == self._docket.default_compression_header
3318 3324 ):
3319 3325 sidedata_compression_mode = COMP_MODE_DEFAULT
3320 3326 serialized_sidedata = comp_sidedata
3321 3327 else:
3322 3328 sidedata_compression_mode = COMP_MODE_INLINE
3323 3329 serialized_sidedata = comp_sidedata
3324 3330 if entry[8] != 0 or entry[9] != 0:
3325 3331 # rewriting entries that already have sidedata is not
3326 3332 # supported yet, because it introduces garbage data in the
3327 3333 # revlog.
3328 3334 msg = b"rewriting existing sidedata is not supported yet"
3329 3335 raise error.Abort(msg)
3330 3336
3331 3337 # Apply (potential) flags to add and to remove after running
3332 3338 # the sidedata helpers
3333 3339 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3334 3340 entry_update = (
3335 3341 current_offset,
3336 3342 len(serialized_sidedata),
3337 3343 new_offset_flags,
3338 3344 sidedata_compression_mode,
3339 3345 )
3340 3346
3341 3347 # the sidedata computation might have moved the file cursors around
3342 3348 sdfh.seek(current_offset, os.SEEK_SET)
3343 3349 sdfh.write(serialized_sidedata)
3344 3350 new_entries.append(entry_update)
3345 3351 current_offset += len(serialized_sidedata)
3346 3352 self._docket.sidedata_end = sdfh.tell()
3347 3353
3348 3354 # rewrite the new index entries
3349 3355 ifh.seek(startrev * self.index.entry_size)
3350 3356 for i, e in enumerate(new_entries):
3351 3357 rev = startrev + i
3352 3358 self.index.replace_sidedata_info(rev, *e)
3353 3359 packed = self.index.entry_binary(rev)
3354 3360 if rev == 0 and self._docket is None:
3355 3361 header = self._format_flags | self._format_version
3356 3362 header = self.index.pack_header(header)
3357 3363 packed = header + packed
3358 3364 ifh.write(packed)