revlogv2: track current index size in the docket...
marmoute
r48012:6597255a default
@@ -1,2698 +1,2699 b''
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
19 19 def loadconfigtable(ui, extname, configtable):
20 20 """update config item known to the ui with the extension ones"""
21 21 for section, items in sorted(configtable.items()):
22 22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 23 knownkeys = set(knownitems)
24 24 newkeys = set(items)
25 25 for key in sorted(knownkeys & newkeys):
26 26 msg = b"extension '%s' overwrites config item '%s.%s'"
27 27 msg %= (extname, section, key)
28 28 ui.develwarn(msg, config=b'warn-config')
29 29
30 30 knownitems.update(items)
31 31
32 32
33 33 class configitem(object):
34 34 """represent a known config item
35 35
36 36 :section: the official config section where to find this item,
37 37 :name: the official name within the section,
38 38 :default: default value for this item,
39 39 :alias: optional list of tuples as alternatives,
40 40 :generic: this is a generic definition; the name is matched using a regular expression.
41 41 """
42 42
43 43 def __init__(
44 44 self,
45 45 section,
46 46 name,
47 47 default=None,
48 48 alias=(),
49 49 generic=False,
50 50 priority=0,
51 51 experimental=False,
52 52 ):
53 53 self.section = section
54 54 self.name = name
55 55 self.default = default
56 56 self.alias = list(alias)
57 57 self.generic = generic
58 58 self.priority = priority
59 59 self.experimental = experimental
60 60 self._re = None
61 61 if generic:
62 62 self._re = re.compile(self.name)
63 63
64 64
65 65 class itemregister(dict):
66 66 """A specialized dictionary that can handle wild-card selection"""
67 67
68 68 def __init__(self):
69 69 super(itemregister, self).__init__()
70 70 self._generics = set()
71 71
72 72 def update(self, other):
73 73 super(itemregister, self).update(other)
74 74 self._generics.update(other._generics)
75 75
76 76 def __setitem__(self, key, item):
77 77 super(itemregister, self).__setitem__(key, item)
78 78 if item.generic:
79 79 self._generics.add(item)
80 80
81 81 def get(self, key):
82 82 baseitem = super(itemregister, self).get(key)
83 83 if baseitem is not None and not baseitem.generic:
84 84 return baseitem
85 85
86 86 # search for a matching generic item
87 87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 88 for item in generics:
89 89 # we use 'match' instead of 'search' to make the matching simpler
90 90 # for people unfamiliar with regular expressions. Having the match
91 91 # rooted to the start of the string will produce a less surprising
92 92 # result for users writing simple regexes for sub-attributes.
93 93 #
94 94 # For example using "color\..*" match produces an unsurprising
95 95 # result, while using search could suddenly match apparently
96 96 # unrelated configuration that happens to contain "color."
97 97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 98 # some matches to avoid the need to prefix most patterns with "^".
99 99 # The "^" seems more error-prone.
100 100 if item._re.match(key):
101 101 return item
102 102
103 103 return None
104 104
105 105
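
A minimal sketch (not part of the changeset itself) of how the wild-card lookup above behaves, assuming this module is importable as mercurial.configitems and using only the classes defined in this file:

from mercurial.configitems import configitem, itemregister

reg = itemregister()
reg[b'mode'] = configitem(b'color', b'mode', default=b'auto')
reg[b'.*'] = configitem(b'color', b'.*', default=None, generic=True)

assert reg.get(b'mode').default == b'auto'    # exact, non-generic item wins
assert reg.get(b'pagermode').generic          # falls back to the b'.*' generic item
assert reg.get(b'pagermode').default is None  # the generic item's default applies
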
106 106 coreitems = {}
107 107
108 108
109 109 def _register(configtable, *args, **kwargs):
110 110 item = configitem(*args, **kwargs)
111 111 section = configtable.setdefault(item.section, itemregister())
112 112 if item.name in section:
113 113 msg = b"duplicated config item registration for '%s.%s'"
114 114 raise error.ProgrammingError(msg % (item.section, item.name))
115 115 section[item.name] = item
116 116
117 117
118 118 # special value for the case where the default is derived from other values
119 119 dynamicdefault = object()
120 120
121 121 # Registering actual config items
122 122
123 123
124 124 def getitemregister(configtable):
125 125 f = functools.partial(_register, configtable)
126 126 # export pseudo enum as configitem.*
127 127 f.dynamicdefault = dynamicdefault
128 128 return f
129 129
130 130
131 131 coreconfigitem = getitemregister(coreitems)
132 132
133 133
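
A second sketch (again outside the changeset) of how a table other than coreitems could be populated through getitemregister(); the 'myext' section is purely hypothetical, and duplicate registration is rejected by _register() as shown above:

from mercurial import error
from mercurial.configitems import getitemregister

mytable = {}  # a private table, analogous to coreitems
myconfigitem = getitemregister(mytable)
myconfigitem(b'myext', b'verbose', default=False)  # hypothetical 'myext.verbose' item
try:
    myconfigitem(b'myext', b'verbose', default=True)  # same name registered twice
except error.ProgrammingError:
    pass  # duplicated config item registration is refused
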
134 134 def _registerdiffopts(section, configprefix=b''):
135 135 coreconfigitem(
136 136 section,
137 137 configprefix + b'nodates',
138 138 default=False,
139 139 )
140 140 coreconfigitem(
141 141 section,
142 142 configprefix + b'showfunc',
143 143 default=False,
144 144 )
145 145 coreconfigitem(
146 146 section,
147 147 configprefix + b'unified',
148 148 default=None,
149 149 )
150 150 coreconfigitem(
151 151 section,
152 152 configprefix + b'git',
153 153 default=False,
154 154 )
155 155 coreconfigitem(
156 156 section,
157 157 configprefix + b'ignorews',
158 158 default=False,
159 159 )
160 160 coreconfigitem(
161 161 section,
162 162 configprefix + b'ignorewsamount',
163 163 default=False,
164 164 )
165 165 coreconfigitem(
166 166 section,
167 167 configprefix + b'ignoreblanklines',
168 168 default=False,
169 169 )
170 170 coreconfigitem(
171 171 section,
172 172 configprefix + b'ignorewseol',
173 173 default=False,
174 174 )
175 175 coreconfigitem(
176 176 section,
177 177 configprefix + b'nobinary',
178 178 default=False,
179 179 )
180 180 coreconfigitem(
181 181 section,
182 182 configprefix + b'noprefix',
183 183 default=False,
184 184 )
185 185 coreconfigitem(
186 186 section,
187 187 configprefix + b'word-diff',
188 188 default=False,
189 189 )
190 190
191 191
192 192 coreconfigitem(
193 193 b'alias',
194 194 b'.*',
195 195 default=dynamicdefault,
196 196 generic=True,
197 197 )
198 198 coreconfigitem(
199 199 b'auth',
200 200 b'cookiefile',
201 201 default=None,
202 202 )
203 203 _registerdiffopts(section=b'annotate')
204 204 # bookmarks.pushing: internal hack for discovery
205 205 coreconfigitem(
206 206 b'bookmarks',
207 207 b'pushing',
208 208 default=list,
209 209 )
210 210 # bundle.mainreporoot: internal hack for bundlerepo
211 211 coreconfigitem(
212 212 b'bundle',
213 213 b'mainreporoot',
214 214 default=b'',
215 215 )
216 216 coreconfigitem(
217 217 b'censor',
218 218 b'policy',
219 219 default=b'abort',
220 220 experimental=True,
221 221 )
222 222 coreconfigitem(
223 223 b'chgserver',
224 224 b'idletimeout',
225 225 default=3600,
226 226 )
227 227 coreconfigitem(
228 228 b'chgserver',
229 229 b'skiphash',
230 230 default=False,
231 231 )
232 232 coreconfigitem(
233 233 b'cmdserver',
234 234 b'log',
235 235 default=None,
236 236 )
237 237 coreconfigitem(
238 238 b'cmdserver',
239 239 b'max-log-files',
240 240 default=7,
241 241 )
242 242 coreconfigitem(
243 243 b'cmdserver',
244 244 b'max-log-size',
245 245 default=b'1 MB',
246 246 )
247 247 coreconfigitem(
248 248 b'cmdserver',
249 249 b'max-repo-cache',
250 250 default=0,
251 251 experimental=True,
252 252 )
253 253 coreconfigitem(
254 254 b'cmdserver',
255 255 b'message-encodings',
256 256 default=list,
257 257 )
258 258 coreconfigitem(
259 259 b'cmdserver',
260 260 b'track-log',
261 261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
262 262 )
263 263 coreconfigitem(
264 264 b'cmdserver',
265 265 b'shutdown-on-interrupt',
266 266 default=True,
267 267 )
268 268 coreconfigitem(
269 269 b'color',
270 270 b'.*',
271 271 default=None,
272 272 generic=True,
273 273 )
274 274 coreconfigitem(
275 275 b'color',
276 276 b'mode',
277 277 default=b'auto',
278 278 )
279 279 coreconfigitem(
280 280 b'color',
281 281 b'pagermode',
282 282 default=dynamicdefault,
283 283 )
284 284 coreconfigitem(
285 285 b'command-templates',
286 286 b'graphnode',
287 287 default=None,
288 288 alias=[(b'ui', b'graphnodetemplate')],
289 289 )
290 290 coreconfigitem(
291 291 b'command-templates',
292 292 b'log',
293 293 default=None,
294 294 alias=[(b'ui', b'logtemplate')],
295 295 )
296 296 coreconfigitem(
297 297 b'command-templates',
298 298 b'mergemarker',
299 299 default=(
300 300 b'{node|short} '
301 301 b'{ifeq(tags, "tip", "", '
302 302 b'ifeq(tags, "", "", "{tags} "))}'
303 303 b'{if(bookmarks, "{bookmarks} ")}'
304 304 b'{ifeq(branch, "default", "", "{branch} ")}'
305 305 b'- {author|user}: {desc|firstline}'
306 306 ),
307 307 alias=[(b'ui', b'mergemarkertemplate')],
308 308 )
309 309 coreconfigitem(
310 310 b'command-templates',
311 311 b'pre-merge-tool-output',
312 312 default=None,
313 313 alias=[(b'ui', b'pre-merge-tool-output-template')],
314 314 )
315 315 coreconfigitem(
316 316 b'command-templates',
317 317 b'oneline-summary',
318 318 default=None,
319 319 )
320 320 coreconfigitem(
321 321 b'command-templates',
322 322 b'oneline-summary.*',
323 323 default=dynamicdefault,
324 324 generic=True,
325 325 )
326 326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
327 327 coreconfigitem(
328 328 b'commands',
329 329 b'commit.post-status',
330 330 default=False,
331 331 )
332 332 coreconfigitem(
333 333 b'commands',
334 334 b'grep.all-files',
335 335 default=False,
336 336 experimental=True,
337 337 )
338 338 coreconfigitem(
339 339 b'commands',
340 340 b'merge.require-rev',
341 341 default=False,
342 342 )
343 343 coreconfigitem(
344 344 b'commands',
345 345 b'push.require-revs',
346 346 default=False,
347 347 )
348 348 coreconfigitem(
349 349 b'commands',
350 350 b'resolve.confirm',
351 351 default=False,
352 352 )
353 353 coreconfigitem(
354 354 b'commands',
355 355 b'resolve.explicit-re-merge',
356 356 default=False,
357 357 )
358 358 coreconfigitem(
359 359 b'commands',
360 360 b'resolve.mark-check',
361 361 default=b'none',
362 362 )
363 363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
364 364 coreconfigitem(
365 365 b'commands',
366 366 b'show.aliasprefix',
367 367 default=list,
368 368 )
369 369 coreconfigitem(
370 370 b'commands',
371 371 b'status.relative',
372 372 default=False,
373 373 )
374 374 coreconfigitem(
375 375 b'commands',
376 376 b'status.skipstates',
377 377 default=[],
378 378 experimental=True,
379 379 )
380 380 coreconfigitem(
381 381 b'commands',
382 382 b'status.terse',
383 383 default=b'',
384 384 )
385 385 coreconfigitem(
386 386 b'commands',
387 387 b'status.verbose',
388 388 default=False,
389 389 )
390 390 coreconfigitem(
391 391 b'commands',
392 392 b'update.check',
393 393 default=None,
394 394 )
395 395 coreconfigitem(
396 396 b'commands',
397 397 b'update.requiredest',
398 398 default=False,
399 399 )
400 400 coreconfigitem(
401 401 b'committemplate',
402 402 b'.*',
403 403 default=None,
404 404 generic=True,
405 405 )
406 406 coreconfigitem(
407 407 b'convert',
408 408 b'bzr.saverev',
409 409 default=True,
410 410 )
411 411 coreconfigitem(
412 412 b'convert',
413 413 b'cvsps.cache',
414 414 default=True,
415 415 )
416 416 coreconfigitem(
417 417 b'convert',
418 418 b'cvsps.fuzz',
419 419 default=60,
420 420 )
421 421 coreconfigitem(
422 422 b'convert',
423 423 b'cvsps.logencoding',
424 424 default=None,
425 425 )
426 426 coreconfigitem(
427 427 b'convert',
428 428 b'cvsps.mergefrom',
429 429 default=None,
430 430 )
431 431 coreconfigitem(
432 432 b'convert',
433 433 b'cvsps.mergeto',
434 434 default=None,
435 435 )
436 436 coreconfigitem(
437 437 b'convert',
438 438 b'git.committeractions',
439 439 default=lambda: [b'messagedifferent'],
440 440 )
441 441 coreconfigitem(
442 442 b'convert',
443 443 b'git.extrakeys',
444 444 default=list,
445 445 )
446 446 coreconfigitem(
447 447 b'convert',
448 448 b'git.findcopiesharder',
449 449 default=False,
450 450 )
451 451 coreconfigitem(
452 452 b'convert',
453 453 b'git.remoteprefix',
454 454 default=b'remote',
455 455 )
456 456 coreconfigitem(
457 457 b'convert',
458 458 b'git.renamelimit',
459 459 default=400,
460 460 )
461 461 coreconfigitem(
462 462 b'convert',
463 463 b'git.saverev',
464 464 default=True,
465 465 )
466 466 coreconfigitem(
467 467 b'convert',
468 468 b'git.similarity',
469 469 default=50,
470 470 )
471 471 coreconfigitem(
472 472 b'convert',
473 473 b'git.skipsubmodules',
474 474 default=False,
475 475 )
476 476 coreconfigitem(
477 477 b'convert',
478 478 b'hg.clonebranches',
479 479 default=False,
480 480 )
481 481 coreconfigitem(
482 482 b'convert',
483 483 b'hg.ignoreerrors',
484 484 default=False,
485 485 )
486 486 coreconfigitem(
487 487 b'convert',
488 488 b'hg.preserve-hash',
489 489 default=False,
490 490 )
491 491 coreconfigitem(
492 492 b'convert',
493 493 b'hg.revs',
494 494 default=None,
495 495 )
496 496 coreconfigitem(
497 497 b'convert',
498 498 b'hg.saverev',
499 499 default=False,
500 500 )
501 501 coreconfigitem(
502 502 b'convert',
503 503 b'hg.sourcename',
504 504 default=None,
505 505 )
506 506 coreconfigitem(
507 507 b'convert',
508 508 b'hg.startrev',
509 509 default=None,
510 510 )
511 511 coreconfigitem(
512 512 b'convert',
513 513 b'hg.tagsbranch',
514 514 default=b'default',
515 515 )
516 516 coreconfigitem(
517 517 b'convert',
518 518 b'hg.usebranchnames',
519 519 default=True,
520 520 )
521 521 coreconfigitem(
522 522 b'convert',
523 523 b'ignoreancestorcheck',
524 524 default=False,
525 525 experimental=True,
526 526 )
527 527 coreconfigitem(
528 528 b'convert',
529 529 b'localtimezone',
530 530 default=False,
531 531 )
532 532 coreconfigitem(
533 533 b'convert',
534 534 b'p4.encoding',
535 535 default=dynamicdefault,
536 536 )
537 537 coreconfigitem(
538 538 b'convert',
539 539 b'p4.startrev',
540 540 default=0,
541 541 )
542 542 coreconfigitem(
543 543 b'convert',
544 544 b'skiptags',
545 545 default=False,
546 546 )
547 547 coreconfigitem(
548 548 b'convert',
549 549 b'svn.debugsvnlog',
550 550 default=True,
551 551 )
552 552 coreconfigitem(
553 553 b'convert',
554 554 b'svn.trunk',
555 555 default=None,
556 556 )
557 557 coreconfigitem(
558 558 b'convert',
559 559 b'svn.tags',
560 560 default=None,
561 561 )
562 562 coreconfigitem(
563 563 b'convert',
564 564 b'svn.branches',
565 565 default=None,
566 566 )
567 567 coreconfigitem(
568 568 b'convert',
569 569 b'svn.startrev',
570 570 default=0,
571 571 )
572 572 coreconfigitem(
573 573 b'convert',
574 574 b'svn.dangerous-set-commit-dates',
575 575 default=False,
576 576 )
577 577 coreconfigitem(
578 578 b'debug',
579 579 b'dirstate.delaywrite',
580 580 default=0,
581 581 )
582 582 coreconfigitem(
583 583 b'debug',
584 584 b'revlog.verifyposition.changelog',
585 585 default=b'',
586 586 )
587 587 coreconfigitem(
588 588 b'defaults',
589 589 b'.*',
590 590 default=None,
591 591 generic=True,
592 592 )
593 593 coreconfigitem(
594 594 b'devel',
595 595 b'all-warnings',
596 596 default=False,
597 597 )
598 598 coreconfigitem(
599 599 b'devel',
600 600 b'bundle2.debug',
601 601 default=False,
602 602 )
603 603 coreconfigitem(
604 604 b'devel',
605 605 b'bundle.delta',
606 606 default=b'',
607 607 )
608 608 coreconfigitem(
609 609 b'devel',
610 610 b'cache-vfs',
611 611 default=None,
612 612 )
613 613 coreconfigitem(
614 614 b'devel',
615 615 b'check-locks',
616 616 default=False,
617 617 )
618 618 coreconfigitem(
619 619 b'devel',
620 620 b'check-relroot',
621 621 default=False,
622 622 )
623 623 # Track copy information for all files, not just "added" ones (very slow)
624 624 coreconfigitem(
625 625 b'devel',
626 626 b'copy-tracing.trace-all-files',
627 627 default=False,
628 628 )
629 629 coreconfigitem(
630 630 b'devel',
631 631 b'default-date',
632 632 default=None,
633 633 )
634 634 coreconfigitem(
635 635 b'devel',
636 636 b'deprec-warn',
637 637 default=False,
638 638 )
639 639 coreconfigitem(
640 640 b'devel',
641 641 b'disableloaddefaultcerts',
642 642 default=False,
643 643 )
644 644 coreconfigitem(
645 645 b'devel',
646 646 b'warn-empty-changegroup',
647 647 default=False,
648 648 )
649 649 coreconfigitem(
650 650 b'devel',
651 651 b'legacy.exchange',
652 652 default=list,
653 653 )
654 654 # When True, revlogs use a special reference version of the nodemap that is not
655 655 # performant but is "known" to behave properly.
656 656 coreconfigitem(
657 657 b'devel',
658 658 b'persistent-nodemap',
659 659 default=False,
660 660 )
661 661 coreconfigitem(
662 662 b'devel',
663 663 b'servercafile',
664 664 default=b'',
665 665 )
666 666 coreconfigitem(
667 667 b'devel',
668 668 b'serverexactprotocol',
669 669 default=b'',
670 670 )
671 671 coreconfigitem(
672 672 b'devel',
673 673 b'serverrequirecert',
674 674 default=False,
675 675 )
676 676 coreconfigitem(
677 677 b'devel',
678 678 b'strip-obsmarkers',
679 679 default=True,
680 680 )
681 681 coreconfigitem(
682 682 b'devel',
683 683 b'warn-config',
684 684 default=None,
685 685 )
686 686 coreconfigitem(
687 687 b'devel',
688 688 b'warn-config-default',
689 689 default=None,
690 690 )
691 691 coreconfigitem(
692 692 b'devel',
693 693 b'user.obsmarker',
694 694 default=None,
695 695 )
696 696 coreconfigitem(
697 697 b'devel',
698 698 b'warn-config-unknown',
699 699 default=None,
700 700 )
701 701 coreconfigitem(
702 702 b'devel',
703 703 b'debug.copies',
704 704 default=False,
705 705 )
706 706 coreconfigitem(
707 707 b'devel',
708 708 b'copy-tracing.multi-thread',
709 709 default=True,
710 710 )
711 711 coreconfigitem(
712 712 b'devel',
713 713 b'debug.extensions',
714 714 default=False,
715 715 )
716 716 coreconfigitem(
717 717 b'devel',
718 718 b'debug.repo-filters',
719 719 default=False,
720 720 )
721 721 coreconfigitem(
722 722 b'devel',
723 723 b'debug.peer-request',
724 724 default=False,
725 725 )
726 726 # If discovery.exchange-heads is False, the discovery will not start with
727 727 # remote head fetching and local head querying.
728 728 coreconfigitem(
729 729 b'devel',
730 730 b'discovery.exchange-heads',
731 731 default=True,
732 732 )
733 733 # If discovery.grow-sample is False, the sample size used in set discovery will
734 734 # not be increased through the process
735 735 coreconfigitem(
736 736 b'devel',
737 737 b'discovery.grow-sample',
738 738 default=True,
739 739 )
740 740 # When discovery.grow-sample.dynamic is True (the default), the sample size is
741 741 # adapted to the shape of the undecided set (it is set to the max of:
742 742 # <target-size>, len(roots(undecided)), len(heads(undecided)))
743 743 coreconfigitem(
744 744 b'devel',
745 745 b'discovery.grow-sample.dynamic',
746 746 default=True,
747 747 )
748 748 # discovery.grow-sample.rate controls the rate at which the sample grows
749 749 coreconfigitem(
750 750 b'devel',
751 751 b'discovery.grow-sample.rate',
752 752 default=1.05,
753 753 )
754 754 # If discovery.randomize is False, random sampling during discovery is
755 755 # deterministic. It is meant for integration tests.
756 756 coreconfigitem(
757 757 b'devel',
758 758 b'discovery.randomize',
759 759 default=True,
760 760 )
761 761 # Control the initial size of the discovery sample
762 762 coreconfigitem(
763 763 b'devel',
764 764 b'discovery.sample-size',
765 765 default=200,
766 766 )
767 767 # Control the initial size of the discovery sample for the initial exchange
768 768 coreconfigitem(
769 769 b'devel',
770 770 b'discovery.sample-size.initial',
771 771 default=100,
772 772 )
773 773 _registerdiffopts(section=b'diff')
774 774 coreconfigitem(
775 775 b'diff',
776 776 b'merge',
777 777 default=False,
778 778 experimental=True,
779 779 )
780 780 coreconfigitem(
781 781 b'email',
782 782 b'bcc',
783 783 default=None,
784 784 )
785 785 coreconfigitem(
786 786 b'email',
787 787 b'cc',
788 788 default=None,
789 789 )
790 790 coreconfigitem(
791 791 b'email',
792 792 b'charsets',
793 793 default=list,
794 794 )
795 795 coreconfigitem(
796 796 b'email',
797 797 b'from',
798 798 default=None,
799 799 )
800 800 coreconfigitem(
801 801 b'email',
802 802 b'method',
803 803 default=b'smtp',
804 804 )
805 805 coreconfigitem(
806 806 b'email',
807 807 b'reply-to',
808 808 default=None,
809 809 )
810 810 coreconfigitem(
811 811 b'email',
812 812 b'to',
813 813 default=None,
814 814 )
815 815 coreconfigitem(
816 816 b'experimental',
817 817 b'archivemetatemplate',
818 818 default=dynamicdefault,
819 819 )
820 820 coreconfigitem(
821 821 b'experimental',
822 822 b'auto-publish',
823 823 default=b'publish',
824 824 )
825 825 coreconfigitem(
826 826 b'experimental',
827 827 b'bundle-phases',
828 828 default=False,
829 829 )
830 830 coreconfigitem(
831 831 b'experimental',
832 832 b'bundle2-advertise',
833 833 default=True,
834 834 )
835 835 coreconfigitem(
836 836 b'experimental',
837 837 b'bundle2-output-capture',
838 838 default=False,
839 839 )
840 840 coreconfigitem(
841 841 b'experimental',
842 842 b'bundle2.pushback',
843 843 default=False,
844 844 )
845 845 coreconfigitem(
846 846 b'experimental',
847 847 b'bundle2lazylocking',
848 848 default=False,
849 849 )
850 850 coreconfigitem(
851 851 b'experimental',
852 852 b'bundlecomplevel',
853 853 default=None,
854 854 )
855 855 coreconfigitem(
856 856 b'experimental',
857 857 b'bundlecomplevel.bzip2',
858 858 default=None,
859 859 )
860 860 coreconfigitem(
861 861 b'experimental',
862 862 b'bundlecomplevel.gzip',
863 863 default=None,
864 864 )
865 865 coreconfigitem(
866 866 b'experimental',
867 867 b'bundlecomplevel.none',
868 868 default=None,
869 869 )
870 870 coreconfigitem(
871 871 b'experimental',
872 872 b'bundlecomplevel.zstd',
873 873 default=None,
874 874 )
875 875 coreconfigitem(
876 876 b'experimental',
877 877 b'bundlecompthreads',
878 878 default=None,
879 879 )
880 880 coreconfigitem(
881 881 b'experimental',
882 882 b'bundlecompthreads.bzip2',
883 883 default=None,
884 884 )
885 885 coreconfigitem(
886 886 b'experimental',
887 887 b'bundlecompthreads.gzip',
888 888 default=None,
889 889 )
890 890 coreconfigitem(
891 891 b'experimental',
892 892 b'bundlecompthreads.none',
893 893 default=None,
894 894 )
895 895 coreconfigitem(
896 896 b'experimental',
897 897 b'bundlecompthreads.zstd',
898 898 default=None,
899 899 )
900 900 coreconfigitem(
901 901 b'experimental',
902 902 b'changegroup3',
903 903 default=False,
904 904 )
905 905 coreconfigitem(
906 906 b'experimental',
907 907 b'changegroup4',
908 908 default=False,
909 909 )
910 910 coreconfigitem(
911 911 b'experimental',
912 912 b'cleanup-as-archived',
913 913 default=False,
914 914 )
915 915 coreconfigitem(
916 916 b'experimental',
917 917 b'clientcompressionengines',
918 918 default=list,
919 919 )
920 920 coreconfigitem(
921 921 b'experimental',
922 922 b'copytrace',
923 923 default=b'on',
924 924 )
925 925 coreconfigitem(
926 926 b'experimental',
927 927 b'copytrace.movecandidateslimit',
928 928 default=100,
929 929 )
930 930 coreconfigitem(
931 931 b'experimental',
932 932 b'copytrace.sourcecommitlimit',
933 933 default=100,
934 934 )
935 935 coreconfigitem(
936 936 b'experimental',
937 937 b'copies.read-from',
938 938 default=b"filelog-only",
939 939 )
940 940 coreconfigitem(
941 941 b'experimental',
942 942 b'copies.write-to',
943 943 default=b'filelog-only',
944 944 )
945 945 coreconfigitem(
946 946 b'experimental',
947 947 b'crecordtest',
948 948 default=None,
949 949 )
950 950 coreconfigitem(
951 951 b'experimental',
952 952 b'directaccess',
953 953 default=False,
954 954 )
955 955 coreconfigitem(
956 956 b'experimental',
957 957 b'directaccess.revnums',
958 958 default=False,
959 959 )
960 960 coreconfigitem(
961 961 b'experimental',
962 962 b'dirstate-tree.in-memory',
963 963 default=False,
964 964 )
965 965 coreconfigitem(
966 966 b'experimental',
967 967 b'editortmpinhg',
968 968 default=False,
969 969 )
970 970 coreconfigitem(
971 971 b'experimental',
972 972 b'evolution',
973 973 default=list,
974 974 )
975 975 coreconfigitem(
976 976 b'experimental',
977 977 b'evolution.allowdivergence',
978 978 default=False,
979 979 alias=[(b'experimental', b'allowdivergence')],
980 980 )
981 981 coreconfigitem(
982 982 b'experimental',
983 983 b'evolution.allowunstable',
984 984 default=None,
985 985 )
986 986 coreconfigitem(
987 987 b'experimental',
988 988 b'evolution.createmarkers',
989 989 default=None,
990 990 )
991 991 coreconfigitem(
992 992 b'experimental',
993 993 b'evolution.effect-flags',
994 994 default=True,
995 995 alias=[(b'experimental', b'effect-flags')],
996 996 )
997 997 coreconfigitem(
998 998 b'experimental',
999 999 b'evolution.exchange',
1000 1000 default=None,
1001 1001 )
1002 1002 coreconfigitem(
1003 1003 b'experimental',
1004 1004 b'evolution.bundle-obsmarker',
1005 1005 default=False,
1006 1006 )
1007 1007 coreconfigitem(
1008 1008 b'experimental',
1009 1009 b'evolution.bundle-obsmarker:mandatory',
1010 1010 default=True,
1011 1011 )
1012 1012 coreconfigitem(
1013 1013 b'experimental',
1014 1014 b'log.topo',
1015 1015 default=False,
1016 1016 )
1017 1017 coreconfigitem(
1018 1018 b'experimental',
1019 1019 b'evolution.report-instabilities',
1020 1020 default=True,
1021 1021 )
1022 1022 coreconfigitem(
1023 1023 b'experimental',
1024 1024 b'evolution.track-operation',
1025 1025 default=True,
1026 1026 )
1027 1027 # repo-level config to exclude a revset from visibility
1028 1028 #
1029 1029 # The target use case is to use `share` to expose different subsets of the same
1030 1030 # repository, especially server-side. See also `server.view`.
1031 1031 coreconfigitem(
1032 1032 b'experimental',
1033 1033 b'extra-filter-revs',
1034 1034 default=None,
1035 1035 )
1036 1036 coreconfigitem(
1037 1037 b'experimental',
1038 1038 b'maxdeltachainspan',
1039 1039 default=-1,
1040 1040 )
1041 1041 # tracks files which were undeleted (merge might delete them but we explicitly
1042 1042 # kept/undeleted them) and creates new filenodes for them
1043 1043 coreconfigitem(
1044 1044 b'experimental',
1045 1045 b'merge-track-salvaged',
1046 1046 default=False,
1047 1047 )
1048 1048 coreconfigitem(
1049 1049 b'experimental',
1050 1050 b'mergetempdirprefix',
1051 1051 default=None,
1052 1052 )
1053 1053 coreconfigitem(
1054 1054 b'experimental',
1055 1055 b'mmapindexthreshold',
1056 1056 default=None,
1057 1057 )
1058 1058 coreconfigitem(
1059 1059 b'experimental',
1060 1060 b'narrow',
1061 1061 default=False,
1062 1062 )
1063 1063 coreconfigitem(
1064 1064 b'experimental',
1065 1065 b'nonnormalparanoidcheck',
1066 1066 default=False,
1067 1067 )
1068 1068 coreconfigitem(
1069 1069 b'experimental',
1070 1070 b'exportableenviron',
1071 1071 default=list,
1072 1072 )
1073 1073 coreconfigitem(
1074 1074 b'experimental',
1075 1075 b'extendedheader.index',
1076 1076 default=None,
1077 1077 )
1078 1078 coreconfigitem(
1079 1079 b'experimental',
1080 1080 b'extendedheader.similarity',
1081 1081 default=False,
1082 1082 )
1083 1083 coreconfigitem(
1084 1084 b'experimental',
1085 1085 b'graphshorten',
1086 1086 default=False,
1087 1087 )
1088 1088 coreconfigitem(
1089 1089 b'experimental',
1090 1090 b'graphstyle.parent',
1091 1091 default=dynamicdefault,
1092 1092 )
1093 1093 coreconfigitem(
1094 1094 b'experimental',
1095 1095 b'graphstyle.missing',
1096 1096 default=dynamicdefault,
1097 1097 )
1098 1098 coreconfigitem(
1099 1099 b'experimental',
1100 1100 b'graphstyle.grandparent',
1101 1101 default=dynamicdefault,
1102 1102 )
1103 1103 coreconfigitem(
1104 1104 b'experimental',
1105 1105 b'hook-track-tags',
1106 1106 default=False,
1107 1107 )
1108 1108 coreconfigitem(
1109 1109 b'experimental',
1110 1110 b'httppeer.advertise-v2',
1111 1111 default=False,
1112 1112 )
1113 1113 coreconfigitem(
1114 1114 b'experimental',
1115 1115 b'httppeer.v2-encoder-order',
1116 1116 default=None,
1117 1117 )
1118 1118 coreconfigitem(
1119 1119 b'experimental',
1120 1120 b'httppostargs',
1121 1121 default=False,
1122 1122 )
1123 1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1124 1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1125 1125
1126 1126 coreconfigitem(
1127 1127 b'experimental',
1128 1128 b'obsmarkers-exchange-debug',
1129 1129 default=False,
1130 1130 )
1131 1131 coreconfigitem(
1132 1132 b'experimental',
1133 1133 b'remotenames',
1134 1134 default=False,
1135 1135 )
1136 1136 coreconfigitem(
1137 1137 b'experimental',
1138 1138 b'removeemptydirs',
1139 1139 default=True,
1140 1140 )
1141 1141 coreconfigitem(
1142 1142 b'experimental',
1143 1143 b'revert.interactive.select-to-keep',
1144 1144 default=False,
1145 1145 )
1146 1146 coreconfigitem(
1147 1147 b'experimental',
1148 1148 b'revisions.prefixhexnode',
1149 1149 default=False,
1150 1150 )
1151 1151 # "out of experimental" todo list.
1152 1152 #
1153 1153 # * properly hide uncommitted content from other processes
1154 1154 # * expose transaction content hooks during pre-commit validation
1155 1155 # * include management of a persistent nodemap in the main docket
1156 1156 # * enforce a "no-truncate" policy for mmap safety
1157 1157 # - for censoring operation
1158 1158 # - for stripping operation
1159 1159 # - for rollback operation
1160 # * proper streaming (race free) of the docket file
1160 1161 # * store the data size in the docket to simplify sidedata rewrite.
1161 1162 # * track garbage data to eventually allow rewriting -existing- sidedata.
1162 1163 # * Exchange-wise, we will also need to do something more efficient than
1163 1164 # keeping references to the affected revlogs, especially memory-wise when
1164 1165 # rewriting sidedata.
1165 1166 # * sidedata compression
1166 1167 # * introduce a proper solution to reduce the number of filelog related files.
1167 1168 # * Improvement to consider
1168 1169 # - track compression mode in the index entries instead of the chunks
1169 1170 # - split the data offset and flag field (the 2 bytes saved are mostly trouble)
1170 1171 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1171 1172 # - keep track of chain base or size (probably not that useful anymore)
1172 1173 # - store data and sidedata in different files
1173 1174 coreconfigitem(
1174 1175 b'experimental',
1175 1176 b'revlogv2',
1176 1177 default=None,
1177 1178 )
1178 1179 coreconfigitem(
1179 1180 b'experimental',
1180 1181 b'revisions.disambiguatewithin',
1181 1182 default=None,
1182 1183 )
1183 1184 coreconfigitem(
1184 1185 b'experimental',
1185 1186 b'rust.index',
1186 1187 default=False,
1187 1188 )
1188 1189 coreconfigitem(
1189 1190 b'experimental',
1190 1191 b'server.filesdata.recommended-batch-size',
1191 1192 default=50000,
1192 1193 )
1193 1194 coreconfigitem(
1194 1195 b'experimental',
1195 1196 b'server.manifestdata.recommended-batch-size',
1196 1197 default=100000,
1197 1198 )
1198 1199 coreconfigitem(
1199 1200 b'experimental',
1200 1201 b'server.stream-narrow-clones',
1201 1202 default=False,
1202 1203 )
1203 1204 coreconfigitem(
1204 1205 b'experimental',
1205 1206 b'single-head-per-branch',
1206 1207 default=False,
1207 1208 )
1208 1209 coreconfigitem(
1209 1210 b'experimental',
1210 1211 b'single-head-per-branch:account-closed-heads',
1211 1212 default=False,
1212 1213 )
1213 1214 coreconfigitem(
1214 1215 b'experimental',
1215 1216 b'single-head-per-branch:public-changes-only',
1216 1217 default=False,
1217 1218 )
1218 1219 coreconfigitem(
1219 1220 b'experimental',
1220 1221 b'sshserver.support-v2',
1221 1222 default=False,
1222 1223 )
1223 1224 coreconfigitem(
1224 1225 b'experimental',
1225 1226 b'sparse-read',
1226 1227 default=False,
1227 1228 )
1228 1229 coreconfigitem(
1229 1230 b'experimental',
1230 1231 b'sparse-read.density-threshold',
1231 1232 default=0.50,
1232 1233 )
1233 1234 coreconfigitem(
1234 1235 b'experimental',
1235 1236 b'sparse-read.min-gap-size',
1236 1237 default=b'65K',
1237 1238 )
1238 1239 coreconfigitem(
1239 1240 b'experimental',
1240 1241 b'treemanifest',
1241 1242 default=False,
1242 1243 )
1243 1244 coreconfigitem(
1244 1245 b'experimental',
1245 1246 b'update.atomic-file',
1246 1247 default=False,
1247 1248 )
1248 1249 coreconfigitem(
1249 1250 b'experimental',
1250 1251 b'sshpeer.advertise-v2',
1251 1252 default=False,
1252 1253 )
1253 1254 coreconfigitem(
1254 1255 b'experimental',
1255 1256 b'web.apiserver',
1256 1257 default=False,
1257 1258 )
1258 1259 coreconfigitem(
1259 1260 b'experimental',
1260 1261 b'web.api.http-v2',
1261 1262 default=False,
1262 1263 )
1263 1264 coreconfigitem(
1264 1265 b'experimental',
1265 1266 b'web.api.debugreflect',
1266 1267 default=False,
1267 1268 )
1268 1269 coreconfigitem(
1269 1270 b'experimental',
1270 1271 b'worker.wdir-get-thread-safe',
1271 1272 default=False,
1272 1273 )
1273 1274 coreconfigitem(
1274 1275 b'experimental',
1275 1276 b'worker.repository-upgrade',
1276 1277 default=False,
1277 1278 )
1278 1279 coreconfigitem(
1279 1280 b'experimental',
1280 1281 b'xdiff',
1281 1282 default=False,
1282 1283 )
1283 1284 coreconfigitem(
1284 1285 b'extensions',
1285 1286 b'.*',
1286 1287 default=None,
1287 1288 generic=True,
1288 1289 )
1289 1290 coreconfigitem(
1290 1291 b'extdata',
1291 1292 b'.*',
1292 1293 default=None,
1293 1294 generic=True,
1294 1295 )
1295 1296 coreconfigitem(
1296 1297 b'format',
1297 1298 b'bookmarks-in-store',
1298 1299 default=False,
1299 1300 )
1300 1301 coreconfigitem(
1301 1302 b'format',
1302 1303 b'chunkcachesize',
1303 1304 default=None,
1304 1305 experimental=True,
1305 1306 )
1306 1307 coreconfigitem(
1307 1308 b'format',
1308 1309 b'dotencode',
1309 1310 default=True,
1310 1311 )
1311 1312 coreconfigitem(
1312 1313 b'format',
1313 1314 b'generaldelta',
1314 1315 default=False,
1315 1316 experimental=True,
1316 1317 )
1317 1318 coreconfigitem(
1318 1319 b'format',
1319 1320 b'manifestcachesize',
1320 1321 default=None,
1321 1322 experimental=True,
1322 1323 )
1323 1324 coreconfigitem(
1324 1325 b'format',
1325 1326 b'maxchainlen',
1326 1327 default=dynamicdefault,
1327 1328 experimental=True,
1328 1329 )
1329 1330 coreconfigitem(
1330 1331 b'format',
1331 1332 b'obsstore-version',
1332 1333 default=None,
1333 1334 )
1334 1335 coreconfigitem(
1335 1336 b'format',
1336 1337 b'sparse-revlog',
1337 1338 default=True,
1338 1339 )
1339 1340 coreconfigitem(
1340 1341 b'format',
1341 1342 b'revlog-compression',
1342 1343 default=lambda: [b'zstd', b'zlib'],
1343 1344 alias=[(b'experimental', b'format.compression')],
1344 1345 )
1345 1346 coreconfigitem(
1346 1347 b'format',
1347 1348 b'usefncache',
1348 1349 default=True,
1349 1350 )
1350 1351 coreconfigitem(
1351 1352 b'format',
1352 1353 b'usegeneraldelta',
1353 1354 default=True,
1354 1355 )
1355 1356 coreconfigitem(
1356 1357 b'format',
1357 1358 b'usestore',
1358 1359 default=True,
1359 1360 )
1360 1361
1361 1362
1362 1363 def _persistent_nodemap_default():
1363 1364 """compute `use-persistent-nodemap` default value
1364 1365
1365 1366 The feature is disabled unless a fast implementation is available.
1366 1367 """
1367 1368 from . import policy
1368 1369
1369 1370 return policy.importrust('revlog') is not None
1370 1371
1371 1372
1372 1373 coreconfigitem(
1373 1374 b'format',
1374 1375 b'use-persistent-nodemap',
1375 1376 default=_persistent_nodemap_default,
1376 1377 )
1377 1378 coreconfigitem(
1378 1379 b'format',
1379 1380 b'exp-use-copies-side-data-changeset',
1380 1381 default=False,
1381 1382 experimental=True,
1382 1383 )
1383 1384 coreconfigitem(
1384 1385 b'format',
1385 1386 b'use-share-safe',
1386 1387 default=False,
1387 1388 )
1388 1389 coreconfigitem(
1389 1390 b'format',
1390 1391 b'internal-phase',
1391 1392 default=False,
1392 1393 experimental=True,
1393 1394 )
1394 1395 coreconfigitem(
1395 1396 b'fsmonitor',
1396 1397 b'warn_when_unused',
1397 1398 default=True,
1398 1399 )
1399 1400 coreconfigitem(
1400 1401 b'fsmonitor',
1401 1402 b'warn_update_file_count',
1402 1403 default=50000,
1403 1404 )
1404 1405 coreconfigitem(
1405 1406 b'fsmonitor',
1406 1407 b'warn_update_file_count_rust',
1407 1408 default=400000,
1408 1409 )
1409 1410 coreconfigitem(
1410 1411 b'help',
1411 1412 br'hidden-command\..*',
1412 1413 default=False,
1413 1414 generic=True,
1414 1415 )
1415 1416 coreconfigitem(
1416 1417 b'help',
1417 1418 br'hidden-topic\..*',
1418 1419 default=False,
1419 1420 generic=True,
1420 1421 )
1421 1422 coreconfigitem(
1422 1423 b'hooks',
1423 1424 b'[^:]*',
1424 1425 default=dynamicdefault,
1425 1426 generic=True,
1426 1427 )
1427 1428 coreconfigitem(
1428 1429 b'hooks',
1429 1430 b'.*:run-with-plain',
1430 1431 default=True,
1431 1432 generic=True,
1432 1433 )
1433 1434 coreconfigitem(
1434 1435 b'hgweb-paths',
1435 1436 b'.*',
1436 1437 default=list,
1437 1438 generic=True,
1438 1439 )
1439 1440 coreconfigitem(
1440 1441 b'hostfingerprints',
1441 1442 b'.*',
1442 1443 default=list,
1443 1444 generic=True,
1444 1445 )
1445 1446 coreconfigitem(
1446 1447 b'hostsecurity',
1447 1448 b'ciphers',
1448 1449 default=None,
1449 1450 )
1450 1451 coreconfigitem(
1451 1452 b'hostsecurity',
1452 1453 b'minimumprotocol',
1453 1454 default=dynamicdefault,
1454 1455 )
1455 1456 coreconfigitem(
1456 1457 b'hostsecurity',
1457 1458 b'.*:minimumprotocol$',
1458 1459 default=dynamicdefault,
1459 1460 generic=True,
1460 1461 )
1461 1462 coreconfigitem(
1462 1463 b'hostsecurity',
1463 1464 b'.*:ciphers$',
1464 1465 default=dynamicdefault,
1465 1466 generic=True,
1466 1467 )
1467 1468 coreconfigitem(
1468 1469 b'hostsecurity',
1469 1470 b'.*:fingerprints$',
1470 1471 default=list,
1471 1472 generic=True,
1472 1473 )
1473 1474 coreconfigitem(
1474 1475 b'hostsecurity',
1475 1476 b'.*:verifycertsfile$',
1476 1477 default=None,
1477 1478 generic=True,
1478 1479 )
1479 1480
1480 1481 coreconfigitem(
1481 1482 b'http_proxy',
1482 1483 b'always',
1483 1484 default=False,
1484 1485 )
1485 1486 coreconfigitem(
1486 1487 b'http_proxy',
1487 1488 b'host',
1488 1489 default=None,
1489 1490 )
1490 1491 coreconfigitem(
1491 1492 b'http_proxy',
1492 1493 b'no',
1493 1494 default=list,
1494 1495 )
1495 1496 coreconfigitem(
1496 1497 b'http_proxy',
1497 1498 b'passwd',
1498 1499 default=None,
1499 1500 )
1500 1501 coreconfigitem(
1501 1502 b'http_proxy',
1502 1503 b'user',
1503 1504 default=None,
1504 1505 )
1505 1506
1506 1507 coreconfigitem(
1507 1508 b'http',
1508 1509 b'timeout',
1509 1510 default=None,
1510 1511 )
1511 1512
1512 1513 coreconfigitem(
1513 1514 b'logtoprocess',
1514 1515 b'commandexception',
1515 1516 default=None,
1516 1517 )
1517 1518 coreconfigitem(
1518 1519 b'logtoprocess',
1519 1520 b'commandfinish',
1520 1521 default=None,
1521 1522 )
1522 1523 coreconfigitem(
1523 1524 b'logtoprocess',
1524 1525 b'command',
1525 1526 default=None,
1526 1527 )
1527 1528 coreconfigitem(
1528 1529 b'logtoprocess',
1529 1530 b'develwarn',
1530 1531 default=None,
1531 1532 )
1532 1533 coreconfigitem(
1533 1534 b'logtoprocess',
1534 1535 b'uiblocked',
1535 1536 default=None,
1536 1537 )
1537 1538 coreconfigitem(
1538 1539 b'merge',
1539 1540 b'checkunknown',
1540 1541 default=b'abort',
1541 1542 )
1542 1543 coreconfigitem(
1543 1544 b'merge',
1544 1545 b'checkignored',
1545 1546 default=b'abort',
1546 1547 )
1547 1548 coreconfigitem(
1548 1549 b'experimental',
1549 1550 b'merge.checkpathconflicts',
1550 1551 default=False,
1551 1552 )
1552 1553 coreconfigitem(
1553 1554 b'merge',
1554 1555 b'followcopies',
1555 1556 default=True,
1556 1557 )
1557 1558 coreconfigitem(
1558 1559 b'merge',
1559 1560 b'on-failure',
1560 1561 default=b'continue',
1561 1562 )
1562 1563 coreconfigitem(
1563 1564 b'merge',
1564 1565 b'preferancestor',
1565 1566 default=lambda: [b'*'],
1566 1567 experimental=True,
1567 1568 )
1568 1569 coreconfigitem(
1569 1570 b'merge',
1570 1571 b'strict-capability-check',
1571 1572 default=False,
1572 1573 )
1573 1574 coreconfigitem(
1574 1575 b'merge-tools',
1575 1576 b'.*',
1576 1577 default=None,
1577 1578 generic=True,
1578 1579 )
1579 1580 coreconfigitem(
1580 1581 b'merge-tools',
1581 1582 br'.*\.args$',
1582 1583 default=b"$local $base $other",
1583 1584 generic=True,
1584 1585 priority=-1,
1585 1586 )
1586 1587 coreconfigitem(
1587 1588 b'merge-tools',
1588 1589 br'.*\.binary$',
1589 1590 default=False,
1590 1591 generic=True,
1591 1592 priority=-1,
1592 1593 )
1593 1594 coreconfigitem(
1594 1595 b'merge-tools',
1595 1596 br'.*\.check$',
1596 1597 default=list,
1597 1598 generic=True,
1598 1599 priority=-1,
1599 1600 )
1600 1601 coreconfigitem(
1601 1602 b'merge-tools',
1602 1603 br'.*\.checkchanged$',
1603 1604 default=False,
1604 1605 generic=True,
1605 1606 priority=-1,
1606 1607 )
1607 1608 coreconfigitem(
1608 1609 b'merge-tools',
1609 1610 br'.*\.executable$',
1610 1611 default=dynamicdefault,
1611 1612 generic=True,
1612 1613 priority=-1,
1613 1614 )
1614 1615 coreconfigitem(
1615 1616 b'merge-tools',
1616 1617 br'.*\.fixeol$',
1617 1618 default=False,
1618 1619 generic=True,
1619 1620 priority=-1,
1620 1621 )
1621 1622 coreconfigitem(
1622 1623 b'merge-tools',
1623 1624 br'.*\.gui$',
1624 1625 default=False,
1625 1626 generic=True,
1626 1627 priority=-1,
1627 1628 )
1628 1629 coreconfigitem(
1629 1630 b'merge-tools',
1630 1631 br'.*\.mergemarkers$',
1631 1632 default=b'basic',
1632 1633 generic=True,
1633 1634 priority=-1,
1634 1635 )
1635 1636 coreconfigitem(
1636 1637 b'merge-tools',
1637 1638 br'.*\.mergemarkertemplate$',
1638 1639 default=dynamicdefault, # take from command-templates.mergemarker
1639 1640 generic=True,
1640 1641 priority=-1,
1641 1642 )
1642 1643 coreconfigitem(
1643 1644 b'merge-tools',
1644 1645 br'.*\.priority$',
1645 1646 default=0,
1646 1647 generic=True,
1647 1648 priority=-1,
1648 1649 )
1649 1650 coreconfigitem(
1650 1651 b'merge-tools',
1651 1652 br'.*\.premerge$',
1652 1653 default=dynamicdefault,
1653 1654 generic=True,
1654 1655 priority=-1,
1655 1656 )
1656 1657 coreconfigitem(
1657 1658 b'merge-tools',
1658 1659 br'.*\.symlink$',
1659 1660 default=False,
1660 1661 generic=True,
1661 1662 priority=-1,
1662 1663 )
1663 1664 coreconfigitem(
1664 1665 b'pager',
1665 1666 b'attend-.*',
1666 1667 default=dynamicdefault,
1667 1668 generic=True,
1668 1669 )
1669 1670 coreconfigitem(
1670 1671 b'pager',
1671 1672 b'ignore',
1672 1673 default=list,
1673 1674 )
1674 1675 coreconfigitem(
1675 1676 b'pager',
1676 1677 b'pager',
1677 1678 default=dynamicdefault,
1678 1679 )
1679 1680 coreconfigitem(
1680 1681 b'patch',
1681 1682 b'eol',
1682 1683 default=b'strict',
1683 1684 )
1684 1685 coreconfigitem(
1685 1686 b'patch',
1686 1687 b'fuzz',
1687 1688 default=2,
1688 1689 )
1689 1690 coreconfigitem(
1690 1691 b'paths',
1691 1692 b'default',
1692 1693 default=None,
1693 1694 )
1694 1695 coreconfigitem(
1695 1696 b'paths',
1696 1697 b'default-push',
1697 1698 default=None,
1698 1699 )
1699 1700 coreconfigitem(
1700 1701 b'paths',
1701 1702 b'.*',
1702 1703 default=None,
1703 1704 generic=True,
1704 1705 )
1705 1706 coreconfigitem(
1706 1707 b'phases',
1707 1708 b'checksubrepos',
1708 1709 default=b'follow',
1709 1710 )
1710 1711 coreconfigitem(
1711 1712 b'phases',
1712 1713 b'new-commit',
1713 1714 default=b'draft',
1714 1715 )
1715 1716 coreconfigitem(
1716 1717 b'phases',
1717 1718 b'publish',
1718 1719 default=True,
1719 1720 )
1720 1721 coreconfigitem(
1721 1722 b'profiling',
1722 1723 b'enabled',
1723 1724 default=False,
1724 1725 )
1725 1726 coreconfigitem(
1726 1727 b'profiling',
1727 1728 b'format',
1728 1729 default=b'text',
1729 1730 )
1730 1731 coreconfigitem(
1731 1732 b'profiling',
1732 1733 b'freq',
1733 1734 default=1000,
1734 1735 )
1735 1736 coreconfigitem(
1736 1737 b'profiling',
1737 1738 b'limit',
1738 1739 default=30,
1739 1740 )
1740 1741 coreconfigitem(
1741 1742 b'profiling',
1742 1743 b'nested',
1743 1744 default=0,
1744 1745 )
1745 1746 coreconfigitem(
1746 1747 b'profiling',
1747 1748 b'output',
1748 1749 default=None,
1749 1750 )
1750 1751 coreconfigitem(
1751 1752 b'profiling',
1752 1753 b'showmax',
1753 1754 default=0.999,
1754 1755 )
1755 1756 coreconfigitem(
1756 1757 b'profiling',
1757 1758 b'showmin',
1758 1759 default=dynamicdefault,
1759 1760 )
1760 1761 coreconfigitem(
1761 1762 b'profiling',
1762 1763 b'showtime',
1763 1764 default=True,
1764 1765 )
1765 1766 coreconfigitem(
1766 1767 b'profiling',
1767 1768 b'sort',
1768 1769 default=b'inlinetime',
1769 1770 )
1770 1771 coreconfigitem(
1771 1772 b'profiling',
1772 1773 b'statformat',
1773 1774 default=b'hotpath',
1774 1775 )
1775 1776 coreconfigitem(
1776 1777 b'profiling',
1777 1778 b'time-track',
1778 1779 default=dynamicdefault,
1779 1780 )
1780 1781 coreconfigitem(
1781 1782 b'profiling',
1782 1783 b'type',
1783 1784 default=b'stat',
1784 1785 )
1785 1786 coreconfigitem(
1786 1787 b'progress',
1787 1788 b'assume-tty',
1788 1789 default=False,
1789 1790 )
1790 1791 coreconfigitem(
1791 1792 b'progress',
1792 1793 b'changedelay',
1793 1794 default=1,
1794 1795 )
1795 1796 coreconfigitem(
1796 1797 b'progress',
1797 1798 b'clear-complete',
1798 1799 default=True,
1799 1800 )
1800 1801 coreconfigitem(
1801 1802 b'progress',
1802 1803 b'debug',
1803 1804 default=False,
1804 1805 )
1805 1806 coreconfigitem(
1806 1807 b'progress',
1807 1808 b'delay',
1808 1809 default=3,
1809 1810 )
1810 1811 coreconfigitem(
1811 1812 b'progress',
1812 1813 b'disable',
1813 1814 default=False,
1814 1815 )
1815 1816 coreconfigitem(
1816 1817 b'progress',
1817 1818 b'estimateinterval',
1818 1819 default=60.0,
1819 1820 )
1820 1821 coreconfigitem(
1821 1822 b'progress',
1822 1823 b'format',
1823 1824 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1824 1825 )
1825 1826 coreconfigitem(
1826 1827 b'progress',
1827 1828 b'refresh',
1828 1829 default=0.1,
1829 1830 )
1830 1831 coreconfigitem(
1831 1832 b'progress',
1832 1833 b'width',
1833 1834 default=dynamicdefault,
1834 1835 )
1835 1836 coreconfigitem(
1836 1837 b'pull',
1837 1838 b'confirm',
1838 1839 default=False,
1839 1840 )
1840 1841 coreconfigitem(
1841 1842 b'push',
1842 1843 b'pushvars.server',
1843 1844 default=False,
1844 1845 )
1845 1846 coreconfigitem(
1846 1847 b'rewrite',
1847 1848 b'backup-bundle',
1848 1849 default=True,
1849 1850 alias=[(b'ui', b'history-editing-backup')],
1850 1851 )
1851 1852 coreconfigitem(
1852 1853 b'rewrite',
1853 1854 b'update-timestamp',
1854 1855 default=False,
1855 1856 )
1856 1857 coreconfigitem(
1857 1858 b'rewrite',
1858 1859 b'empty-successor',
1859 1860 default=b'skip',
1860 1861 experimental=True,
1861 1862 )
1862 1863 coreconfigitem(
1863 1864 b'storage',
1864 1865 b'new-repo-backend',
1865 1866 default=b'revlogv1',
1866 1867 experimental=True,
1867 1868 )
1868 1869 coreconfigitem(
1869 1870 b'storage',
1870 1871 b'revlog.optimize-delta-parent-choice',
1871 1872 default=True,
1872 1873 alias=[(b'format', b'aggressivemergedeltas')],
1873 1874 )
1874 1875 # experimental as long as rust is experimental (or a C version is implemented)
1875 1876 coreconfigitem(
1876 1877 b'storage',
1877 1878 b'revlog.persistent-nodemap.mmap',
1878 1879 default=True,
1879 1880 )
1880 1881 # experimental as long as format.use-persistent-nodemap is.
1881 1882 coreconfigitem(
1882 1883 b'storage',
1883 1884 b'revlog.persistent-nodemap.slow-path',
1884 1885 default=b"abort",
1885 1886 )
1886 1887
1887 1888 coreconfigitem(
1888 1889 b'storage',
1889 1890 b'revlog.reuse-external-delta',
1890 1891 default=True,
1891 1892 )
1892 1893 coreconfigitem(
1893 1894 b'storage',
1894 1895 b'revlog.reuse-external-delta-parent',
1895 1896 default=None,
1896 1897 )
1897 1898 coreconfigitem(
1898 1899 b'storage',
1899 1900 b'revlog.zlib.level',
1900 1901 default=None,
1901 1902 )
1902 1903 coreconfigitem(
1903 1904 b'storage',
1904 1905 b'revlog.zstd.level',
1905 1906 default=None,
1906 1907 )
1907 1908 coreconfigitem(
1908 1909 b'server',
1909 1910 b'bookmarks-pushkey-compat',
1910 1911 default=True,
1911 1912 )
1912 1913 coreconfigitem(
1913 1914 b'server',
1914 1915 b'bundle1',
1915 1916 default=True,
1916 1917 )
1917 1918 coreconfigitem(
1918 1919 b'server',
1919 1920 b'bundle1gd',
1920 1921 default=None,
1921 1922 )
1922 1923 coreconfigitem(
1923 1924 b'server',
1924 1925 b'bundle1.pull',
1925 1926 default=None,
1926 1927 )
1927 1928 coreconfigitem(
1928 1929 b'server',
1929 1930 b'bundle1gd.pull',
1930 1931 default=None,
1931 1932 )
1932 1933 coreconfigitem(
1933 1934 b'server',
1934 1935 b'bundle1.push',
1935 1936 default=None,
1936 1937 )
1937 1938 coreconfigitem(
1938 1939 b'server',
1939 1940 b'bundle1gd.push',
1940 1941 default=None,
1941 1942 )
1942 1943 coreconfigitem(
1943 1944 b'server',
1944 1945 b'bundle2.stream',
1945 1946 default=True,
1946 1947 alias=[(b'experimental', b'bundle2.stream')],
1947 1948 )
1948 1949 coreconfigitem(
1949 1950 b'server',
1950 1951 b'compressionengines',
1951 1952 default=list,
1952 1953 )
1953 1954 coreconfigitem(
1954 1955 b'server',
1955 1956 b'concurrent-push-mode',
1956 1957 default=b'check-related',
1957 1958 )
1958 1959 coreconfigitem(
1959 1960 b'server',
1960 1961 b'disablefullbundle',
1961 1962 default=False,
1962 1963 )
1963 1964 coreconfigitem(
1964 1965 b'server',
1965 1966 b'maxhttpheaderlen',
1966 1967 default=1024,
1967 1968 )
1968 1969 coreconfigitem(
1969 1970 b'server',
1970 1971 b'pullbundle',
1971 1972 default=False,
1972 1973 )
1973 1974 coreconfigitem(
1974 1975 b'server',
1975 1976 b'preferuncompressed',
1976 1977 default=False,
1977 1978 )
1978 1979 coreconfigitem(
1979 1980 b'server',
1980 1981 b'streamunbundle',
1981 1982 default=False,
1982 1983 )
1983 1984 coreconfigitem(
1984 1985 b'server',
1985 1986 b'uncompressed',
1986 1987 default=True,
1987 1988 )
1988 1989 coreconfigitem(
1989 1990 b'server',
1990 1991 b'uncompressedallowsecret',
1991 1992 default=False,
1992 1993 )
1993 1994 coreconfigitem(
1994 1995 b'server',
1995 1996 b'view',
1996 1997 default=b'served',
1997 1998 )
1998 1999 coreconfigitem(
1999 2000 b'server',
2000 2001 b'validate',
2001 2002 default=False,
2002 2003 )
2003 2004 coreconfigitem(
2004 2005 b'server',
2005 2006 b'zliblevel',
2006 2007 default=-1,
2007 2008 )
2008 2009 coreconfigitem(
2009 2010 b'server',
2010 2011 b'zstdlevel',
2011 2012 default=3,
2012 2013 )
2013 2014 coreconfigitem(
2014 2015 b'share',
2015 2016 b'pool',
2016 2017 default=None,
2017 2018 )
2018 2019 coreconfigitem(
2019 2020 b'share',
2020 2021 b'poolnaming',
2021 2022 default=b'identity',
2022 2023 )
2023 2024 coreconfigitem(
2024 2025 b'share',
2025 2026 b'safe-mismatch.source-not-safe',
2026 2027 default=b'abort',
2027 2028 )
2028 2029 coreconfigitem(
2029 2030 b'share',
2030 2031 b'safe-mismatch.source-safe',
2031 2032 default=b'abort',
2032 2033 )
2033 2034 coreconfigitem(
2034 2035 b'share',
2035 2036 b'safe-mismatch.source-not-safe.warn',
2036 2037 default=True,
2037 2038 )
2038 2039 coreconfigitem(
2039 2040 b'share',
2040 2041 b'safe-mismatch.source-safe.warn',
2041 2042 default=True,
2042 2043 )
2043 2044 coreconfigitem(
2044 2045 b'shelve',
2045 2046 b'maxbackups',
2046 2047 default=10,
2047 2048 )
2048 2049 coreconfigitem(
2049 2050 b'smtp',
2050 2051 b'host',
2051 2052 default=None,
2052 2053 )
2053 2054 coreconfigitem(
2054 2055 b'smtp',
2055 2056 b'local_hostname',
2056 2057 default=None,
2057 2058 )
2058 2059 coreconfigitem(
2059 2060 b'smtp',
2060 2061 b'password',
2061 2062 default=None,
2062 2063 )
2063 2064 coreconfigitem(
2064 2065 b'smtp',
2065 2066 b'port',
2066 2067 default=dynamicdefault,
2067 2068 )
2068 2069 coreconfigitem(
2069 2070 b'smtp',
2070 2071 b'tls',
2071 2072 default=b'none',
2072 2073 )
2073 2074 coreconfigitem(
2074 2075 b'smtp',
2075 2076 b'username',
2076 2077 default=None,
2077 2078 )
2078 2079 coreconfigitem(
2079 2080 b'sparse',
2080 2081 b'missingwarning',
2081 2082 default=True,
2082 2083 experimental=True,
2083 2084 )
2084 2085 coreconfigitem(
2085 2086 b'subrepos',
2086 2087 b'allowed',
2087 2088 default=dynamicdefault, # to make backporting simpler
2088 2089 )
2089 2090 coreconfigitem(
2090 2091 b'subrepos',
2091 2092 b'hg:allowed',
2092 2093 default=dynamicdefault,
2093 2094 )
2094 2095 coreconfigitem(
2095 2096 b'subrepos',
2096 2097 b'git:allowed',
2097 2098 default=dynamicdefault,
2098 2099 )
2099 2100 coreconfigitem(
2100 2101 b'subrepos',
2101 2102 b'svn:allowed',
2102 2103 default=dynamicdefault,
2103 2104 )
2104 2105 coreconfigitem(
2105 2106 b'templates',
2106 2107 b'.*',
2107 2108 default=None,
2108 2109 generic=True,
2109 2110 )
2110 2111 coreconfigitem(
2111 2112 b'templateconfig',
2112 2113 b'.*',
2113 2114 default=dynamicdefault,
2114 2115 generic=True,
2115 2116 )
2116 2117 coreconfigitem(
2117 2118 b'trusted',
2118 2119 b'groups',
2119 2120 default=list,
2120 2121 )
2121 2122 coreconfigitem(
2122 2123 b'trusted',
2123 2124 b'users',
2124 2125 default=list,
2125 2126 )
2126 2127 coreconfigitem(
2127 2128 b'ui',
2128 2129 b'_usedassubrepo',
2129 2130 default=False,
2130 2131 )
2131 2132 coreconfigitem(
2132 2133 b'ui',
2133 2134 b'allowemptycommit',
2134 2135 default=False,
2135 2136 )
2136 2137 coreconfigitem(
2137 2138 b'ui',
2138 2139 b'archivemeta',
2139 2140 default=True,
2140 2141 )
2141 2142 coreconfigitem(
2142 2143 b'ui',
2143 2144 b'askusername',
2144 2145 default=False,
2145 2146 )
2146 2147 coreconfigitem(
2147 2148 b'ui',
2148 2149 b'available-memory',
2149 2150 default=None,
2150 2151 )
2151 2152
2152 2153 coreconfigitem(
2153 2154 b'ui',
2154 2155 b'clonebundlefallback',
2155 2156 default=False,
2156 2157 )
2157 2158 coreconfigitem(
2158 2159 b'ui',
2159 2160 b'clonebundleprefers',
2160 2161 default=list,
2161 2162 )
2162 2163 coreconfigitem(
2163 2164 b'ui',
2164 2165 b'clonebundles',
2165 2166 default=True,
2166 2167 )
2167 2168 coreconfigitem(
2168 2169 b'ui',
2169 2170 b'color',
2170 2171 default=b'auto',
2171 2172 )
2172 2173 coreconfigitem(
2173 2174 b'ui',
2174 2175 b'commitsubrepos',
2175 2176 default=False,
2176 2177 )
2177 2178 coreconfigitem(
2178 2179 b'ui',
2179 2180 b'debug',
2180 2181 default=False,
2181 2182 )
2182 2183 coreconfigitem(
2183 2184 b'ui',
2184 2185 b'debugger',
2185 2186 default=None,
2186 2187 )
2187 2188 coreconfigitem(
2188 2189 b'ui',
2189 2190 b'editor',
2190 2191 default=dynamicdefault,
2191 2192 )
2192 2193 coreconfigitem(
2193 2194 b'ui',
2194 2195 b'detailed-exit-code',
2195 2196 default=False,
2196 2197 experimental=True,
2197 2198 )
2198 2199 coreconfigitem(
2199 2200 b'ui',
2200 2201 b'fallbackencoding',
2201 2202 default=None,
2202 2203 )
2203 2204 coreconfigitem(
2204 2205 b'ui',
2205 2206 b'forcecwd',
2206 2207 default=None,
2207 2208 )
2208 2209 coreconfigitem(
2209 2210 b'ui',
2210 2211 b'forcemerge',
2211 2212 default=None,
2212 2213 )
2213 2214 coreconfigitem(
2214 2215 b'ui',
2215 2216 b'formatdebug',
2216 2217 default=False,
2217 2218 )
2218 2219 coreconfigitem(
2219 2220 b'ui',
2220 2221 b'formatjson',
2221 2222 default=False,
2222 2223 )
2223 2224 coreconfigitem(
2224 2225 b'ui',
2225 2226 b'formatted',
2226 2227 default=None,
2227 2228 )
2228 2229 coreconfigitem(
2229 2230 b'ui',
2230 2231 b'interactive',
2231 2232 default=None,
2232 2233 )
2233 2234 coreconfigitem(
2234 2235 b'ui',
2235 2236 b'interface',
2236 2237 default=None,
2237 2238 )
2238 2239 coreconfigitem(
2239 2240 b'ui',
2240 2241 b'interface.chunkselector',
2241 2242 default=None,
2242 2243 )
2243 2244 coreconfigitem(
2244 2245 b'ui',
2245 2246 b'large-file-limit',
2246 2247 default=10000000,
2247 2248 )
2248 2249 coreconfigitem(
2249 2250 b'ui',
2250 2251 b'logblockedtimes',
2251 2252 default=False,
2252 2253 )
2253 2254 coreconfigitem(
2254 2255 b'ui',
2255 2256 b'merge',
2256 2257 default=None,
2257 2258 )
2258 2259 coreconfigitem(
2259 2260 b'ui',
2260 2261 b'mergemarkers',
2261 2262 default=b'basic',
2262 2263 )
2263 2264 coreconfigitem(
2264 2265 b'ui',
2265 2266 b'message-output',
2266 2267 default=b'stdio',
2267 2268 )
2268 2269 coreconfigitem(
2269 2270 b'ui',
2270 2271 b'nontty',
2271 2272 default=False,
2272 2273 )
2273 2274 coreconfigitem(
2274 2275 b'ui',
2275 2276 b'origbackuppath',
2276 2277 default=None,
2277 2278 )
2278 2279 coreconfigitem(
2279 2280 b'ui',
2280 2281 b'paginate',
2281 2282 default=True,
2282 2283 )
2283 2284 coreconfigitem(
2284 2285 b'ui',
2285 2286 b'patch',
2286 2287 default=None,
2287 2288 )
2288 2289 coreconfigitem(
2289 2290 b'ui',
2290 2291 b'portablefilenames',
2291 2292 default=b'warn',
2292 2293 )
2293 2294 coreconfigitem(
2294 2295 b'ui',
2295 2296 b'promptecho',
2296 2297 default=False,
2297 2298 )
2298 2299 coreconfigitem(
2299 2300 b'ui',
2300 2301 b'quiet',
2301 2302 default=False,
2302 2303 )
2303 2304 coreconfigitem(
2304 2305 b'ui',
2305 2306 b'quietbookmarkmove',
2306 2307 default=False,
2307 2308 )
2308 2309 coreconfigitem(
2309 2310 b'ui',
2310 2311 b'relative-paths',
2311 2312 default=b'legacy',
2312 2313 )
2313 2314 coreconfigitem(
2314 2315 b'ui',
2315 2316 b'remotecmd',
2316 2317 default=b'hg',
2317 2318 )
2318 2319 coreconfigitem(
2319 2320 b'ui',
2320 2321 b'report_untrusted',
2321 2322 default=True,
2322 2323 )
2323 2324 coreconfigitem(
2324 2325 b'ui',
2325 2326 b'rollback',
2326 2327 default=True,
2327 2328 )
2328 2329 coreconfigitem(
2329 2330 b'ui',
2330 2331 b'signal-safe-lock',
2331 2332 default=True,
2332 2333 )
2333 2334 coreconfigitem(
2334 2335 b'ui',
2335 2336 b'slash',
2336 2337 default=False,
2337 2338 )
2338 2339 coreconfigitem(
2339 2340 b'ui',
2340 2341 b'ssh',
2341 2342 default=b'ssh',
2342 2343 )
2343 2344 coreconfigitem(
2344 2345 b'ui',
2345 2346 b'ssherrorhint',
2346 2347 default=None,
2347 2348 )
2348 2349 coreconfigitem(
2349 2350 b'ui',
2350 2351 b'statuscopies',
2351 2352 default=False,
2352 2353 )
2353 2354 coreconfigitem(
2354 2355 b'ui',
2355 2356 b'strict',
2356 2357 default=False,
2357 2358 )
2358 2359 coreconfigitem(
2359 2360 b'ui',
2360 2361 b'style',
2361 2362 default=b'',
2362 2363 )
2363 2364 coreconfigitem(
2364 2365 b'ui',
2365 2366 b'supportcontact',
2366 2367 default=None,
2367 2368 )
2368 2369 coreconfigitem(
2369 2370 b'ui',
2370 2371 b'textwidth',
2371 2372 default=78,
2372 2373 )
2373 2374 coreconfigitem(
2374 2375 b'ui',
2375 2376 b'timeout',
2376 2377 default=b'600',
2377 2378 )
2378 2379 coreconfigitem(
2379 2380 b'ui',
2380 2381 b'timeout.warn',
2381 2382 default=0,
2382 2383 )
2383 2384 coreconfigitem(
2384 2385 b'ui',
2385 2386 b'timestamp-output',
2386 2387 default=False,
2387 2388 )
2388 2389 coreconfigitem(
2389 2390 b'ui',
2390 2391 b'traceback',
2391 2392 default=False,
2392 2393 )
2393 2394 coreconfigitem(
2394 2395 b'ui',
2395 2396 b'tweakdefaults',
2396 2397 default=False,
2397 2398 )
2398 2399 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2399 2400 coreconfigitem(
2400 2401 b'ui',
2401 2402 b'verbose',
2402 2403 default=False,
2403 2404 )
2404 2405 coreconfigitem(
2405 2406 b'verify',
2406 2407 b'skipflags',
2407 2408 default=None,
2408 2409 )
2409 2410 coreconfigitem(
2410 2411 b'web',
2411 2412 b'allowbz2',
2412 2413 default=False,
2413 2414 )
2414 2415 coreconfigitem(
2415 2416 b'web',
2416 2417 b'allowgz',
2417 2418 default=False,
2418 2419 )
2419 2420 coreconfigitem(
2420 2421 b'web',
2421 2422 b'allow-pull',
2422 2423 alias=[(b'web', b'allowpull')],
2423 2424 default=True,
2424 2425 )
2425 2426 coreconfigitem(
2426 2427 b'web',
2427 2428 b'allow-push',
2428 2429 alias=[(b'web', b'allow_push')],
2429 2430 default=list,
2430 2431 )
2431 2432 coreconfigitem(
2432 2433 b'web',
2433 2434 b'allowzip',
2434 2435 default=False,
2435 2436 )
2436 2437 coreconfigitem(
2437 2438 b'web',
2438 2439 b'archivesubrepos',
2439 2440 default=False,
2440 2441 )
2441 2442 coreconfigitem(
2442 2443 b'web',
2443 2444 b'cache',
2444 2445 default=True,
2445 2446 )
2446 2447 coreconfigitem(
2447 2448 b'web',
2448 2449 b'comparisoncontext',
2449 2450 default=5,
2450 2451 )
2451 2452 coreconfigitem(
2452 2453 b'web',
2453 2454 b'contact',
2454 2455 default=None,
2455 2456 )
2456 2457 coreconfigitem(
2457 2458 b'web',
2458 2459 b'deny_push',
2459 2460 default=list,
2460 2461 )
2461 2462 coreconfigitem(
2462 2463 b'web',
2463 2464 b'guessmime',
2464 2465 default=False,
2465 2466 )
2466 2467 coreconfigitem(
2467 2468 b'web',
2468 2469 b'hidden',
2469 2470 default=False,
2470 2471 )
2471 2472 coreconfigitem(
2472 2473 b'web',
2473 2474 b'labels',
2474 2475 default=list,
2475 2476 )
2476 2477 coreconfigitem(
2477 2478 b'web',
2478 2479 b'logoimg',
2479 2480 default=b'hglogo.png',
2480 2481 )
2481 2482 coreconfigitem(
2482 2483 b'web',
2483 2484 b'logourl',
2484 2485 default=b'https://mercurial-scm.org/',
2485 2486 )
2486 2487 coreconfigitem(
2487 2488 b'web',
2488 2489 b'accesslog',
2489 2490 default=b'-',
2490 2491 )
2491 2492 coreconfigitem(
2492 2493 b'web',
2493 2494 b'address',
2494 2495 default=b'',
2495 2496 )
2496 2497 coreconfigitem(
2497 2498 b'web',
2498 2499 b'allow-archive',
2499 2500 alias=[(b'web', b'allow_archive')],
2500 2501 default=list,
2501 2502 )
2502 2503 coreconfigitem(
2503 2504 b'web',
2504 2505 b'allow_read',
2505 2506 default=list,
2506 2507 )
2507 2508 coreconfigitem(
2508 2509 b'web',
2509 2510 b'baseurl',
2510 2511 default=None,
2511 2512 )
2512 2513 coreconfigitem(
2513 2514 b'web',
2514 2515 b'cacerts',
2515 2516 default=None,
2516 2517 )
2517 2518 coreconfigitem(
2518 2519 b'web',
2519 2520 b'certificate',
2520 2521 default=None,
2521 2522 )
2522 2523 coreconfigitem(
2523 2524 b'web',
2524 2525 b'collapse',
2525 2526 default=False,
2526 2527 )
2527 2528 coreconfigitem(
2528 2529 b'web',
2529 2530 b'csp',
2530 2531 default=None,
2531 2532 )
2532 2533 coreconfigitem(
2533 2534 b'web',
2534 2535 b'deny_read',
2535 2536 default=list,
2536 2537 )
2537 2538 coreconfigitem(
2538 2539 b'web',
2539 2540 b'descend',
2540 2541 default=True,
2541 2542 )
2542 2543 coreconfigitem(
2543 2544 b'web',
2544 2545 b'description',
2545 2546 default=b"",
2546 2547 )
2547 2548 coreconfigitem(
2548 2549 b'web',
2549 2550 b'encoding',
2550 2551 default=lambda: encoding.encoding,
2551 2552 )
2552 2553 coreconfigitem(
2553 2554 b'web',
2554 2555 b'errorlog',
2555 2556 default=b'-',
2556 2557 )
2557 2558 coreconfigitem(
2558 2559 b'web',
2559 2560 b'ipv6',
2560 2561 default=False,
2561 2562 )
2562 2563 coreconfigitem(
2563 2564 b'web',
2564 2565 b'maxchanges',
2565 2566 default=10,
2566 2567 )
2567 2568 coreconfigitem(
2568 2569 b'web',
2569 2570 b'maxfiles',
2570 2571 default=10,
2571 2572 )
2572 2573 coreconfigitem(
2573 2574 b'web',
2574 2575 b'maxshortchanges',
2575 2576 default=60,
2576 2577 )
2577 2578 coreconfigitem(
2578 2579 b'web',
2579 2580 b'motd',
2580 2581 default=b'',
2581 2582 )
2582 2583 coreconfigitem(
2583 2584 b'web',
2584 2585 b'name',
2585 2586 default=dynamicdefault,
2586 2587 )
2587 2588 coreconfigitem(
2588 2589 b'web',
2589 2590 b'port',
2590 2591 default=8000,
2591 2592 )
2592 2593 coreconfigitem(
2593 2594 b'web',
2594 2595 b'prefix',
2595 2596 default=b'',
2596 2597 )
2597 2598 coreconfigitem(
2598 2599 b'web',
2599 2600 b'push_ssl',
2600 2601 default=True,
2601 2602 )
2602 2603 coreconfigitem(
2603 2604 b'web',
2604 2605 b'refreshinterval',
2605 2606 default=20,
2606 2607 )
2607 2608 coreconfigitem(
2608 2609 b'web',
2609 2610 b'server-header',
2610 2611 default=None,
2611 2612 )
2612 2613 coreconfigitem(
2613 2614 b'web',
2614 2615 b'static',
2615 2616 default=None,
2616 2617 )
2617 2618 coreconfigitem(
2618 2619 b'web',
2619 2620 b'staticurl',
2620 2621 default=None,
2621 2622 )
2622 2623 coreconfigitem(
2623 2624 b'web',
2624 2625 b'stripes',
2625 2626 default=1,
2626 2627 )
2627 2628 coreconfigitem(
2628 2629 b'web',
2629 2630 b'style',
2630 2631 default=b'paper',
2631 2632 )
2632 2633 coreconfigitem(
2633 2634 b'web',
2634 2635 b'templates',
2635 2636 default=None,
2636 2637 )
2637 2638 coreconfigitem(
2638 2639 b'web',
2639 2640 b'view',
2640 2641 default=b'served',
2641 2642 experimental=True,
2642 2643 )
2643 2644 coreconfigitem(
2644 2645 b'worker',
2645 2646 b'backgroundclose',
2646 2647 default=dynamicdefault,
2647 2648 )
2648 2649 # Windows defaults to a limit of 512 open files. A buffer of 128
2649 2650 # should give us enough headway.
2650 2651 coreconfigitem(
2651 2652 b'worker',
2652 2653 b'backgroundclosemaxqueue',
2653 2654 default=384,
2654 2655 )
2655 2656 coreconfigitem(
2656 2657 b'worker',
2657 2658 b'backgroundcloseminfilecount',
2658 2659 default=2048,
2659 2660 )
2660 2661 coreconfigitem(
2661 2662 b'worker',
2662 2663 b'backgroundclosethreadcount',
2663 2664 default=4,
2664 2665 )
2665 2666 coreconfigitem(
2666 2667 b'worker',
2667 2668 b'enabled',
2668 2669 default=True,
2669 2670 )
2670 2671 coreconfigitem(
2671 2672 b'worker',
2672 2673 b'numcpus',
2673 2674 default=None,
2674 2675 )
2675 2676
2676 2677 # Rebase related configuration moved to core because other extensions are doing
2677 2678 # strange things. For example, shelve imports the extension to reuse some bits
2678 2679 # without formally loading it.
2679 2680 coreconfigitem(
2680 2681 b'commands',
2681 2682 b'rebase.requiredest',
2682 2683 default=False,
2683 2684 )
2684 2685 coreconfigitem(
2685 2686 b'experimental',
2686 2687 b'rebaseskipobsolete',
2687 2688 default=True,
2688 2689 )
2689 2690 coreconfigitem(
2690 2691 b'rebase',
2691 2692 b'singletransaction',
2692 2693 default=False,
2693 2694 )
2694 2695 coreconfigitem(
2695 2696 b'rebase',
2696 2697 b'experimental.inmemory',
2697 2698 default=False,
2698 2699 )
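The coreconfigitem() declarations above only register names, aliases, and defaults; the values are read back at runtime through the ui object. As a minimal sketch (assuming an already-existing repository in the current directory; this is not code from the changeset), one of the rebase items declared above could be read like this:

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'.')

    # configbool() falls back to the registered default (False here) when the
    # user has not set rebase.singletransaction in any hgrc.
    single_txn = repo.ui.configbool(b'rebase', b'singletransaction')

Because the default lives in this central registry, call sites do not have to repeat it, and reading an item that was never declared typically triggers a devel warning in development mode.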
@@ -1,3769 +1,3771 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullrev,
23 23 sha1nodeconstants,
24 24 short,
25 25 )
26 26 from .pycompat import (
27 27 delattr,
28 28 getattr,
29 29 )
30 30 from . import (
31 31 bookmarks,
32 32 branchmap,
33 33 bundle2,
34 34 bundlecaches,
35 35 changegroup,
36 36 color,
37 37 commit,
38 38 context,
39 39 dirstate,
40 40 dirstateguard,
41 41 discovery,
42 42 encoding,
43 43 error,
44 44 exchange,
45 45 extensions,
46 46 filelog,
47 47 hook,
48 48 lock as lockmod,
49 49 match as matchmod,
50 50 mergestate as mergestatemod,
51 51 mergeutil,
52 52 namespaces,
53 53 narrowspec,
54 54 obsolete,
55 55 pathutil,
56 56 phases,
57 57 pushkey,
58 58 pycompat,
59 59 rcutil,
60 60 repoview,
61 61 requirements as requirementsmod,
62 62 revlog,
63 63 revset,
64 64 revsetlang,
65 65 scmutil,
66 66 sparse,
67 67 store as storemod,
68 68 subrepoutil,
69 69 tags as tagsmod,
70 70 transaction,
71 71 txnutil,
72 72 util,
73 73 vfs as vfsmod,
74 74 wireprototypes,
75 75 )
76 76
77 77 from .interfaces import (
78 78 repository,
79 79 util as interfaceutil,
80 80 )
81 81
82 82 from .utils import (
83 83 hashutil,
84 84 procutil,
85 85 stringutil,
86 86 urlutil,
87 87 )
88 88
89 89 from .revlogutils import (
90 90 concurrency_checker as revlogchecker,
91 91 constants as revlogconst,
92 92 sidedata as sidedatamod,
93 93 )
94 94
95 95 release = lockmod.release
96 96 urlerr = util.urlerr
97 97 urlreq = util.urlreq
98 98
99 99 # set of (path, vfs-location) tuples. vfs-location is:
100 100 # - 'plain' for vfs relative paths
101 101 # - '' for svfs relative paths
102 102 _cachedfiles = set()
103 103
104 104
105 105 class _basefilecache(scmutil.filecache):
106 106 """All filecache usage on repo are done for logic that should be unfiltered"""
107 107
108 108 def __get__(self, repo, type=None):
109 109 if repo is None:
110 110 return self
111 111 # proxy to unfiltered __dict__ since filtered repo has no entry
112 112 unfi = repo.unfiltered()
113 113 try:
114 114 return unfi.__dict__[self.sname]
115 115 except KeyError:
116 116 pass
117 117 return super(_basefilecache, self).__get__(unfi, type)
118 118
119 119 def set(self, repo, value):
120 120 return super(_basefilecache, self).set(repo.unfiltered(), value)
121 121
122 122
123 123 class repofilecache(_basefilecache):
124 124 """filecache for files in .hg but outside of .hg/store"""
125 125
126 126 def __init__(self, *paths):
127 127 super(repofilecache, self).__init__(*paths)
128 128 for path in paths:
129 129 _cachedfiles.add((path, b'plain'))
130 130
131 131 def join(self, obj, fname):
132 132 return obj.vfs.join(fname)
133 133
134 134
135 135 class storecache(_basefilecache):
136 136 """filecache for files in the store"""
137 137
138 138 def __init__(self, *paths):
139 139 super(storecache, self).__init__(*paths)
140 140 for path in paths:
141 141 _cachedfiles.add((path, b''))
142 142
143 143 def join(self, obj, fname):
144 144 return obj.sjoin(fname)
145 145
146 146
147 147 class mixedrepostorecache(_basefilecache):
148 148 """filecache for a mix files in .hg/store and outside"""
149 149
150 150 def __init__(self, *pathsandlocations):
151 151 # scmutil.filecache only uses the path for passing back into our
152 152 # join(), so we can safely pass a list of paths and locations
153 153 super(mixedrepostorecache, self).__init__(*pathsandlocations)
154 154 _cachedfiles.update(pathsandlocations)
155 155
156 156 def join(self, obj, fnameandlocation):
157 157 fname, location = fnameandlocation
158 158 if location == b'plain':
159 159 return obj.vfs.join(fname)
160 160 else:
161 161 if location != b'':
162 162 raise error.ProgrammingError(
163 163 b'unexpected location: %s' % location
164 164 )
165 165 return obj.sjoin(fname)
166 166
167 167
168 168 def isfilecached(repo, name):
169 169 """check if a repo has already cached "name" filecache-ed property
170 170
171 171 This returns (cachedobj-or-None, iscached) tuple.
172 172 """
173 173 cacheentry = repo.unfiltered()._filecache.get(name, None)
174 174 if not cacheentry:
175 175 return None, False
176 176 return cacheentry.obj, True
177 177
178 178
179 179 class unfilteredpropertycache(util.propertycache):
180 180 """propertycache that apply to unfiltered repo only"""
181 181
182 182 def __get__(self, repo, type=None):
183 183 unfi = repo.unfiltered()
184 184 if unfi is repo:
185 185 return super(unfilteredpropertycache, self).__get__(unfi)
186 186 return getattr(unfi, self.name)
187 187
188 188
189 189 class filteredpropertycache(util.propertycache):
190 190 """propertycache that must take filtering in account"""
191 191
192 192 def cachevalue(self, obj, value):
193 193 object.__setattr__(obj, self.name, value)
194 194
195 195
196 196 def hasunfilteredcache(repo, name):
197 197 """check if a repo has an unfilteredpropertycache value for <name>"""
198 198 return name in vars(repo.unfiltered())
199 199
200 200
201 201 def unfilteredmethod(orig):
202 202 """decorate method that always need to be run on unfiltered version"""
203 203
204 204 @functools.wraps(orig)
205 205 def wrapper(repo, *args, **kwargs):
206 206 return orig(repo.unfiltered(), *args, **kwargs)
207 207
208 208 return wrapper
209 209
210 210
211 211 moderncaps = {
212 212 b'lookup',
213 213 b'branchmap',
214 214 b'pushkey',
215 215 b'known',
216 216 b'getbundle',
217 217 b'unbundle',
218 218 }
219 219 legacycaps = moderncaps.union({b'changegroupsubset'})
220 220
221 221
222 222 @interfaceutil.implementer(repository.ipeercommandexecutor)
223 223 class localcommandexecutor(object):
224 224 def __init__(self, peer):
225 225 self._peer = peer
226 226 self._sent = False
227 227 self._closed = False
228 228
229 229 def __enter__(self):
230 230 return self
231 231
232 232 def __exit__(self, exctype, excvalue, exctb):
233 233 self.close()
234 234
235 235 def callcommand(self, command, args):
236 236 if self._sent:
237 237 raise error.ProgrammingError(
238 238 b'callcommand() cannot be used after sendcommands()'
239 239 )
240 240
241 241 if self._closed:
242 242 raise error.ProgrammingError(
243 243 b'callcommand() cannot be used after close()'
244 244 )
245 245
246 246 # We don't need to support anything fancy. Just call the named
247 247 # method on the peer and return a resolved future.
248 248 fn = getattr(self._peer, pycompat.sysstr(command))
249 249
250 250 f = pycompat.futures.Future()
251 251
252 252 try:
253 253 result = fn(**pycompat.strkwargs(args))
254 254 except Exception:
255 255 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
256 256 else:
257 257 f.set_result(result)
258 258
259 259 return f
260 260
261 261 def sendcommands(self):
262 262 self._sent = True
263 263
264 264 def close(self):
265 265 self._closed = True
266 266
267 267
268 268 @interfaceutil.implementer(repository.ipeercommands)
269 269 class localpeer(repository.peer):
270 270 '''peer for a local repo; reflects only the most recent API'''
271 271
272 272 def __init__(self, repo, caps=None):
273 273 super(localpeer, self).__init__()
274 274
275 275 if caps is None:
276 276 caps = moderncaps.copy()
277 277 self._repo = repo.filtered(b'served')
278 278 self.ui = repo.ui
279 279
280 280 if repo._wanted_sidedata:
281 281 formatted = bundle2.format_remote_wanted_sidedata(repo)
282 282 caps.add(b'exp-wanted-sidedata=' + formatted)
283 283
284 284 self._caps = repo._restrictcapabilities(caps)
285 285
286 286 # Begin of _basepeer interface.
287 287
288 288 def url(self):
289 289 return self._repo.url()
290 290
291 291 def local(self):
292 292 return self._repo
293 293
294 294 def peer(self):
295 295 return self
296 296
297 297 def canpush(self):
298 298 return True
299 299
300 300 def close(self):
301 301 self._repo.close()
302 302
303 303 # End of _basepeer interface.
304 304
305 305 # Begin of _basewirecommands interface.
306 306
307 307 def branchmap(self):
308 308 return self._repo.branchmap()
309 309
310 310 def capabilities(self):
311 311 return self._caps
312 312
313 313 def clonebundles(self):
314 314 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
315 315
316 316 def debugwireargs(self, one, two, three=None, four=None, five=None):
317 317 """Used to test argument passing over the wire"""
318 318 return b"%s %s %s %s %s" % (
319 319 one,
320 320 two,
321 321 pycompat.bytestr(three),
322 322 pycompat.bytestr(four),
323 323 pycompat.bytestr(five),
324 324 )
325 325
326 326 def getbundle(
327 327 self,
328 328 source,
329 329 heads=None,
330 330 common=None,
331 331 bundlecaps=None,
332 332 remote_sidedata=None,
333 333 **kwargs
334 334 ):
335 335 chunks = exchange.getbundlechunks(
336 336 self._repo,
337 337 source,
338 338 heads=heads,
339 339 common=common,
340 340 bundlecaps=bundlecaps,
341 341 remote_sidedata=remote_sidedata,
342 342 **kwargs
343 343 )[1]
344 344 cb = util.chunkbuffer(chunks)
345 345
346 346 if exchange.bundle2requested(bundlecaps):
347 347 # When requesting a bundle2, getbundle returns a stream to make the
348 348 # wire level function happier. We need to build a proper object
349 349 # from it in local peer.
350 350 return bundle2.getunbundler(self.ui, cb)
351 351 else:
352 352 return changegroup.getunbundler(b'01', cb, None)
353 353
354 354 def heads(self):
355 355 return self._repo.heads()
356 356
357 357 def known(self, nodes):
358 358 return self._repo.known(nodes)
359 359
360 360 def listkeys(self, namespace):
361 361 return self._repo.listkeys(namespace)
362 362
363 363 def lookup(self, key):
364 364 return self._repo.lookup(key)
365 365
366 366 def pushkey(self, namespace, key, old, new):
367 367 return self._repo.pushkey(namespace, key, old, new)
368 368
369 369 def stream_out(self):
370 370 raise error.Abort(_(b'cannot perform stream clone against local peer'))
371 371
372 372 def unbundle(self, bundle, heads, url):
373 373 """apply a bundle on a repo
374 374
375 375 This function handles the repo locking itself."""
376 376 try:
377 377 try:
378 378 bundle = exchange.readbundle(self.ui, bundle, None)
379 379 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
380 380 if util.safehasattr(ret, b'getchunks'):
381 381 # This is a bundle20 object, turn it into an unbundler.
382 382 # This little dance should be dropped eventually when the
383 383 # API is finally improved.
384 384 stream = util.chunkbuffer(ret.getchunks())
385 385 ret = bundle2.getunbundler(self.ui, stream)
386 386 return ret
387 387 except Exception as exc:
388 388 # If the exception contains output salvaged from a bundle2
389 389 # reply, we need to make sure it is printed before continuing
390 390 # to fail. So we build a bundle2 with such output and consume
391 391 # it directly.
392 392 #
393 393 # This is not very elegant but allows a "simple" solution for
394 394 # issue4594
395 395 output = getattr(exc, '_bundle2salvagedoutput', ())
396 396 if output:
397 397 bundler = bundle2.bundle20(self._repo.ui)
398 398 for out in output:
399 399 bundler.addpart(out)
400 400 stream = util.chunkbuffer(bundler.getchunks())
401 401 b = bundle2.getunbundler(self.ui, stream)
402 402 bundle2.processbundle(self._repo, b)
403 403 raise
404 404 except error.PushRaced as exc:
405 405 raise error.ResponseError(
406 406 _(b'push failed:'), stringutil.forcebytestr(exc)
407 407 )
408 408
409 409 # End of _basewirecommands interface.
410 410
411 411 # Begin of peer interface.
412 412
413 413 def commandexecutor(self):
414 414 return localcommandexecutor(self)
415 415
416 416 # End of peer interface.
417 417
418 418
419 419 @interfaceutil.implementer(repository.ipeerlegacycommands)
420 420 class locallegacypeer(localpeer):
421 421 """peer extension which implements legacy methods too; used for tests with
422 422 restricted capabilities"""
423 423
424 424 def __init__(self, repo):
425 425 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
426 426
427 427 # Begin of baselegacywirecommands interface.
428 428
429 429 def between(self, pairs):
430 430 return self._repo.between(pairs)
431 431
432 432 def branches(self, nodes):
433 433 return self._repo.branches(nodes)
434 434
435 435 def changegroup(self, nodes, source):
436 436 outgoing = discovery.outgoing(
437 437 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
438 438 )
439 439 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
440 440
441 441 def changegroupsubset(self, bases, heads, source):
442 442 outgoing = discovery.outgoing(
443 443 self._repo, missingroots=bases, ancestorsof=heads
444 444 )
445 445 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
446 446
447 447 # End of baselegacywirecommands interface.
448 448
449 449
450 450 # Functions receiving (ui, features) that extensions can register to impact
451 451 # the ability to load repositories with custom requirements. Only
452 452 # functions defined in loaded extensions are called.
453 453 #
454 454 # The function receives a set of requirement strings that the repository
455 455 # is capable of opening. Functions will typically add elements to the
456 456 # set to reflect that the extension knows how to handle those requirements.
457 457 featuresetupfuncs = set()
458 458
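# A hedged, extension-side sketch (not code from this changeset): an
# extension that introduces its own repository requirement would typically
# register a hook here so the requirement is recognized when repositories
# are opened with that extension loaded, e.g.:
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myfeature')  # hypothetical requirement name
#
#     localrepo.featuresetupfuncs.add(featuresetup)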
459 459
460 460 def _getsharedvfs(hgvfs, requirements):
461 461 """returns the vfs object pointing to root of shared source
462 462 repo for a shared repository
463 463
464 464 hgvfs is vfs pointing at .hg/ of current repo (shared one)
465 465 requirements is a set of requirements of current repo (shared one)
466 466 """
467 467 # The ``shared`` or ``relshared`` requirements indicate the
468 468 # store lives in the path contained in the ``.hg/sharedpath`` file.
469 469 # This is an absolute path for ``shared`` and relative to
470 470 # ``.hg/`` for ``relshared``.
471 471 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
472 472 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
473 473 sharedpath = util.normpath(hgvfs.join(sharedpath))
474 474
475 475 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
476 476
477 477 if not sharedvfs.exists():
478 478 raise error.RepoError(
479 479 _(b'.hg/sharedpath points to nonexistent directory %s')
480 480 % sharedvfs.base
481 481 )
482 482 return sharedvfs
483 483
484 484
485 485 def _readrequires(vfs, allowmissing):
486 486 """reads the require file present at root of this vfs
487 487 and returns a set of requirements
488 488
489 489 If allowmissing is True, we suppress ENOENT if raised"""
490 490 # requires file contains a newline-delimited list of
491 491 # features/capabilities the opener (us) must have in order to use
492 492 # the repository. This file was introduced in Mercurial 0.9.2,
493 493 # which means very old repositories may not have one. We assume
494 494 # a missing file translates to no requirements.
495 495 try:
496 496 requirements = set(vfs.read(b'requires').splitlines())
497 497 except IOError as e:
498 498 if not (allowmissing and e.errno == errno.ENOENT):
499 499 raise
500 500 requirements = set()
501 501 return requirements
502 502
503 503
504 504 def makelocalrepository(baseui, path, intents=None):
505 505 """Create a local repository object.
506 506
507 507 Given arguments needed to construct a local repository, this function
508 508 performs various early repository loading functionality (such as
509 509 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
510 510 the repository can be opened, derives a type suitable for representing
511 511 that repository, and returns an instance of it.
512 512
513 513 The returned object conforms to the ``repository.completelocalrepository``
514 514 interface.
515 515
516 516 The repository type is derived by calling a series of factory functions
517 517 for each aspect/interface of the final repository. These are defined by
518 518 ``REPO_INTERFACES``.
519 519
520 520 Each factory function is called to produce a type implementing a specific
521 521 interface. The cumulative list of returned types will be combined into a
522 522 new type and that type will be instantiated to represent the local
523 523 repository.
524 524
525 525 The factory functions each receive various state that may be consulted
526 526 as part of deriving a type.
527 527
528 528 Extensions should wrap these factory functions to customize repository type
529 529 creation. Note that an extension's wrapped function may be called even if
530 530 that extension is not loaded for the repo being constructed. Extensions
531 531 should check if their ``__name__`` appears in the
532 532 ``extensionmodulenames`` set passed to the factory function and no-op if
533 533 not.
534 534 """
535 535 ui = baseui.copy()
536 536 # Prevent copying repo configuration.
537 537 ui.copy = baseui.copy
538 538
539 539 # Working directory VFS rooted at repository root.
540 540 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
541 541
542 542 # Main VFS for .hg/ directory.
543 543 hgpath = wdirvfs.join(b'.hg')
544 544 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
545 545 # Whether this repository is a shared one or not
546 546 shared = False
547 547 # If this repository is shared, vfs pointing to shared repo
548 548 sharedvfs = None
549 549
550 550 # The .hg/ path should exist and should be a directory. All other
551 551 # cases are errors.
552 552 if not hgvfs.isdir():
553 553 try:
554 554 hgvfs.stat()
555 555 except OSError as e:
556 556 if e.errno != errno.ENOENT:
557 557 raise
558 558 except ValueError as e:
559 559 # Can be raised on Python 3.8 when path is invalid.
560 560 raise error.Abort(
561 561 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
562 562 )
563 563
564 564 raise error.RepoError(_(b'repository %s not found') % path)
565 565
566 566 requirements = _readrequires(hgvfs, True)
567 567 shared = (
568 568 requirementsmod.SHARED_REQUIREMENT in requirements
569 569 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
570 570 )
571 571 storevfs = None
572 572 if shared:
573 573 # This is a shared repo
574 574 sharedvfs = _getsharedvfs(hgvfs, requirements)
575 575 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
576 576 else:
577 577 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
578 578
579 579 # if .hg/requires contains the sharesafe requirement, it means
580 580 # there exists a `.hg/store/requires` too and we should read it
581 581 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
582 582 # is present. We never write SHARESAFE_REQUIREMENT for a repo if the store
583 583 # requirement is not present; refer to checkrequirementscompat() for that
584 584 #
585 585 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
586 586 # repository was shared the old way. We check the share source .hg/requires
587 587 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
588 588 # to be reshared
589 589 hint = _(b"see `hg help config.format.use-share-safe` for more information")
590 590 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
591 591
592 592 if (
593 593 shared
594 594 and requirementsmod.SHARESAFE_REQUIREMENT
595 595 not in _readrequires(sharedvfs, True)
596 596 ):
597 597 mismatch_warn = ui.configbool(
598 598 b'share', b'safe-mismatch.source-not-safe.warn'
599 599 )
600 600 mismatch_config = ui.config(
601 601 b'share', b'safe-mismatch.source-not-safe'
602 602 )
603 603 if mismatch_config in (
604 604 b'downgrade-allow',
605 605 b'allow',
606 606 b'downgrade-abort',
607 607 ):
608 608 # prevent cyclic import localrepo -> upgrade -> localrepo
609 609 from . import upgrade
610 610
611 611 upgrade.downgrade_share_to_non_safe(
612 612 ui,
613 613 hgvfs,
614 614 sharedvfs,
615 615 requirements,
616 616 mismatch_config,
617 617 mismatch_warn,
618 618 )
619 619 elif mismatch_config == b'abort':
620 620 raise error.Abort(
621 621 _(b"share source does not support share-safe requirement"),
622 622 hint=hint,
623 623 )
624 624 else:
625 625 raise error.Abort(
626 626 _(
627 627 b"share-safe mismatch with source.\nUnrecognized"
628 628 b" value '%s' of `share.safe-mismatch.source-not-safe`"
629 629 b" set."
630 630 )
631 631 % mismatch_config,
632 632 hint=hint,
633 633 )
634 634 else:
635 635 requirements |= _readrequires(storevfs, False)
636 636 elif shared:
637 637 sourcerequires = _readrequires(sharedvfs, False)
638 638 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
639 639 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
640 640 mismatch_warn = ui.configbool(
641 641 b'share', b'safe-mismatch.source-safe.warn'
642 642 )
643 643 if mismatch_config in (
644 644 b'upgrade-allow',
645 645 b'allow',
646 646 b'upgrade-abort',
647 647 ):
648 648 # prevent cyclic import localrepo -> upgrade -> localrepo
649 649 from . import upgrade
650 650
651 651 upgrade.upgrade_share_to_safe(
652 652 ui,
653 653 hgvfs,
654 654 storevfs,
655 655 requirements,
656 656 mismatch_config,
657 657 mismatch_warn,
658 658 )
659 659 elif mismatch_config == b'abort':
660 660 raise error.Abort(
661 661 _(
662 662 b'version mismatch: source uses share-safe'
663 663 b' functionality while the current share does not'
664 664 ),
665 665 hint=hint,
666 666 )
667 667 else:
668 668 raise error.Abort(
669 669 _(
670 670 b"share-safe mismatch with source.\nUnrecognized"
671 671 b" value '%s' of `share.safe-mismatch.source-safe` set."
672 672 )
673 673 % mismatch_config,
674 674 hint=hint,
675 675 )
676 676
677 677 # The .hg/hgrc file may load extensions or contain config options
678 678 # that influence repository construction. Attempt to load it and
679 679 # process any new extensions that it may have pulled in.
680 680 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
681 681 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
682 682 extensions.loadall(ui)
683 683 extensions.populateui(ui)
684 684
685 685 # Set of module names of extensions loaded for this repository.
686 686 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
687 687
688 688 supportedrequirements = gathersupportedrequirements(ui)
689 689
690 690 # We first validate the requirements are known.
691 691 ensurerequirementsrecognized(requirements, supportedrequirements)
692 692
693 693 # Then we validate that the known set is reasonable to use together.
694 694 ensurerequirementscompatible(ui, requirements)
695 695
696 696 # TODO there are unhandled edge cases related to opening repositories with
697 697 # shared storage. If storage is shared, we should also test for requirements
698 698 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
699 699 # that repo, as that repo may load extensions needed to open it. This is a
700 700 # bit complicated because we don't want the other hgrc to overwrite settings
701 701 # in this hgrc.
702 702 #
703 703 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
704 704 # file when sharing repos. But if a requirement is added after the share is
705 705 # performed, thereby introducing a new requirement for the opener, we may
706 706 # not see that and could encounter a run-time error interacting with
707 707 # that shared store since it has an unknown-to-us requirement.
708 708
709 709 # At this point, we know we should be capable of opening the repository.
710 710 # Now get on with doing that.
711 711
712 712 features = set()
713 713
714 714 # The "store" part of the repository holds versioned data. How it is
715 715 # accessed is determined by various requirements. If `shared` or
716 718 # `relshared` requirements are present, this indicates the current repository
717 719 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
718 718 if shared:
719 719 storebasepath = sharedvfs.base
720 720 cachepath = sharedvfs.join(b'cache')
721 721 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
722 722 else:
723 723 storebasepath = hgvfs.base
724 724 cachepath = hgvfs.join(b'cache')
725 725 wcachepath = hgvfs.join(b'wcache')
726 726
727 727 # The store has changed over time and the exact layout is dictated by
728 728 # requirements. The store interface abstracts differences across all
729 729 # of them.
730 730 store = makestore(
731 731 requirements,
732 732 storebasepath,
733 733 lambda base: vfsmod.vfs(base, cacheaudited=True),
734 734 )
735 735 hgvfs.createmode = store.createmode
736 736
737 737 storevfs = store.vfs
738 738 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
739 739
740 740 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
741 741 features.add(repository.REPO_FEATURE_SIDE_DATA)
742 # the revlogv2 docket introduced a race condition that we need to fix
743 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
742 744
743 745 # The cache vfs is used to manage cache files.
744 746 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
745 747 cachevfs.createmode = store.createmode
746 748 # The cache vfs is used to manage cache files related to the working copy
747 749 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
748 750 wcachevfs.createmode = store.createmode
749 751
750 752 # Now resolve the type for the repository object. We do this by repeatedly
751 753 # calling a factory function to produces types for specific aspects of the
752 754 # repo's operation. The aggregate returned types are used as base classes
753 755 # for a dynamically-derived type, which will represent our new repository.
754 756
755 757 bases = []
756 758 extrastate = {}
757 759
758 760 for iface, fn in REPO_INTERFACES:
759 761 # We pass all potentially useful state to give extensions tons of
760 762 # flexibility.
761 763 typ = fn()(
762 764 ui=ui,
763 765 intents=intents,
764 766 requirements=requirements,
765 767 features=features,
766 768 wdirvfs=wdirvfs,
767 769 hgvfs=hgvfs,
768 770 store=store,
769 771 storevfs=storevfs,
770 772 storeoptions=storevfs.options,
771 773 cachevfs=cachevfs,
772 774 wcachevfs=wcachevfs,
773 775 extensionmodulenames=extensionmodulenames,
774 776 extrastate=extrastate,
775 777 baseclasses=bases,
776 778 )
777 779
778 780 if not isinstance(typ, type):
779 781 raise error.ProgrammingError(
780 782 b'unable to construct type for %s' % iface
781 783 )
782 784
783 785 bases.append(typ)
784 786
785 787 # type() allows you to use characters in type names that wouldn't be
786 788 # recognized as Python symbols in source code. We abuse that to add
787 789 # rich information about our constructed repo.
788 790 name = pycompat.sysstr(
789 791 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
790 792 )
791 793
792 794 cls = type(name, tuple(bases), {})
793 795
794 796 return cls(
795 797 baseui=baseui,
796 798 ui=ui,
797 799 origroot=path,
798 800 wdirvfs=wdirvfs,
799 801 hgvfs=hgvfs,
800 802 requirements=requirements,
801 803 supportedrequirements=supportedrequirements,
802 804 sharedpath=storebasepath,
803 805 store=store,
804 806 cachevfs=cachevfs,
805 807 wcachevfs=wcachevfs,
806 808 features=features,
807 809 intents=intents,
808 810 )
809 811
810 812
811 813 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
812 814 """Load hgrc files/content into a ui instance.
813 815
814 816 This is called during repository opening to load any additional
815 817 config files or settings relevant to the current repository.
816 818
817 819 Returns a bool indicating whether any additional configs were loaded.
818 820
819 821 Extensions should monkeypatch this function to modify how per-repo
820 822 configs are loaded. For example, an extension may wish to pull in
821 823 configs from alternate files or sources.
822 824
823 825 sharedvfs is vfs object pointing to source repo if the current one is a
824 826 shared one
825 827 """
826 828 if not rcutil.use_repo_hgrc():
827 829 return False
828 830
829 831 ret = False
830 832 # first load config from the shared source if we have to
831 833 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
832 834 try:
833 835 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
834 836 ret = True
835 837 except IOError:
836 838 pass
837 839
838 840 try:
839 841 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
840 842 ret = True
841 843 except IOError:
842 844 pass
843 845
844 846 try:
845 847 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
846 848 ret = True
847 849 except IOError:
848 850 pass
849 851
850 852 return ret
851 853
852 854
853 855 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
854 856 """Perform additional actions after .hg/hgrc is loaded.
855 857
856 858 This function is called during repository loading immediately after
857 859 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
858 860
859 861 The function can be used to validate configs, automatically add
860 862 options (including extensions) based on requirements, etc.
861 863 """
862 864
863 865 # Map of requirements to list of extensions to load automatically when
864 866 # requirement is present.
865 867 autoextensions = {
866 868 b'git': [b'git'],
867 869 b'largefiles': [b'largefiles'],
868 870 b'lfs': [b'lfs'],
869 871 }
870 872
871 873 for requirement, names in sorted(autoextensions.items()):
872 874 if requirement not in requirements:
873 875 continue
874 876
875 877 for name in names:
876 878 if not ui.hasconfig(b'extensions', name):
877 879 ui.setconfig(b'extensions', name, b'', source=b'autoload')
878 880
879 881
880 882 def gathersupportedrequirements(ui):
881 883 """Determine the complete set of recognized requirements."""
882 884 # Start with all requirements supported by this file.
883 885 supported = set(localrepository._basesupported)
884 886
885 887 # Execute ``featuresetupfuncs`` entries if they belong to an extension
886 888 # relevant to this ui instance.
887 889 modules = {m.__name__ for n, m in extensions.extensions(ui)}
888 890
889 891 for fn in featuresetupfuncs:
890 892 if fn.__module__ in modules:
891 893 fn(ui, supported)
892 894
893 895 # Add derived requirements from registered compression engines.
894 896 for name in util.compengines:
895 897 engine = util.compengines[name]
896 898 if engine.available() and engine.revlogheader():
897 899 supported.add(b'exp-compression-%s' % name)
898 900 if engine.name() == b'zstd':
899 901 supported.add(b'revlog-compression-zstd')
900 902
901 903 return supported
902 904
903 905
904 906 def ensurerequirementsrecognized(requirements, supported):
905 907 """Validate that a set of local requirements is recognized.
906 908
907 909 Receives a set of requirements. Raises an ``error.RepoError`` if there
908 910 exists any requirement in that set that currently loaded code doesn't
909 911 recognize.
910 912
911 913 Returns a set of supported requirements.
912 914 """
913 915 missing = set()
914 916
915 917 for requirement in requirements:
916 918 if requirement in supported:
917 919 continue
918 920
919 921 if not requirement or not requirement[0:1].isalnum():
920 922 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
921 923
922 924 missing.add(requirement)
923 925
924 926 if missing:
925 927 raise error.RequirementError(
926 928 _(b'repository requires features unknown to this Mercurial: %s')
927 929 % b' '.join(sorted(missing)),
928 930 hint=_(
929 931 b'see https://mercurial-scm.org/wiki/MissingRequirement '
930 932 b'for more information'
931 933 ),
932 934 )
933 935
934 936
935 937 def ensurerequirementscompatible(ui, requirements):
936 938 """Validates that a set of recognized requirements is mutually compatible.
937 939
938 940 Some requirements may not be compatible with others or require
939 941 config options that aren't enabled. This function is called during
940 942 repository opening to ensure that the set of requirements needed
941 943 to open a repository is sane and compatible with config options.
942 944
943 945 Extensions can monkeypatch this function to perform additional
944 946 checking.
945 947
946 948 ``error.RepoError`` should be raised on failure.
947 949 """
948 950 if (
949 951 requirementsmod.SPARSE_REQUIREMENT in requirements
950 952 and not sparse.enabled
951 953 ):
952 954 raise error.RepoError(
953 955 _(
954 956 b'repository is using sparse feature but '
955 957 b'sparse is not enabled; enable the '
956 958 b'"sparse" extensions to access'
957 959 )
958 960 )
959 961
960 962
961 963 def makestore(requirements, path, vfstype):
962 964 """Construct a storage object for a repository."""
963 965 if requirementsmod.STORE_REQUIREMENT in requirements:
964 966 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
965 967 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
966 968 return storemod.fncachestore(path, vfstype, dotencode)
967 969
968 970 return storemod.encodedstore(path, vfstype)
969 971
970 972 return storemod.basicstore(path, vfstype)
971 973
972 974
973 975 def resolvestorevfsoptions(ui, requirements, features):
974 976 """Resolve the options to pass to the store vfs opener.
975 977
976 978 The returned dict is used to influence behavior of the storage layer.
977 979 """
978 980 options = {}
979 981
980 982 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
981 983 options[b'treemanifest'] = True
982 984
983 985 # experimental config: format.manifestcachesize
984 986 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
985 987 if manifestcachesize is not None:
986 988 options[b'manifestcachesize'] = manifestcachesize
987 989
988 990 # In the absence of another requirement superseding a revlog-related
989 991 # requirement, we have to assume the repo is using revlog version 0.
990 992 # This revlog format is super old and we don't bother trying to parse
991 993 # opener options for it because those options wouldn't do anything
992 994 # meaningful on such old repos.
993 995 if (
994 996 requirementsmod.REVLOGV1_REQUIREMENT in requirements
995 997 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
996 998 ):
997 999 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
998 1000 else: # explicitly mark repo as using revlogv0
999 1001 options[b'revlogv0'] = True
1000 1002
1001 1003 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1002 1004 options[b'copies-storage'] = b'changeset-sidedata'
1003 1005 else:
1004 1006 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1005 1007 copiesextramode = (b'changeset-only', b'compatibility')
1006 1008 if writecopiesto in copiesextramode:
1007 1009 options[b'copies-storage'] = b'extra'
1008 1010
1009 1011 return options
1010 1012
1011 1013
1012 1014 def resolverevlogstorevfsoptions(ui, requirements, features):
1013 1015 """Resolve opener options specific to revlogs."""
1014 1016
1015 1017 options = {}
1016 1018 options[b'flagprocessors'] = {}
1017 1019
1018 1020 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1019 1021 options[b'revlogv1'] = True
1020 1022 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1021 1023 options[b'revlogv2'] = True
1022 1024
1023 1025 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1024 1026 options[b'generaldelta'] = True
1025 1027
1026 1028 # experimental config: format.chunkcachesize
1027 1029 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1028 1030 if chunkcachesize is not None:
1029 1031 options[b'chunkcachesize'] = chunkcachesize
1030 1032
1031 1033 deltabothparents = ui.configbool(
1032 1034 b'storage', b'revlog.optimize-delta-parent-choice'
1033 1035 )
1034 1036 options[b'deltabothparents'] = deltabothparents
1035 1037
1036 1038 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1037 1039 lazydeltabase = False
1038 1040 if lazydelta:
1039 1041 lazydeltabase = ui.configbool(
1040 1042 b'storage', b'revlog.reuse-external-delta-parent'
1041 1043 )
1042 1044 if lazydeltabase is None:
1043 1045 lazydeltabase = not scmutil.gddeltaconfig(ui)
1044 1046 options[b'lazydelta'] = lazydelta
1045 1047 options[b'lazydeltabase'] = lazydeltabase
1046 1048
1047 1049 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1048 1050 if 0 <= chainspan:
1049 1051 options[b'maxdeltachainspan'] = chainspan
1050 1052
1051 1053 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1052 1054 if mmapindexthreshold is not None:
1053 1055 options[b'mmapindexthreshold'] = mmapindexthreshold
1054 1056
1055 1057 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1056 1058 srdensitythres = float(
1057 1059 ui.config(b'experimental', b'sparse-read.density-threshold')
1058 1060 )
1059 1061 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1060 1062 options[b'with-sparse-read'] = withsparseread
1061 1063 options[b'sparse-read-density-threshold'] = srdensitythres
1062 1064 options[b'sparse-read-min-gap-size'] = srmingapsize
1063 1065
1064 1066 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1065 1067 options[b'sparse-revlog'] = sparserevlog
1066 1068 if sparserevlog:
1067 1069 options[b'generaldelta'] = True
1068 1070
1069 1071 maxchainlen = None
1070 1072 if sparserevlog:
1071 1073 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1072 1074 # experimental config: format.maxchainlen
1073 1075 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1074 1076 if maxchainlen is not None:
1075 1077 options[b'maxchainlen'] = maxchainlen
1076 1078
1077 1079 for r in requirements:
1078 1080 # we allow multiple compression engine requirements to co-exist because,
1079 1081 # strictly speaking, revlog seems to support mixed compression styles.
1080 1082 #
1081 1083 # The compression used for new entries will be "the last one"
1082 1084 prefix = r.startswith
1083 1085 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1084 1086 options[b'compengine'] = r.split(b'-', 2)[2]
1085 1087
1086 1088 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1087 1089 if options[b'zlib.level'] is not None:
1088 1090 if not (0 <= options[b'zlib.level'] <= 9):
1089 1091 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1090 1092 raise error.Abort(msg % options[b'zlib.level'])
1091 1093 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1092 1094 if options[b'zstd.level'] is not None:
1093 1095 if not (0 <= options[b'zstd.level'] <= 22):
1094 1096 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1095 1097 raise error.Abort(msg % options[b'zstd.level'])
1096 1098
1097 1099 if requirementsmod.NARROW_REQUIREMENT in requirements:
1098 1100 options[b'enableellipsis'] = True
1099 1101
1100 1102 if ui.configbool(b'experimental', b'rust.index'):
1101 1103 options[b'rust.index'] = True
1102 1104 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1103 1105 slow_path = ui.config(
1104 1106 b'storage', b'revlog.persistent-nodemap.slow-path'
1105 1107 )
1106 1108 if slow_path not in (b'allow', b'warn', b'abort'):
1107 1109 default = ui.config_default(
1108 1110 b'storage', b'revlog.persistent-nodemap.slow-path'
1109 1111 )
1110 1112 msg = _(
1111 1113 b'unknown value for config '
1112 1114 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1113 1115 )
1114 1116 ui.warn(msg % slow_path)
1115 1117 if not ui.quiet:
1116 1118 ui.warn(_(b'falling back to default value: %s\n') % default)
1117 1119 slow_path = default
1118 1120
1119 1121 msg = _(
1120 1122 b"accessing `persistent-nodemap` repository without associated "
1121 1123 b"fast implementation."
1122 1124 )
1123 1125 hint = _(
1124 1126 b"check `hg help config.format.use-persistent-nodemap` "
1125 1127 b"for details"
1126 1128 )
1127 1129 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1128 1130 if slow_path == b'warn':
1129 1131 msg = b"warning: " + msg + b'\n'
1130 1132 ui.warn(msg)
1131 1133 if not ui.quiet:
1132 1134 hint = b'(' + hint + b')\n'
1133 1135 ui.warn(hint)
1134 1136 if slow_path == b'abort':
1135 1137 raise error.Abort(msg, hint=hint)
1136 1138 options[b'persistent-nodemap'] = True
1137 1139 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1138 1140 options[b'persistent-nodemap.mmap'] = True
1139 1141 if ui.configbool(b'devel', b'persistent-nodemap'):
1140 1142 options[b'devel-force-nodemap'] = True
1141 1143
1142 1144 return options
1143 1145
1144 1146
1145 1147 def makemain(**kwargs):
1146 1148 """Produce a type conforming to ``ilocalrepositorymain``."""
1147 1149 return localrepository
1148 1150
1149 1151
1150 1152 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1151 1153 class revlogfilestorage(object):
1152 1154 """File storage when using revlogs."""
1153 1155
1154 1156 def file(self, path):
1155 1157 if path.startswith(b'/'):
1156 1158 path = path[1:]
1157 1159
1158 1160 return filelog.filelog(self.svfs, path)
1159 1161
1160 1162
1161 1163 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1162 1164 class revlognarrowfilestorage(object):
1163 1165 """File storage when using revlogs and narrow files."""
1164 1166
1165 1167 def file(self, path):
1166 1168 if path.startswith(b'/'):
1167 1169 path = path[1:]
1168 1170
1169 1171 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1170 1172
1171 1173
1172 1174 def makefilestorage(requirements, features, **kwargs):
1173 1175 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1174 1176 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1175 1177 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1176 1178
1177 1179 if requirementsmod.NARROW_REQUIREMENT in requirements:
1178 1180 return revlognarrowfilestorage
1179 1181 else:
1180 1182 return revlogfilestorage
1181 1183
1182 1184
1183 1185 # List of repository interfaces and factory functions for them. Each
1184 1186 # will be called in order during ``makelocalrepository()`` to iteratively
1185 1187 # derive the final type for a local repository instance. We capture the
1186 1188 # function as a lambda so we don't hold a reference and the module-level
1187 1189 # functions can be wrapped.
1188 1190 REPO_INTERFACES = [
1189 1191 (repository.ilocalrepositorymain, lambda: makemain),
1190 1192 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1191 1193 ]
1192 1194
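# A hedged sketch (not code from this changeset): because the factories are
# resolved through the lambdas above, an extension can wrap the module-level
# functions to inject an extra base class into the derived repository type,
# for example:
#
#     from mercurial import extensions, localrepo
#
#     class _mystoragemixin(object):  # hypothetical mixin
#         pass
#
#     def _makefilestorage(orig, requirements, features, **kwargs):
#         cls = orig(requirements=requirements, features=features, **kwargs)
#         return type('wrappedfilestorage', (_mystoragemixin, cls), {})
#
#     extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)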
1193 1195
1194 1196 @interfaceutil.implementer(repository.ilocalrepositorymain)
1195 1197 class localrepository(object):
1196 1198 """Main class for representing local repositories.
1197 1199
1198 1200 All local repositories are instances of this class.
1199 1201
1200 1202 Constructed on its own, instances of this class are not usable as
1201 1203 repository objects. To obtain a usable repository object, call
1202 1204 ``hg.repository()``, ``localrepo.instance()``, or
1203 1205 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1204 1206 ``instance()`` adds support for creating new repositories.
1205 1207 ``hg.repository()`` adds more extension integration, including calling
1206 1208 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1207 1209 used.
1208 1210 """
1209 1211
1210 1212 # obsolete experimental requirements:
1211 1213 # - manifestv2: An experimental new manifest format that allowed
1212 1214 # for stem compression of long paths. Experiment ended up not
1213 1215 # being successful (repository sizes went up due to worse delta
1214 1216 # chains), and the code was deleted in 4.6.
1215 1217 supportedformats = {
1216 1218 requirementsmod.REVLOGV1_REQUIREMENT,
1217 1219 requirementsmod.GENERALDELTA_REQUIREMENT,
1218 1220 requirementsmod.TREEMANIFEST_REQUIREMENT,
1219 1221 requirementsmod.COPIESSDC_REQUIREMENT,
1220 1222 requirementsmod.REVLOGV2_REQUIREMENT,
1221 1223 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1222 1224 requirementsmod.NODEMAP_REQUIREMENT,
1223 1225 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1224 1226 requirementsmod.SHARESAFE_REQUIREMENT,
1225 1227 }
1226 1228 _basesupported = supportedformats | {
1227 1229 requirementsmod.STORE_REQUIREMENT,
1228 1230 requirementsmod.FNCACHE_REQUIREMENT,
1229 1231 requirementsmod.SHARED_REQUIREMENT,
1230 1232 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1231 1233 requirementsmod.DOTENCODE_REQUIREMENT,
1232 1234 requirementsmod.SPARSE_REQUIREMENT,
1233 1235 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1234 1236 }
1235 1237
1236 1238 # list of prefixes for files which can be written without 'wlock'
1237 1239 # Extensions should extend this list when needed
1238 1240 _wlockfreeprefix = {
1239 1241 # We might consider requiring 'wlock' for the next
1240 1242 # two, but pretty much all the existing code assumes
1241 1243 # wlock is not needed so we keep them excluded for
1242 1244 # now.
1243 1245 b'hgrc',
1244 1246 b'requires',
1245 1247 # XXX cache is a complicated business; someone
1246 1248 # should investigate this in depth at some point
1247 1249 b'cache/',
1248 1250 # XXX shouldn't be dirstate covered by the wlock?
1249 1251 b'dirstate',
1250 1252 # XXX bisect was still a bit too messy at the time
1251 1253 # this changeset was introduced. Someone should fix
1252 1254 # the remaining bit and drop this line
1253 1255 b'bisect.state',
1254 1256 }
1255 1257
1256 1258 def __init__(
1257 1259 self,
1258 1260 baseui,
1259 1261 ui,
1260 1262 origroot,
1261 1263 wdirvfs,
1262 1264 hgvfs,
1263 1265 requirements,
1264 1266 supportedrequirements,
1265 1267 sharedpath,
1266 1268 store,
1267 1269 cachevfs,
1268 1270 wcachevfs,
1269 1271 features,
1270 1272 intents=None,
1271 1273 ):
1272 1274 """Create a new local repository instance.
1273 1275
1274 1276 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1275 1277 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1276 1278 object.
1277 1279
1278 1280 Arguments:
1279 1281
1280 1282 baseui
1281 1283 ``ui.ui`` instance that ``ui`` argument was based off of.
1282 1284
1283 1285 ui
1284 1286 ``ui.ui`` instance for use by the repository.
1285 1287
1286 1288 origroot
1287 1289 ``bytes`` path to working directory root of this repository.
1288 1290
1289 1291 wdirvfs
1290 1292 ``vfs.vfs`` rooted at the working directory.
1291 1293
1292 1294 hgvfs
1293 1295 ``vfs.vfs`` rooted at .hg/
1294 1296
1295 1297 requirements
1296 1298 ``set`` of bytestrings representing repository opening requirements.
1297 1299
1298 1300 supportedrequirements
1299 1301 ``set`` of bytestrings representing repository requirements that we
1300 1302 know how to open. May be a superset of ``requirements``.
1301 1303
1302 1304 sharedpath
1303 1305 ``bytes`` Defining path to storage base directory. Points to a
1304 1306 ``.hg/`` directory somewhere.
1305 1307
1306 1308 store
1307 1309 ``store.basicstore`` (or derived) instance providing access to
1308 1310 versioned storage.
1309 1311
1310 1312 cachevfs
1311 1313 ``vfs.vfs`` used for cache files.
1312 1314
1313 1315 wcachevfs
1314 1316 ``vfs.vfs`` used for cache files related to the working copy.
1315 1317
1316 1318 features
1317 1319 ``set`` of bytestrings defining features/capabilities of this
1318 1320 instance.
1319 1321
1320 1322 intents
1321 1323 ``set`` of system strings indicating what this repo will be used
1322 1324 for.
1323 1325 """
1324 1326 self.baseui = baseui
1325 1327 self.ui = ui
1326 1328 self.origroot = origroot
1327 1329 # vfs rooted at working directory.
1328 1330 self.wvfs = wdirvfs
1329 1331 self.root = wdirvfs.base
1330 1332 # vfs rooted at .hg/. Used to access most non-store paths.
1331 1333 self.vfs = hgvfs
1332 1334 self.path = hgvfs.base
1333 1335 self.requirements = requirements
1334 1336 self.nodeconstants = sha1nodeconstants
1335 1337 self.nullid = self.nodeconstants.nullid
1336 1338 self.supported = supportedrequirements
1337 1339 self.sharedpath = sharedpath
1338 1340 self.store = store
1339 1341 self.cachevfs = cachevfs
1340 1342 self.wcachevfs = wcachevfs
1341 1343 self.features = features
1342 1344
1343 1345 self.filtername = None
1344 1346
1345 1347 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1346 1348 b'devel', b'check-locks'
1347 1349 ):
1348 1350 self.vfs.audit = self._getvfsward(self.vfs.audit)
1349 1351 # A list of callbacks to shape the phase if no data were found.
1350 1352 # Callbacks are in the form: func(repo, roots) --> processed root.
1351 1353 # This list is to be filled by extensions during repo setup
1352 1354 self._phasedefaults = []
1353 1355
1354 1356 color.setup(self.ui)
1355 1357
1356 1358 self.spath = self.store.path
1357 1359 self.svfs = self.store.vfs
1358 1360 self.sjoin = self.store.join
1359 1361 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1360 1362 b'devel', b'check-locks'
1361 1363 ):
1362 1364 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1363 1365 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1364 1366 else: # standard vfs
1365 1367 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1366 1368
1367 1369 self._dirstatevalidatewarned = False
1368 1370
1369 1371 self._branchcaches = branchmap.BranchMapCache()
1370 1372 self._revbranchcache = None
1371 1373 self._filterpats = {}
1372 1374 self._datafilters = {}
1373 1375 self._transref = self._lockref = self._wlockref = None
1374 1376
1375 1377 # A cache for various files under .hg/ that tracks file changes,
1376 1378 # (used by the filecache decorator)
1377 1379 #
1378 1380 # Maps a property name to its util.filecacheentry
1379 1381 self._filecache = {}
1380 1382
1381 1383 # holds sets of revisions to be filtered
1382 1384 # should be cleared when something might have changed the filter value:
1383 1385 # - new changesets,
1384 1386 # - phase change,
1385 1387 # - new obsolescence marker,
1386 1388 # - working directory parent change,
1387 1389 # - bookmark changes
1388 1390 self.filteredrevcache = {}
1389 1391
1390 1392 # post-dirstate-status hooks
1391 1393 self._postdsstatus = []
1392 1394
1393 1395 # generic mapping between names and nodes
1394 1396 self.names = namespaces.namespaces()
1395 1397
1396 1398 # Key to signature value.
1397 1399 self._sparsesignaturecache = {}
1398 1400 # Signature to cached matcher instance.
1399 1401 self._sparsematchercache = {}
1400 1402
1401 1403 self._extrafilterid = repoview.extrafilter(ui)
1402 1404
1403 1405 self.filecopiesmode = None
1404 1406 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1405 1407 self.filecopiesmode = b'changeset-sidedata'
1406 1408
1407 1409 self._wanted_sidedata = set()
1408 1410 self._sidedata_computers = {}
1409 1411 sidedatamod.set_sidedata_spec_for_repo(self)
1410 1412
1411 1413 def _getvfsward(self, origfunc):
1412 1414 """build a ward for self.vfs"""
1413 1415 rref = weakref.ref(self)
1414 1416
1415 1417 def checkvfs(path, mode=None):
1416 1418 ret = origfunc(path, mode=mode)
1417 1419 repo = rref()
1418 1420 if (
1419 1421 repo is None
1420 1422 or not util.safehasattr(repo, b'_wlockref')
1421 1423 or not util.safehasattr(repo, b'_lockref')
1422 1424 ):
1423 1425 return
1424 1426 if mode in (None, b'r', b'rb'):
1425 1427 return
1426 1428 if path.startswith(repo.path):
1427 1429 # truncate name relative to the repository (.hg)
1428 1430 path = path[len(repo.path) + 1 :]
1429 1431 if path.startswith(b'cache/'):
1430 1432 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1431 1433 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1432 1434 # path prefixes covered by 'lock'
1433 1435 vfs_path_prefixes = (
1434 1436 b'journal.',
1435 1437 b'undo.',
1436 1438 b'strip-backup/',
1437 1439 b'cache/',
1438 1440 )
1439 1441 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1440 1442 if repo._currentlock(repo._lockref) is None:
1441 1443 repo.ui.develwarn(
1442 1444 b'write with no lock: "%s"' % path,
1443 1445 stacklevel=3,
1444 1446 config=b'check-locks',
1445 1447 )
1446 1448 elif repo._currentlock(repo._wlockref) is None:
1447 1449 # rest of vfs files are covered by 'wlock'
1448 1450 #
1449 1451 # exclude special files
1450 1452 for prefix in self._wlockfreeprefix:
1451 1453 if path.startswith(prefix):
1452 1454 return
1453 1455 repo.ui.develwarn(
1454 1456 b'write with no wlock: "%s"' % path,
1455 1457 stacklevel=3,
1456 1458 config=b'check-locks',
1457 1459 )
1458 1460 return ret
1459 1461
1460 1462 return checkvfs
1461 1463
1462 1464 def _getsvfsward(self, origfunc):
1463 1465 """build a ward for self.svfs"""
1464 1466 rref = weakref.ref(self)
1465 1467
1466 1468 def checksvfs(path, mode=None):
1467 1469 ret = origfunc(path, mode=mode)
1468 1470 repo = rref()
1469 1471 if repo is None or not util.safehasattr(repo, b'_lockref'):
1470 1472 return
1471 1473 if mode in (None, b'r', b'rb'):
1472 1474 return
1473 1475 if path.startswith(repo.sharedpath):
1474 1476 # truncate name relative to the repository (.hg)
1475 1477 path = path[len(repo.sharedpath) + 1 :]
1476 1478 if repo._currentlock(repo._lockref) is None:
1477 1479 repo.ui.develwarn(
1478 1480 b'write with no lock: "%s"' % path, stacklevel=4
1479 1481 )
1480 1482 return ret
1481 1483
1482 1484 return checksvfs
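# The two wards above wrap a vfs ``audit`` callable so that write access
# performed without the expected lock emits a developer warning, while a
# weakref to the repo avoids a reference cycle.  A minimal standalone sketch
# of the same pattern (``FakeRepo``, ``make_ward`` and ``audit`` are
# illustrative names, not part of Mercurial):
import weakref


class FakeRepo(object):
    locked = False

    def warn(self, msg):
        print('devel-warn: %s' % msg)


def make_ward(repo, origfunc):
    rref = weakref.ref(repo)  # weak reference: no repo -> ward -> repo cycle

    def checked(path, mode=None):
        ret = origfunc(path, mode=mode)
        r = rref()
        if r is not None and mode not in (None, 'r', 'rb') and not r.locked:
            r.warn('write with no lock: "%s"' % path)
        return ret

    return checked


fake = FakeRepo()
audit = make_ward(fake, lambda path, mode=None: path)
audit('journal.dirstate', mode='wb')  # -> devel-warn: write with no lock: ...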
1483 1485
1484 1486 def close(self):
1485 1487 self._writecaches()
1486 1488
1487 1489 def _writecaches(self):
1488 1490 if self._revbranchcache:
1489 1491 self._revbranchcache.write()
1490 1492
1491 1493 def _restrictcapabilities(self, caps):
1492 1494 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1493 1495 caps = set(caps)
1494 1496 capsblob = bundle2.encodecaps(
1495 1497 bundle2.getrepocaps(self, role=b'client')
1496 1498 )
1497 1499 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1498 1500 if self.ui.configbool(b'experimental', b'narrow'):
1499 1501 caps.add(wireprototypes.NARROWCAP)
1500 1502 return caps
1501 1503
1502 1504 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1503 1505 # self -> auditor -> self._checknested -> self
1504 1506
1505 1507 @property
1506 1508 def auditor(self):
1507 1509 # This is only used by context.workingctx.match in order to
1508 1510 # detect files in subrepos.
1509 1511 return pathutil.pathauditor(self.root, callback=self._checknested)
1510 1512
1511 1513 @property
1512 1514 def nofsauditor(self):
1513 1515 # This is only used by context.basectx.match in order to detect
1514 1516 # files in subrepos.
1515 1517 return pathutil.pathauditor(
1516 1518 self.root, callback=self._checknested, realfs=False, cached=True
1517 1519 )
1518 1520
1519 1521 def _checknested(self, path):
1520 1522 """Determine if path is a legal nested repository."""
1521 1523 if not path.startswith(self.root):
1522 1524 return False
1523 1525 subpath = path[len(self.root) + 1 :]
1524 1526 normsubpath = util.pconvert(subpath)
1525 1527
1526 1528 # XXX: Checking against the current working copy is wrong in
1527 1529 # the sense that it can reject things like
1528 1530 #
1529 1531 # $ hg cat -r 10 sub/x.txt
1530 1532 #
1531 1533 # if sub/ is no longer a subrepository in the working copy
1532 1534 # parent revision.
1533 1535 #
1534 1536 # However, it can of course also allow things that would have
1535 1537 # been rejected before, such as the above cat command if sub/
1536 1538 # is a subrepository now, but was a normal directory before.
1537 1539 # The old path auditor would have rejected by mistake since it
1538 1540 # panics when it sees sub/.hg/.
1539 1541 #
1540 1542 # All in all, checking against the working copy seems sensible
1541 1543 # since we want to prevent access to nested repositories on
1542 1544 # the filesystem *now*.
1543 1545 ctx = self[None]
1544 1546 parts = util.splitpath(subpath)
1545 1547 while parts:
1546 1548 prefix = b'/'.join(parts)
1547 1549 if prefix in ctx.substate:
1548 1550 if prefix == normsubpath:
1549 1551 return True
1550 1552 else:
1551 1553 sub = ctx.sub(prefix)
1552 1554 return sub.checknested(subpath[len(prefix) + 1 :])
1553 1555 else:
1554 1556 parts.pop()
1555 1557 return False
1556 1558
1557 1559 def peer(self):
1558 1560 return localpeer(self) # not cached to avoid reference cycle
1559 1561
1560 1562 def unfiltered(self):
1561 1563 """Return unfiltered version of the repository
1562 1564
1563 1565 Intended to be overwritten by filtered repo."""
1564 1566 return self
1565 1567
1566 1568 def filtered(self, name, visibilityexceptions=None):
1567 1569 """Return a filtered version of a repository
1568 1570
1569 1571 The `name` parameter is the identifier of the requested view. This
1570 1572 will return a repoview object set "exactly" to the specified view.
1571 1573
1572 1574 This function does not apply recursive filtering to a repository. For
1573 1575 example calling `repo.filtered("served")` will return a repoview using
1574 1576 the "served" view, regardless of the initial view used by `repo`.
1575 1577
1576 1578 In other words, there is always only one level of `repoview` "filtering".
1577 1579 """
1578 1580 if self._extrafilterid is not None and b'%' not in name:
1579 1581 name = name + b'%' + self._extrafilterid
1580 1582
1581 1583 cls = repoview.newtype(self.unfiltered().__class__)
1582 1584 return cls(self, name, visibilityexceptions)
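# Hedged usage sketch of repository views (the repository path below is
# hypothetical; ``hg.repository`` and ``ui.load`` are the usual entry points
# wrapping ``localrepo.instance()``):
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
served = repo.filtered(b'served')              # hides secret and hidden csets
print(served.filtername)                       # b'served'
print(served.filtered(b'visible').filtername)  # b'visible': views never nest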
1583 1585
1584 1586 @mixedrepostorecache(
1585 1587 (b'bookmarks', b'plain'),
1586 1588 (b'bookmarks.current', b'plain'),
1587 1589 (b'bookmarks', b''),
1588 1590 (b'00changelog.i', b''),
1589 1591 )
1590 1592 def _bookmarks(self):
1591 1593 # Since the multiple files involved in the transaction cannot be
1592 1594 # written atomically (with current repository format), there is a race
1593 1595 # condition here.
1594 1596 #
1595 1597 # 1) changelog content A is read
1596 1598 # 2) outside transaction update changelog to content B
1597 1599 # 3) outside transaction update bookmark file referring to content B
1598 1600 # 4) bookmarks file content is read and filtered against changelog-A
1599 1601 #
1600 1602 # When this happens, bookmarks against nodes missing from A are dropped.
1601 1603 #
1602 1604 # Having this happen during a read is not great, but it becomes worse
1603 1605 # when it happens during a write, because the bookmarks to the "unknown"
1604 1606 # nodes will be dropped for good. However, writes happen within locks.
1605 1607 # This locking makes it possible to have a race-free consistent read.
1606 1608 # For this purpose, data read from disk before locking is
1607 1609 # "invalidated" right after the locks are taken. These invalidations are
1608 1610 # "light": the `filecache` mechanism keeps the data in memory and will
1609 1611 # reuse it if the underlying files did not change. Not parsing the
1610 1612 # same data multiple times helps performance.
1611 1613 #
1612 1614 # Unfortunately, in the case described above, the files tracked by the
1613 1615 # bookmarks file cache might not have changed, but the in-memory
1614 1616 # content is still "wrong" because we used an older changelog content
1615 1617 # to process the on-disk data. So after locking, the changelog would be
1616 1618 # refreshed but `_bookmarks` would be preserved.
1617 1619 # Adding `00changelog.i` to the list of tracked files is not
1618 1620 # enough, because at the time we build the content for `_bookmarks` in
1619 1621 # (4), the changelog file has already diverged from the content used
1620 1622 # for loading `changelog` in (1)
1621 1623 #
1622 1624 # To prevent the issue, we force the changelog to be explicitly
1623 1625 # reloaded while computing `_bookmarks`. The data race can still happen
1624 1626 # without the lock (with a narrower window), but it would no longer go
1625 1627 # undetected during the lock time refresh.
1626 1628 #
1627 1629 # The new schedule is as follows:
1628 1630 #
1629 1631 # 1) filecache logic detect that `_bookmarks` needs to be computed
1630 1632 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1631 1633 # 3) We force `changelog` filecache to be tested
1632 1634 # 4) cachestat for `changelog` are captured (for changelog)
1633 1635 # 5) `_bookmarks` is computed and cached
1634 1636 #
1635 1637 # The step in (3) ensures we have a changelog at least as recent as the
1636 1638 # cache stat computed in (1). As a result, at locking time:
1637 1639 # * if the changelog did not change since (1) -> we can reuse the data
1638 1640 # * otherwise -> the bookmarks get refreshed.
1639 1641 self._refreshchangelog()
1640 1642 return bookmarks.bmstore(self)
1641 1643
1642 1644 def _refreshchangelog(self):
1643 1645 """make sure the in memory changelog match the on-disk one"""
1644 1646 if 'changelog' in vars(self) and self.currenttransaction() is None:
1645 1647 del self.changelog
1646 1648
1647 1649 @property
1648 1650 def _activebookmark(self):
1649 1651 return self._bookmarks.active
1650 1652
1651 1653 # _phasesets depend on changelog. what we need is to call
1652 1654 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1653 1655 # can't be easily expressed in filecache mechanism.
1654 1656 @storecache(b'phaseroots', b'00changelog.i')
1655 1657 def _phasecache(self):
1656 1658 return phases.phasecache(self, self._phasedefaults)
1657 1659
1658 1660 @storecache(b'obsstore')
1659 1661 def obsstore(self):
1660 1662 return obsolete.makestore(self.ui, self)
1661 1663
1662 1664 @storecache(b'00changelog.i')
1663 1665 def changelog(self):
1664 1666 # load dirstate before changelog to avoid race see issue6303
1665 1667 self.dirstate.prefetch_parents()
1666 1668 return self.store.changelog(
1667 1669 txnutil.mayhavepending(self.root),
1668 1670 concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
1669 1671 )
1670 1672
1671 1673 @storecache(b'00manifest.i')
1672 1674 def manifestlog(self):
1673 1675 return self.store.manifestlog(self, self._storenarrowmatch)
1674 1676
1675 1677 @repofilecache(b'dirstate')
1676 1678 def dirstate(self):
1677 1679 return self._makedirstate()
1678 1680
1679 1681 def _makedirstate(self):
1680 1682 """Extension point for wrapping the dirstate per-repo."""
1681 1683 sparsematchfn = lambda: sparse.matcher(self)
1682 1684
1683 1685 return dirstate.dirstate(
1684 1686 self.vfs,
1685 1687 self.ui,
1686 1688 self.root,
1687 1689 self._dirstatevalidate,
1688 1690 sparsematchfn,
1689 1691 self.nodeconstants,
1690 1692 )
1691 1693
1692 1694 def _dirstatevalidate(self, node):
1693 1695 try:
1694 1696 self.changelog.rev(node)
1695 1697 return node
1696 1698 except error.LookupError:
1697 1699 if not self._dirstatevalidatewarned:
1698 1700 self._dirstatevalidatewarned = True
1699 1701 self.ui.warn(
1700 1702 _(b"warning: ignoring unknown working parent %s!\n")
1701 1703 % short(node)
1702 1704 )
1703 1705 return self.nullid
1704 1706
1705 1707 @storecache(narrowspec.FILENAME)
1706 1708 def narrowpats(self):
1707 1709 """matcher patterns for this repository's narrowspec
1708 1710
1709 1711 A tuple of (includes, excludes).
1710 1712 """
1711 1713 return narrowspec.load(self)
1712 1714
1713 1715 @storecache(narrowspec.FILENAME)
1714 1716 def _storenarrowmatch(self):
1715 1717 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1716 1718 return matchmod.always()
1717 1719 include, exclude = self.narrowpats
1718 1720 return narrowspec.match(self.root, include=include, exclude=exclude)
1719 1721
1720 1722 @storecache(narrowspec.FILENAME)
1721 1723 def _narrowmatch(self):
1722 1724 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1723 1725 return matchmod.always()
1724 1726 narrowspec.checkworkingcopynarrowspec(self)
1725 1727 include, exclude = self.narrowpats
1726 1728 return narrowspec.match(self.root, include=include, exclude=exclude)
1727 1729
1728 1730 def narrowmatch(self, match=None, includeexact=False):
1729 1731 """matcher corresponding the the repo's narrowspec
1730 1732
1731 1733 If `match` is given, then that will be intersected with the narrow
1732 1734 matcher.
1733 1735
1734 1736 If `includeexact` is True, then any exact matches from `match` will
1735 1737 be included even if they're outside the narrowspec.
1736 1738 """
1737 1739 if match:
1738 1740 if includeexact and not self._narrowmatch.always():
1739 1741 # do not exclude explicitly-specified paths so that they can
1740 1742 # be warned later on
1741 1743 em = matchmod.exact(match.files())
1742 1744 nm = matchmod.unionmatcher([self._narrowmatch, em])
1743 1745 return matchmod.intersectmatchers(match, nm)
1744 1746 return matchmod.intersectmatchers(match, self._narrowmatch)
1745 1747 return self._narrowmatch
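# Hedged standalone sketch of the matcher combination used above, with
# ``matchmod.never()`` standing in for an (empty) narrow matcher; file names
# are illustrative:
from mercurial import match as matchmod

caller = matchmod.exact([b'outside/f.txt', b'inside/a.txt'])
em = matchmod.exact([b'outside/f.txt'])             # explicitly-listed file
nm = matchmod.unionmatcher([matchmod.never(), em])  # narrowspec + exact files
combined = matchmod.intersectmatchers(caller, nm)
print(combined(b'outside/f.txt'))  # True: the exact match is let through
print(combined(b'inside/a.txt'))   # False: outside the (empty) narrowspec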
1746 1748
1747 1749 def setnarrowpats(self, newincludes, newexcludes):
1748 1750 narrowspec.save(self, newincludes, newexcludes)
1749 1751 self.invalidate(clearfilecache=True)
1750 1752
1751 1753 @unfilteredpropertycache
1752 1754 def _quick_access_changeid_null(self):
1753 1755 return {
1754 1756 b'null': (nullrev, self.nodeconstants.nullid),
1755 1757 nullrev: (nullrev, self.nodeconstants.nullid),
1756 1758 self.nullid: (nullrev, self.nullid),
1757 1759 }
1758 1760
1759 1761 @unfilteredpropertycache
1760 1762 def _quick_access_changeid_wc(self):
1761 1763 # also fast path access to the working copy parents
1762 1764 # however, only do it for filters that ensure the wc is visible.
1763 1765 quick = self._quick_access_changeid_null.copy()
1764 1766 cl = self.unfiltered().changelog
1765 1767 for node in self.dirstate.parents():
1766 1768 if node == self.nullid:
1767 1769 continue
1768 1770 rev = cl.index.get_rev(node)
1769 1771 if rev is None:
1770 1772 # unknown working copy parent case:
1771 1773 #
1772 1774 # skip the fast path and let higher code deal with it
1773 1775 continue
1774 1776 pair = (rev, node)
1775 1777 quick[rev] = pair
1776 1778 quick[node] = pair
1777 1779 # also add the parents of the parents
1778 1780 for r in cl.parentrevs(rev):
1779 1781 if r == nullrev:
1780 1782 continue
1781 1783 n = cl.node(r)
1782 1784 pair = (r, n)
1783 1785 quick[r] = pair
1784 1786 quick[n] = pair
1785 1787 p1node = self.dirstate.p1()
1786 1788 if p1node != self.nullid:
1787 1789 quick[b'.'] = quick[p1node]
1788 1790 return quick
1789 1791
1790 1792 @unfilteredmethod
1791 1793 def _quick_access_changeid_invalidate(self):
1792 1794 if '_quick_access_changeid_wc' in vars(self):
1793 1795 del self.__dict__['_quick_access_changeid_wc']
1794 1796
1795 1797 @property
1796 1798 def _quick_access_changeid(self):
1797 1799 """an helper dictionnary for __getitem__ calls
1798 1800
1799 1801 This contains the symbols we can recognize right away without
1800 1802 further processing.
1801 1803 """
1802 1804 if self.filtername in repoview.filter_has_wc:
1803 1805 return self._quick_access_changeid_wc
1804 1806 return self._quick_access_changeid_null
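# Standalone sketch of the quick-access table shape built above: both the
# integer revision and the binary node map to the same (rev, node) pair, so
# ``__getitem__`` can resolve either form without touching the changelog
# (the values below are illustrative):
nullrev, nullid = -1, b'\x00' * 20
p1_rev, p1_node = 42, b'\x01' * 20  # pretend working-copy parent

quick = {
    b'null': (nullrev, nullid),
    nullrev: (nullrev, nullid),
    nullid: (nullrev, nullid),
    p1_rev: (p1_rev, p1_node),
    p1_node: (p1_rev, p1_node),
    b'.': (p1_rev, p1_node),
}
print(quick.get(42))    # (42, b'\x01\x01...')
print(quick.get(b'.'))  # same pair, no changelog lookup needed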
1805 1807
1806 1808 def __getitem__(self, changeid):
1807 1809 # dealing with special cases
1808 1810 if changeid is None:
1809 1811 return context.workingctx(self)
1810 1812 if isinstance(changeid, context.basectx):
1811 1813 return changeid
1812 1814
1813 1815 # dealing with multiple revisions
1814 1816 if isinstance(changeid, slice):
1815 1817 # wdirrev isn't contiguous so the slice shouldn't include it
1816 1818 return [
1817 1819 self[i]
1818 1820 for i in pycompat.xrange(*changeid.indices(len(self)))
1819 1821 if i not in self.changelog.filteredrevs
1820 1822 ]
1821 1823
1822 1824 # dealing with some special values
1823 1825 quick_access = self._quick_access_changeid.get(changeid)
1824 1826 if quick_access is not None:
1825 1827 rev, node = quick_access
1826 1828 return context.changectx(self, rev, node, maybe_filtered=False)
1827 1829 if changeid == b'tip':
1828 1830 node = self.changelog.tip()
1829 1831 rev = self.changelog.rev(node)
1830 1832 return context.changectx(self, rev, node)
1831 1833
1832 1834 # dealing with arbitrary values
1833 1835 try:
1834 1836 if isinstance(changeid, int):
1835 1837 node = self.changelog.node(changeid)
1836 1838 rev = changeid
1837 1839 elif changeid == b'.':
1838 1840 # this is a hack to delay/avoid loading obsmarkers
1839 1841 # when we know that '.' won't be hidden
1840 1842 node = self.dirstate.p1()
1841 1843 rev = self.unfiltered().changelog.rev(node)
1842 1844 elif len(changeid) == self.nodeconstants.nodelen:
1843 1845 try:
1844 1846 node = changeid
1845 1847 rev = self.changelog.rev(changeid)
1846 1848 except error.FilteredLookupError:
1847 1849 changeid = hex(changeid) # for the error message
1848 1850 raise
1849 1851 except LookupError:
1850 1852 # check if it might have come from damaged dirstate
1851 1853 #
1852 1854 # XXX we could avoid the unfiltered if we had a recognizable
1853 1855 # exception for filtered changeset access
1854 1856 if (
1855 1857 self.local()
1856 1858 and changeid in self.unfiltered().dirstate.parents()
1857 1859 ):
1858 1860 msg = _(b"working directory has unknown parent '%s'!")
1859 1861 raise error.Abort(msg % short(changeid))
1860 1862 changeid = hex(changeid) # for the error message
1861 1863 raise
1862 1864
1863 1865 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1864 1866 node = bin(changeid)
1865 1867 rev = self.changelog.rev(node)
1866 1868 else:
1867 1869 raise error.ProgrammingError(
1868 1870 b"unsupported changeid '%s' of type %s"
1869 1871 % (changeid, pycompat.bytestr(type(changeid)))
1870 1872 )
1871 1873
1872 1874 return context.changectx(self, rev, node)
1873 1875
1874 1876 except (error.FilteredIndexError, error.FilteredLookupError):
1875 1877 raise error.FilteredRepoLookupError(
1876 1878 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1877 1879 )
1878 1880 except (IndexError, LookupError):
1879 1881 raise error.RepoLookupError(
1880 1882 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1881 1883 )
1882 1884 except error.WdirUnsupported:
1883 1885 return context.workingctx(self)
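# Hedged usage sketch of the lookup forms handled by ``__getitem__`` (the
# repository path is hypothetical):
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
wctx = repo[None]     # working directory context
tip = repo[b'tip']    # symbolic name
first = repo[0]       # integer revision
parent = repo[b'.']   # working directory parent
early = repo[0:5]     # slice -> list of changectx, filtered revs skipped
same = repo[tip]      # passing a context returns it unchanged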
1884 1886
1885 1887 def __contains__(self, changeid):
1886 1888 """True if the given changeid exists"""
1887 1889 try:
1888 1890 self[changeid]
1889 1891 return True
1890 1892 except error.RepoLookupError:
1891 1893 return False
1892 1894
1893 1895 def __nonzero__(self):
1894 1896 return True
1895 1897
1896 1898 __bool__ = __nonzero__
1897 1899
1898 1900 def __len__(self):
1899 1901 # no need to pay the cost of repoview.changelog
1900 1902 unfi = self.unfiltered()
1901 1903 return len(unfi.changelog)
1902 1904
1903 1905 def __iter__(self):
1904 1906 return iter(self.changelog)
1905 1907
1906 1908 def revs(self, expr, *args):
1907 1909 """Find revisions matching a revset.
1908 1910
1909 1911 The revset is specified as a string ``expr`` that may contain
1910 1912 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1911 1913
1912 1914 Revset aliases from the configuration are not expanded. To expand
1913 1915 user aliases, consider calling ``scmutil.revrange()`` or
1914 1916 ``repo.anyrevs([expr], user=True)``.
1915 1917
1916 1918 Returns a smartset.abstractsmartset, which is a list-like interface
1917 1919 that contains integer revisions.
1918 1920 """
1919 1921 tree = revsetlang.spectree(expr, *args)
1920 1922 return revset.makematcher(tree)(self)
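# Hedged usage sketch of ``revs()`` %-formatting (the path and revset strings
# are illustrative; see ``revsetlang.formatspec`` for the full specifier list):
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
merges = repo.revs(b'merge() and branch(%s)', b'default')
subset = repo.revs(b'%ld and file(%s)', list(merges), b'setup.py')
for rev in subset:
    print(rev)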
1921 1923
1922 1924 def set(self, expr, *args):
1923 1925 """Find revisions matching a revset and emit changectx instances.
1924 1926
1925 1927 This is a convenience wrapper around ``revs()`` that iterates the
1926 1928 result and is a generator of changectx instances.
1927 1929
1928 1930 Revset aliases from the configuration are not expanded. To expand
1929 1931 user aliases, consider calling ``scmutil.revrange()``.
1930 1932 """
1931 1933 for r in self.revs(expr, *args):
1932 1934 yield self[r]
1933 1935
1934 1936 def anyrevs(self, specs, user=False, localalias=None):
1935 1937 """Find revisions matching one of the given revsets.
1936 1938
1937 1939 Revset aliases from the configuration are not expanded by default. To
1938 1940 expand user aliases, specify ``user=True``. To provide some local
1939 1941 definitions overriding user aliases, set ``localalias`` to
1940 1942 ``{name: definitionstring}``.
1941 1943 """
1942 1944 if specs == [b'null']:
1943 1945 return revset.baseset([nullrev])
1944 1946 if specs == [b'.']:
1945 1947 quick_data = self._quick_access_changeid.get(b'.')
1946 1948 if quick_data is not None:
1947 1949 return revset.baseset([quick_data[0]])
1948 1950 if user:
1949 1951 m = revset.matchany(
1950 1952 self.ui,
1951 1953 specs,
1952 1954 lookup=revset.lookupfn(self),
1953 1955 localalias=localalias,
1954 1956 )
1955 1957 else:
1956 1958 m = revset.matchany(None, specs, localalias=localalias)
1957 1959 return m(self)
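# Hedged sketch of ``anyrevs()`` with a throwaway local alias shadowing any
# user-level alias of the same name (alias name and definition illustrative):
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
revs = repo.anyrevs(
    [b'mergepoints', b'.'],
    user=True,
    localalias={b'mergepoints': b'merge()'},
)
print(list(revs))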
1958 1960
1959 1961 def url(self):
1960 1962 return b'file:' + self.root
1961 1963
1962 1964 def hook(self, name, throw=False, **args):
1963 1965 """Call a hook, passing this repo instance.
1964 1966
1965 1967 This is a convenience method to aid invoking hooks. Extensions likely
1966 1968 won't call this unless they have registered a custom hook or are
1967 1969 replacing code that is expected to call a hook.
1968 1970 """
1969 1971 return hook.hook(self.ui, self, name, throw, **args)
1970 1972
1971 1973 @filteredpropertycache
1972 1974 def _tagscache(self):
1973 1975 """Returns a tagscache object that contains various tags related
1974 1976 caches."""
1975 1977
1976 1978 # This simplifies its cache management by having one decorated
1977 1979 # function (this one) and the rest simply fetch things from it.
1978 1980 class tagscache(object):
1979 1981 def __init__(self):
1980 1982 # These two define the set of tags for this repository. tags
1981 1983 # maps tag name to node; tagtypes maps tag name to 'global' or
1982 1984 # 'local'. (Global tags are defined by .hgtags across all
1983 1985 # heads, and local tags are defined in .hg/localtags.)
1984 1986 # They constitute the in-memory cache of tags.
1985 1987 self.tags = self.tagtypes = None
1986 1988
1987 1989 self.nodetagscache = self.tagslist = None
1988 1990
1989 1991 cache = tagscache()
1990 1992 cache.tags, cache.tagtypes = self._findtags()
1991 1993
1992 1994 return cache
1993 1995
1994 1996 def tags(self):
1995 1997 '''return a mapping of tag to node'''
1996 1998 t = {}
1997 1999 if self.changelog.filteredrevs:
1998 2000 tags, tt = self._findtags()
1999 2001 else:
2000 2002 tags = self._tagscache.tags
2001 2003 rev = self.changelog.rev
2002 2004 for k, v in pycompat.iteritems(tags):
2003 2005 try:
2004 2006 # ignore tags to unknown nodes
2005 2007 rev(v)
2006 2008 t[k] = v
2007 2009 except (error.LookupError, ValueError):
2008 2010 pass
2009 2011 return t
2010 2012
2011 2013 def _findtags(self):
2012 2014 """Do the hard work of finding tags. Return a pair of dicts
2013 2015 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2014 2016 maps tag name to a string like \'global\' or \'local\'.
2015 2017 Subclasses or extensions are free to add their own tags, but
2016 2018 should be aware that the returned dicts will be retained for the
2017 2019 duration of the localrepo object."""
2018 2020
2019 2021 # XXX what tagtype should subclasses/extensions use? Currently
2020 2022 # mq and bookmarks add tags, but do not set the tagtype at all.
2021 2023 # Should each extension invent its own tag type? Should there
2022 2024 # be one tagtype for all such "virtual" tags? Or is the status
2023 2025 # quo fine?
2024 2026
2025 2027 # map tag name to (node, hist)
2026 2028 alltags = tagsmod.findglobaltags(self.ui, self)
2027 2029 # map tag name to tag type
2028 2030 tagtypes = {tag: b'global' for tag in alltags}
2029 2031
2030 2032 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2031 2033
2032 2034 # Build the return dicts. Have to re-encode tag names because
2033 2035 # the tags module always uses UTF-8 (in order not to lose info
2034 2036 # writing to the cache), but the rest of Mercurial wants them in
2035 2037 # local encoding.
2036 2038 tags = {}
2037 2039 for (name, (node, hist)) in pycompat.iteritems(alltags):
2038 2040 if node != self.nullid:
2039 2041 tags[encoding.tolocal(name)] = node
2040 2042 tags[b'tip'] = self.changelog.tip()
2041 2043 tagtypes = {
2042 2044 encoding.tolocal(name): value
2043 2045 for (name, value) in pycompat.iteritems(tagtypes)
2044 2046 }
2045 2047 return (tags, tagtypes)
2046 2048
2047 2049 def tagtype(self, tagname):
2048 2050 """
2049 2051 return the type of the given tag. result can be:
2050 2052
2051 2053 'local' : a local tag
2052 2054 'global' : a global tag
2053 2055 None : tag does not exist
2054 2056 """
2055 2057
2056 2058 return self._tagscache.tagtypes.get(tagname)
2057 2059
2058 2060 def tagslist(self):
2059 2061 '''return a list of tags ordered by revision'''
2060 2062 if not self._tagscache.tagslist:
2061 2063 l = []
2062 2064 for t, n in pycompat.iteritems(self.tags()):
2063 2065 l.append((self.changelog.rev(n), t, n))
2064 2066 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2065 2067
2066 2068 return self._tagscache.tagslist
2067 2069
2068 2070 def nodetags(self, node):
2069 2071 '''return the tags associated with a node'''
2070 2072 if not self._tagscache.nodetagscache:
2071 2073 nodetagscache = {}
2072 2074 for t, n in pycompat.iteritems(self._tagscache.tags):
2073 2075 nodetagscache.setdefault(n, []).append(t)
2074 2076 for tags in pycompat.itervalues(nodetagscache):
2075 2077 tags.sort()
2076 2078 self._tagscache.nodetagscache = nodetagscache
2077 2079 return self._tagscache.nodetagscache.get(node, [])
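# Hedged usage sketch of the tag accessors defined above (path hypothetical):
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
tags = repo.tags()              # {tagname: node}, always includes b'tip'
for name, node in sorted(tags.items()):
    # tagtype() is b'global', b'local', or None (e.g. for the b'tip' pseudo-tag)
    print(name, repo.tagtype(name), repo.nodetags(node))
print(repo.tagslist())          # [(name, node), ...] ordered by revision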
2078 2080
2079 2081 def nodebookmarks(self, node):
2080 2082 """return the list of bookmarks pointing to the specified node"""
2081 2083 return self._bookmarks.names(node)
2082 2084
2083 2085 def branchmap(self):
2084 2086 """returns a dictionary {branch: [branchheads]} with branchheads
2085 2087 ordered by increasing revision number"""
2086 2088 return self._branchcaches[self]
2087 2089
2088 2090 @unfilteredmethod
2089 2091 def revbranchcache(self):
2090 2092 if not self._revbranchcache:
2091 2093 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2092 2094 return self._revbranchcache
2093 2095
2094 2096 def register_changeset(self, rev, changelogrevision):
2095 2097 self.revbranchcache().setdata(rev, changelogrevision)
2096 2098
2097 2099 def branchtip(self, branch, ignoremissing=False):
2098 2100 """return the tip node for a given branch
2099 2101
2100 2102 If ignoremissing is True, then this method will not raise an error.
2101 2103 This is helpful for callers that only expect None for a missing branch
2102 2104 (e.g. namespace).
2103 2105
2104 2106 """
2105 2107 try:
2106 2108 return self.branchmap().branchtip(branch)
2107 2109 except KeyError:
2108 2110 if not ignoremissing:
2109 2111 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2110 2112 else:
2111 2113 pass
2112 2114
2113 2115 def lookup(self, key):
2114 2116 node = scmutil.revsymbol(self, key).node()
2115 2117 if node is None:
2116 2118 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2117 2119 return node
2118 2120
2119 2121 def lookupbranch(self, key):
2120 2122 if self.branchmap().hasbranch(key):
2121 2123 return key
2122 2124
2123 2125 return scmutil.revsymbol(self, key).branch()
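# Hedged sketch of symbol and branch resolution (the path and branch name are
# illustrative):
from mercurial import hg, ui as uimod
from mercurial.node import hex

repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
print(hex(repo.lookup(b'tip')))                        # symbol -> binary node
print(repo.lookupbranch(b'default'))                   # branch name echoed back
print(repo.branchtip(b'default', ignoremissing=True))  # tip node, or None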
2124 2126
2125 2127 def known(self, nodes):
2126 2128 cl = self.changelog
2127 2129 get_rev = cl.index.get_rev
2128 2130 filtered = cl.filteredrevs
2129 2131 result = []
2130 2132 for n in nodes:
2131 2133 r = get_rev(n)
2132 2134 resp = not (r is None or r in filtered)
2133 2135 result.append(resp)
2134 2136 return result
2135 2137
2136 2138 def local(self):
2137 2139 return self
2138 2140
2139 2141 def publishing(self):
2140 2142 # it's safe (and desirable) to trust the publish flag unconditionally
2141 2143 # so that we don't finalize changes shared between users via ssh or nfs
2142 2144 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2143 2145
2144 2146 def cancopy(self):
2145 2147 # so statichttprepo's override of local() works
2146 2148 if not self.local():
2147 2149 return False
2148 2150 if not self.publishing():
2149 2151 return True
2150 2152 # if publishing we can't copy if there is filtered content
2151 2153 return not self.filtered(b'visible').changelog.filteredrevs
2152 2154
2153 2155 def shared(self):
2154 2156 '''the type of shared repository (None if not shared)'''
2155 2157 if self.sharedpath != self.path:
2156 2158 return b'store'
2157 2159 return None
2158 2160
2159 2161 def wjoin(self, f, *insidef):
2160 2162 return self.vfs.reljoin(self.root, f, *insidef)
2161 2163
2162 2164 def setparents(self, p1, p2=None):
2163 2165 if p2 is None:
2164 2166 p2 = self.nullid
2165 2167 self[None].setparents(p1, p2)
2166 2168 self._quick_access_changeid_invalidate()
2167 2169
2168 2170 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2169 2171 """changeid must be a changeset revision, if specified.
2170 2172 fileid can be a file revision or node."""
2171 2173 return context.filectx(
2172 2174 self, path, changeid, fileid, changectx=changectx
2173 2175 )
2174 2176
2175 2177 def getcwd(self):
2176 2178 return self.dirstate.getcwd()
2177 2179
2178 2180 def pathto(self, f, cwd=None):
2179 2181 return self.dirstate.pathto(f, cwd)
2180 2182
2181 2183 def _loadfilter(self, filter):
2182 2184 if filter not in self._filterpats:
2183 2185 l = []
2184 2186 for pat, cmd in self.ui.configitems(filter):
2185 2187 if cmd == b'!':
2186 2188 continue
2187 2189 mf = matchmod.match(self.root, b'', [pat])
2188 2190 fn = None
2189 2191 params = cmd
2190 2192 for name, filterfn in pycompat.iteritems(self._datafilters):
2191 2193 if cmd.startswith(name):
2192 2194 fn = filterfn
2193 2195 params = cmd[len(name) :].lstrip()
2194 2196 break
2195 2197 if not fn:
2196 2198 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2197 2199 fn.__name__ = 'commandfilter'
2198 2200 # Wrap old filters not supporting keyword arguments
2199 2201 if not pycompat.getargspec(fn)[2]:
2200 2202 oldfn = fn
2201 2203 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2202 2204 fn.__name__ = 'compat-' + oldfn.__name__
2203 2205 l.append((mf, fn, params))
2204 2206 self._filterpats[filter] = l
2205 2207 return self._filterpats[filter]
2206 2208
2207 2209 def _filter(self, filterpats, filename, data):
2208 2210 for mf, fn, cmd in filterpats:
2209 2211 if mf(filename):
2210 2212 self.ui.debug(
2211 2213 b"filtering %s through %s\n"
2212 2214 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2213 2215 )
2214 2216 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2215 2217 break
2216 2218
2217 2219 return data
2218 2220
2219 2221 @unfilteredpropertycache
2220 2222 def _encodefilterpats(self):
2221 2223 return self._loadfilter(b'encode')
2222 2224
2223 2225 @unfilteredpropertycache
2224 2226 def _decodefilterpats(self):
2225 2227 return self._loadfilter(b'decode')
2226 2228
2227 2229 def adddatafilter(self, name, filter):
2228 2230 self._datafilters[name] = filter
2229 2231
2230 2232 def wread(self, filename):
2231 2233 if self.wvfs.islink(filename):
2232 2234 data = self.wvfs.readlink(filename)
2233 2235 else:
2234 2236 data = self.wvfs.read(filename)
2235 2237 return self._filter(self._encodefilterpats, filename, data)
2236 2238
2237 2239 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2238 2240 """write ``data`` into ``filename`` in the working directory
2239 2241
2240 2242 This returns length of written (maybe decoded) data.
2241 2243 """
2242 2244 data = self._filter(self._decodefilterpats, filename, data)
2243 2245 if b'l' in flags:
2244 2246 self.wvfs.symlink(data, filename)
2245 2247 else:
2246 2248 self.wvfs.write(
2247 2249 filename, data, backgroundclose=backgroundclose, **kwargs
2248 2250 )
2249 2251 if b'x' in flags:
2250 2252 self.wvfs.setflags(filename, False, True)
2251 2253 else:
2252 2254 self.wvfs.setflags(filename, False, False)
2253 2255 return len(data)
2254 2256
2255 2257 def wwritedata(self, filename, data):
2256 2258 return self._filter(self._decodefilterpats, filename, data)
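# Hedged sketch of working-directory I/O through the decode/encode filters
# (file name illustrative; the round trip below assumes no [encode]/[decode]
# filters are configured).  Flags are b'' for a regular file, b'l' for a
# symlink and b'x' for an executable file.
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
written = repo.wwrite(b'notes.txt', b'hello\n', b'')  # returns bytes written
assert repo.wread(b'notes.txt') == b'hello\n'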
2257 2259
2258 2260 def currenttransaction(self):
2259 2261 """return the current transaction or None if non exists"""
2260 2262 if self._transref:
2261 2263 tr = self._transref()
2262 2264 else:
2263 2265 tr = None
2264 2266
2265 2267 if tr and tr.running():
2266 2268 return tr
2267 2269 return None
2268 2270
2269 2271 def transaction(self, desc, report=None):
2270 2272 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2271 2273 b'devel', b'check-locks'
2272 2274 ):
2273 2275 if self._currentlock(self._lockref) is None:
2274 2276 raise error.ProgrammingError(b'transaction requires locking')
2275 2277 tr = self.currenttransaction()
2276 2278 if tr is not None:
2277 2279 return tr.nest(name=desc)
2278 2280
2279 2281 # abort here if the journal already exists
2280 2282 if self.svfs.exists(b"journal"):
2281 2283 raise error.RepoError(
2282 2284 _(b"abandoned transaction found"),
2283 2285 hint=_(b"run 'hg recover' to clean up transaction"),
2284 2286 )
2285 2287
2286 2288 idbase = b"%.40f#%f" % (random.random(), time.time())
2287 2289 ha = hex(hashutil.sha1(idbase).digest())
2288 2290 txnid = b'TXN:' + ha
2289 2291 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2290 2292
2291 2293 self._writejournal(desc)
2292 2294 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2293 2295 if report:
2294 2296 rp = report
2295 2297 else:
2296 2298 rp = self.ui.warn
2297 2299 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2298 2300 # we must avoid cyclic reference between repo and transaction.
2299 2301 reporef = weakref.ref(self)
2300 2302 # Code to track tag movement
2301 2303 #
2302 2304 # Since tags are all handled as file content, it is actually quite hard
2303 2305 # to track these movements from a code perspective. So we fall back to
2304 2306 # tracking at the repository level. One could envision tracking changes
2305 2307 # to the '.hgtags' file through changegroup application, but that fails
2306 2308 # to cope with cases where a transaction exposes new heads without a
2307 2309 # changegroup being involved (e.g. phase movement).
2308 2310 #
2309 2311 # For now, We gate the feature behind a flag since this likely comes
2310 2312 # with performance impacts. The current code runs more often than needed
2311 2313 # and does not use caches as much as it could. The current focus is on
2312 2314 # the behavior of the feature so we disable it by default. The flag
2313 2315 # will be removed when we are happy with the performance impact.
2314 2316 #
2315 2317 # Once this feature is no longer experimental move the following
2316 2318 # documentation to the appropriate help section:
2317 2319 #
2318 2320 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2319 2321 # tags (new or changed or deleted tags). In addition the details of
2320 2322 # these changes are made available in a file at:
2321 2323 # ``REPOROOT/.hg/changes/tags.changes``.
2322 2324 # Make sure you check for HG_TAG_MOVED before reading that file as it
2323 2325 # might exist from a previous transaction even if no tag were touched
2324 2326 # in this one. Changes are recorded in a line-based format::
2325 2327 #
2326 2328 # <action> <hex-node> <tag-name>\n
2327 2329 #
2328 2330 # Actions are defined as follows:
2329 2331 # "-R": tag is removed,
2330 2332 # "+A": tag is added,
2331 2333 # "-M": tag is moved (old value),
2332 2334 # "+M": tag is moved (new value),
2333 2335 tracktags = lambda x: None
2334 2336 # experimental config: experimental.hook-track-tags
2335 2337 shouldtracktags = self.ui.configbool(
2336 2338 b'experimental', b'hook-track-tags'
2337 2339 )
2338 2340 if desc != b'strip' and shouldtracktags:
2339 2341 oldheads = self.changelog.headrevs()
2340 2342
2341 2343 def tracktags(tr2):
2342 2344 repo = reporef()
2343 2345 assert repo is not None # help pytype
2344 2346 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2345 2347 newheads = repo.changelog.headrevs()
2346 2348 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2347 2349 # note: we compare lists here;
2348 2350 # as we do it only once, building a set would not be cheaper
2349 2351 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2350 2352 if changes:
2351 2353 tr2.hookargs[b'tag_moved'] = b'1'
2352 2354 with repo.vfs(
2353 2355 b'changes/tags.changes', b'w', atomictemp=True
2354 2356 ) as changesfile:
2355 2357 # note: we do not register the file with the transaction
2356 2358 # because we need it to still exist when the transaction
2357 2359 # is closed (for txnclose hooks)
2358 2360 tagsmod.writediff(changesfile, changes)
2359 2361
2360 2362 def validate(tr2):
2361 2363 """will run pre-closing hooks"""
2362 2364 # XXX the transaction API is a bit lacking here so we take a hacky
2363 2365 # path for now
2364 2366 #
2365 2367 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2366 2368 # dict is copied before these run. In addition we need the data
2367 2369 # available to in-memory hooks too.
2368 2370 #
2369 2371 # Moreover, we also need to make sure this runs before txnclose
2370 2372 # hooks and there is no "pending" mechanism that would execute
2371 2373 # logic only if hooks are about to run.
2372 2374 #
2373 2375 # Fixing this limitation of the transaction is also needed to track
2374 2376 # other families of changes (bookmarks, phases, obsolescence).
2375 2377 #
2376 2378 # This will have to be fixed before we remove the experimental
2377 2379 # gating.
2378 2380 tracktags(tr2)
2379 2381 repo = reporef()
2380 2382 assert repo is not None # help pytype
2381 2383
2382 2384 singleheadopt = (b'experimental', b'single-head-per-branch')
2383 2385 singlehead = repo.ui.configbool(*singleheadopt)
2384 2386 if singlehead:
2385 2387 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2386 2388 accountclosed = singleheadsub.get(
2387 2389 b"account-closed-heads", False
2388 2390 )
2389 2391 if singleheadsub.get(b"public-changes-only", False):
2390 2392 filtername = b"immutable"
2391 2393 else:
2392 2394 filtername = b"visible"
2393 2395 scmutil.enforcesinglehead(
2394 2396 repo, tr2, desc, accountclosed, filtername
2395 2397 )
2396 2398 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2397 2399 for name, (old, new) in sorted(
2398 2400 tr.changes[b'bookmarks'].items()
2399 2401 ):
2400 2402 args = tr.hookargs.copy()
2401 2403 args.update(bookmarks.preparehookargs(name, old, new))
2402 2404 repo.hook(
2403 2405 b'pretxnclose-bookmark',
2404 2406 throw=True,
2405 2407 **pycompat.strkwargs(args)
2406 2408 )
2407 2409 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2408 2410 cl = repo.unfiltered().changelog
2409 2411 for revs, (old, new) in tr.changes[b'phases']:
2410 2412 for rev in revs:
2411 2413 args = tr.hookargs.copy()
2412 2414 node = hex(cl.node(rev))
2413 2415 args.update(phases.preparehookargs(node, old, new))
2414 2416 repo.hook(
2415 2417 b'pretxnclose-phase',
2416 2418 throw=True,
2417 2419 **pycompat.strkwargs(args)
2418 2420 )
2419 2421
2420 2422 repo.hook(
2421 2423 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2422 2424 )
2423 2425
2424 2426 def releasefn(tr, success):
2425 2427 repo = reporef()
2426 2428 if repo is None:
2427 2429 # If the repo has been GC'd (and this release function is being
2428 2430 # called from transaction.__del__), there's not much we can do,
2429 2431 # so just leave the unfinished transaction there and let the
2430 2432 # user run `hg recover`.
2431 2433 return
2432 2434 if success:
2433 2435 # this should be explicitly invoked here, because
2434 2436 # in-memory changes aren't written out at closing
2435 2437 # transaction, if tr.addfilegenerator (via
2436 2438 # dirstate.write or so) isn't invoked while
2437 2439 # transaction running
2438 2440 repo.dirstate.write(None)
2439 2441 else:
2440 2442 # discard all changes (including ones already written
2441 2443 # out) in this transaction
2442 2444 narrowspec.restorebackup(self, b'journal.narrowspec')
2443 2445 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2444 2446 repo.dirstate.restorebackup(None, b'journal.dirstate')
2445 2447
2446 2448 repo.invalidate(clearfilecache=True)
2447 2449
2448 2450 tr = transaction.transaction(
2449 2451 rp,
2450 2452 self.svfs,
2451 2453 vfsmap,
2452 2454 b"journal",
2453 2455 b"undo",
2454 2456 aftertrans(renames),
2455 2457 self.store.createmode,
2456 2458 validator=validate,
2457 2459 releasefn=releasefn,
2458 2460 checkambigfiles=_cachedfiles,
2459 2461 name=desc,
2460 2462 )
2461 2463 tr.changes[b'origrepolen'] = len(self)
2462 2464 tr.changes[b'obsmarkers'] = set()
2463 2465 tr.changes[b'phases'] = []
2464 2466 tr.changes[b'bookmarks'] = {}
2465 2467
2466 2468 tr.hookargs[b'txnid'] = txnid
2467 2469 tr.hookargs[b'txnname'] = desc
2468 2470 tr.hookargs[b'changes'] = tr.changes
2469 2471 # note: writing the fncache only during finalize means that the file is
2470 2472 # outdated when running hooks. As fncache is used for streaming clones,
2471 2473 # this is not expected to break anything that happens during the hooks.
2472 2474 tr.addfinalize(b'flush-fncache', self.store.write)
2473 2475
2474 2476 def txnclosehook(tr2):
2475 2477 """To be run if transaction is successful, will schedule a hook run"""
2476 2478 # Don't reference tr2 in hook() so we don't hold a reference.
2477 2479 # This reduces memory consumption when there are multiple
2478 2480 # transactions per lock. This can likely go away if issue5045
2479 2481 # fixes the function accumulation.
2480 2482 hookargs = tr2.hookargs
2481 2483
2482 2484 def hookfunc(unused_success):
2483 2485 repo = reporef()
2484 2486 assert repo is not None # help pytype
2485 2487
2486 2488 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2487 2489 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2488 2490 for name, (old, new) in bmchanges:
2489 2491 args = tr.hookargs.copy()
2490 2492 args.update(bookmarks.preparehookargs(name, old, new))
2491 2493 repo.hook(
2492 2494 b'txnclose-bookmark',
2493 2495 throw=False,
2494 2496 **pycompat.strkwargs(args)
2495 2497 )
2496 2498
2497 2499 if hook.hashook(repo.ui, b'txnclose-phase'):
2498 2500 cl = repo.unfiltered().changelog
2499 2501 phasemv = sorted(
2500 2502 tr.changes[b'phases'], key=lambda r: r[0][0]
2501 2503 )
2502 2504 for revs, (old, new) in phasemv:
2503 2505 for rev in revs:
2504 2506 args = tr.hookargs.copy()
2505 2507 node = hex(cl.node(rev))
2506 2508 args.update(phases.preparehookargs(node, old, new))
2507 2509 repo.hook(
2508 2510 b'txnclose-phase',
2509 2511 throw=False,
2510 2512 **pycompat.strkwargs(args)
2511 2513 )
2512 2514
2513 2515 repo.hook(
2514 2516 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2515 2517 )
2516 2518
2517 2519 repo = reporef()
2518 2520 assert repo is not None # help pytype
2519 2521 repo._afterlock(hookfunc)
2520 2522
2521 2523 tr.addfinalize(b'txnclose-hook', txnclosehook)
2522 2524 # Include a leading "-" to make it happen before the transaction summary
2523 2525 # reports registered via scmutil.registersummarycallback() whose names
2524 2526 # are 00-txnreport etc. That way, the caches will be warm when the
2525 2527 # callbacks run.
2526 2528 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2527 2529
2528 2530 def txnaborthook(tr2):
2529 2531 """To be run if transaction is aborted"""
2530 2532 repo = reporef()
2531 2533 assert repo is not None # help pytype
2532 2534 repo.hook(
2533 2535 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2534 2536 )
2535 2537
2536 2538 tr.addabort(b'txnabort-hook', txnaborthook)
2537 2539 # avoid eager cache invalidation. in-memory data should be identical
2538 2540 # to stored data if transaction has no error.
2539 2541 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2540 2542 self._transref = weakref.ref(tr)
2541 2543 scmutil.registersummarycallback(self, tr, desc)
2542 2544 return tr
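# Hedged usage sketch: a transaction must be opened while holding the store
# lock (and usually the wlock); the locks and the transaction object are all
# context managers.  The path and description string are illustrative.
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
with repo.wlock(), repo.lock(), repo.transaction(b'example-change') as tr:
    # write revlog / bookmark / phase data through tr here ...
    tr.hookargs[b'note'] = b'illustrative'
# on a clean exit the transaction is closed and txnclose hooks fire;
# on an exception it is aborted and txnabort hooks fire instead.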
2543 2545
2544 2546 def _journalfiles(self):
2545 2547 return (
2546 2548 (self.svfs, b'journal'),
2547 2549 (self.svfs, b'journal.narrowspec'),
2548 2550 (self.vfs, b'journal.narrowspec.dirstate'),
2549 2551 (self.vfs, b'journal.dirstate'),
2550 2552 (self.vfs, b'journal.branch'),
2551 2553 (self.vfs, b'journal.desc'),
2552 2554 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2553 2555 (self.svfs, b'journal.phaseroots'),
2554 2556 )
2555 2557
2556 2558 def undofiles(self):
2557 2559 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2558 2560
2559 2561 @unfilteredmethod
2560 2562 def _writejournal(self, desc):
2561 2563 self.dirstate.savebackup(None, b'journal.dirstate')
2562 2564 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2563 2565 narrowspec.savebackup(self, b'journal.narrowspec')
2564 2566 self.vfs.write(
2565 2567 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2566 2568 )
2567 2569 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2568 2570 bookmarksvfs = bookmarks.bookmarksvfs(self)
2569 2571 bookmarksvfs.write(
2570 2572 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2571 2573 )
2572 2574 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2573 2575
2574 2576 def recover(self):
2575 2577 with self.lock():
2576 2578 if self.svfs.exists(b"journal"):
2577 2579 self.ui.status(_(b"rolling back interrupted transaction\n"))
2578 2580 vfsmap = {
2579 2581 b'': self.svfs,
2580 2582 b'plain': self.vfs,
2581 2583 }
2582 2584 transaction.rollback(
2583 2585 self.svfs,
2584 2586 vfsmap,
2585 2587 b"journal",
2586 2588 self.ui.warn,
2587 2589 checkambigfiles=_cachedfiles,
2588 2590 )
2589 2591 self.invalidate()
2590 2592 return True
2591 2593 else:
2592 2594 self.ui.warn(_(b"no interrupted transaction available\n"))
2593 2595 return False
2594 2596
2595 2597 def rollback(self, dryrun=False, force=False):
2596 2598 wlock = lock = dsguard = None
2597 2599 try:
2598 2600 wlock = self.wlock()
2599 2601 lock = self.lock()
2600 2602 if self.svfs.exists(b"undo"):
2601 2603 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2602 2604
2603 2605 return self._rollback(dryrun, force, dsguard)
2604 2606 else:
2605 2607 self.ui.warn(_(b"no rollback information available\n"))
2606 2608 return 1
2607 2609 finally:
2608 2610 release(dsguard, lock, wlock)
2609 2611
2610 2612 @unfilteredmethod # Until we get smarter cache management
2611 2613 def _rollback(self, dryrun, force, dsguard):
2612 2614 ui = self.ui
2613 2615 try:
2614 2616 args = self.vfs.read(b'undo.desc').splitlines()
2615 2617 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2616 2618 if len(args) >= 3:
2617 2619 detail = args[2]
2618 2620 oldtip = oldlen - 1
2619 2621
2620 2622 if detail and ui.verbose:
2621 2623 msg = _(
2622 2624 b'repository tip rolled back to revision %d'
2623 2625 b' (undo %s: %s)\n'
2624 2626 ) % (oldtip, desc, detail)
2625 2627 else:
2626 2628 msg = _(
2627 2629 b'repository tip rolled back to revision %d (undo %s)\n'
2628 2630 ) % (oldtip, desc)
2629 2631 except IOError:
2630 2632 msg = _(b'rolling back unknown transaction\n')
2631 2633 desc = None
2632 2634
2633 2635 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2634 2636 raise error.Abort(
2635 2637 _(
2636 2638 b'rollback of last commit while not checked out '
2637 2639 b'may lose data'
2638 2640 ),
2639 2641 hint=_(b'use -f to force'),
2640 2642 )
2641 2643
2642 2644 ui.status(msg)
2643 2645 if dryrun:
2644 2646 return 0
2645 2647
2646 2648 parents = self.dirstate.parents()
2647 2649 self.destroying()
2648 2650 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2649 2651 transaction.rollback(
2650 2652 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2651 2653 )
2652 2654 bookmarksvfs = bookmarks.bookmarksvfs(self)
2653 2655 if bookmarksvfs.exists(b'undo.bookmarks'):
2654 2656 bookmarksvfs.rename(
2655 2657 b'undo.bookmarks', b'bookmarks', checkambig=True
2656 2658 )
2657 2659 if self.svfs.exists(b'undo.phaseroots'):
2658 2660 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2659 2661 self.invalidate()
2660 2662
2661 2663 has_node = self.changelog.index.has_node
2662 2664 parentgone = any(not has_node(p) for p in parents)
2663 2665 if parentgone:
2664 2666 # prevent dirstateguard from overwriting already restored one
2665 2667 dsguard.close()
2666 2668
2667 2669 narrowspec.restorebackup(self, b'undo.narrowspec')
2668 2670 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2669 2671 self.dirstate.restorebackup(None, b'undo.dirstate')
2670 2672 try:
2671 2673 branch = self.vfs.read(b'undo.branch')
2672 2674 self.dirstate.setbranch(encoding.tolocal(branch))
2673 2675 except IOError:
2674 2676 ui.warn(
2675 2677 _(
2676 2678 b'named branch could not be reset: '
2677 2679 b'current branch is still \'%s\'\n'
2678 2680 )
2679 2681 % self.dirstate.branch()
2680 2682 )
2681 2683
2682 2684 parents = tuple([p.rev() for p in self[None].parents()])
2683 2685 if len(parents) > 1:
2684 2686 ui.status(
2685 2687 _(
2686 2688 b'working directory now based on '
2687 2689 b'revisions %d and %d\n'
2688 2690 )
2689 2691 % parents
2690 2692 )
2691 2693 else:
2692 2694 ui.status(
2693 2695 _(b'working directory now based on revision %d\n') % parents
2694 2696 )
2695 2697 mergestatemod.mergestate.clean(self)
2696 2698
2697 2699 # TODO: if we know which new heads may result from this rollback, pass
2698 2700 # them to destroy(), which will prevent the branchhead cache from being
2699 2701 # invalidated.
2700 2702 self.destroyed()
2701 2703 return 0
2702 2704
2703 2705 def _buildcacheupdater(self, newtransaction):
2704 2706 """called during transaction to build the callback updating cache
2705 2707
2706 2708 Lives on the repository to help extension who might want to augment
2707 2709 this logic. For this purpose, the created transaction is passed to the
2708 2710 method.
2709 2711 """
2710 2712 # we must avoid cyclic reference between repo and transaction.
2711 2713 reporef = weakref.ref(self)
2712 2714
2713 2715 def updater(tr):
2714 2716 repo = reporef()
2715 2717 assert repo is not None # help pytype
2716 2718 repo.updatecaches(tr)
2717 2719
2718 2720 return updater
2719 2721
2720 2722 @unfilteredmethod
2721 2723 def updatecaches(self, tr=None, full=False):
2722 2724 """warm appropriate caches
2723 2725
2724 2726 If this function is called after a transaction closed, the transaction
2725 2727 will be available in the 'tr' argument. This can be used to selectively
2726 2728 update caches relevant to the changes in that transaction.
2727 2729
2728 2730 If 'full' is set, make sure all caches the function knows about have
2729 2731 up-to-date data. Even the ones usually loaded more lazily.
2730 2732
2731 2733 The `full` argument can take a special "post-clone" value. In this case
2732 2734 the cache warming is done after a clone, and some of the slower caches may
2733 2735 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2734 2736 as we plan for a cleaner way to deal with this in 5.9.
2735 2737 """
2736 2738 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2737 2739 # During strip, many caches are invalid, but a
2738 2740 # later call to `destroyed` will refresh them.
2739 2741 return
2740 2742
2741 2743 if tr is None or tr.changes[b'origrepolen'] < len(self):
2742 2744 # accessing the 'served' branchmap should refresh all the others,
2743 2745 self.ui.debug(b'updating the branch cache\n')
2744 2746 self.filtered(b'served').branchmap()
2745 2747 self.filtered(b'served.hidden').branchmap()
2746 2748
2747 2749 if full:
2748 2750 unfi = self.unfiltered()
2749 2751
2750 2752 self.changelog.update_caches(transaction=tr)
2751 2753 self.manifestlog.update_caches(transaction=tr)
2752 2754
2753 2755 rbc = unfi.revbranchcache()
2754 2756 for r in unfi.changelog:
2755 2757 rbc.branchinfo(r)
2756 2758 rbc.write()
2757 2759
2758 2760 # ensure the working copy parents are in the manifestfulltextcache
2759 2761 for ctx in self[b'.'].parents():
2760 2762 ctx.manifest() # accessing the manifest is enough
2761 2763
2762 2764 if not full == b"post-clone":
2763 2765 # accessing fnode cache warms the cache
2764 2766 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2765 2767 # accessing tags warm the cache
2766 2768 self.tags()
2767 2769 self.filtered(b'served').tags()
2768 2770
2769 2771 # The `full` arg is documented as updating even the lazily-loaded
2770 2772 # caches immediately, so we're forcing a write to cause these caches
2771 2773 # to be warmed up even if they haven't explicitly been requested
2772 2774 # yet (if they've never been used by hg, they won't ever have been
2773 2775 # written, even if they're a subset of another kind of cache that
2774 2776 # *has* been used).
2775 2777 for filt in repoview.filtertable.keys():
2776 2778 filtered = self.filtered(filt)
2777 2779 filtered.branchmap().write(filtered)
2778 2780
2779 2781 def invalidatecaches(self):
2780 2782
2781 2783 if '_tagscache' in vars(self):
2782 2784 # can't use delattr on proxy
2783 2785 del self.__dict__['_tagscache']
2784 2786
2785 2787 self._branchcaches.clear()
2786 2788 self.invalidatevolatilesets()
2787 2789 self._sparsesignaturecache.clear()
2788 2790
2789 2791 def invalidatevolatilesets(self):
2790 2792 self.filteredrevcache.clear()
2791 2793 obsolete.clearobscaches(self)
2792 2794 self._quick_access_changeid_invalidate()
2793 2795
2794 2796 def invalidatedirstate(self):
2795 2797 """Invalidates the dirstate, causing the next call to dirstate
2796 2798 to check if it was modified since the last time it was read,
2797 2799 rereading it if it has.
2798 2800
2799 2801 This is different from dirstate.invalidate() in that it doesn't always
2800 2802 reread the dirstate. Use dirstate.invalidate() if you want to
2801 2803 explicitly read the dirstate again (i.e. restoring it to a previous
2802 2804 known good state)."""
2803 2805 if hasunfilteredcache(self, 'dirstate'):
2804 2806 for k in self.dirstate._filecache:
2805 2807 try:
2806 2808 delattr(self.dirstate, k)
2807 2809 except AttributeError:
2808 2810 pass
2809 2811 delattr(self.unfiltered(), 'dirstate')
2810 2812
2811 2813 def invalidate(self, clearfilecache=False):
2812 2814 """Invalidates both store and non-store parts other than dirstate
2813 2815
2814 2816 If a transaction is running, invalidation of store is omitted,
2815 2817 because discarding in-memory changes might cause inconsistency
2816 2818 (e.g. an incomplete fncache causes unintentional failure, but a
2817 2819 redundant one doesn't).
2818 2820 """
2819 2821 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2820 2822 for k in list(self._filecache.keys()):
2821 2823 # dirstate is invalidated separately in invalidatedirstate()
2822 2824 if k == b'dirstate':
2823 2825 continue
2824 2826 if (
2825 2827 k == b'changelog'
2826 2828 and self.currenttransaction()
2827 2829 and self.changelog._delayed
2828 2830 ):
2829 2831 # The changelog object may store unwritten revisions. We don't
2830 2832 # want to lose them.
2831 2833 # TODO: Solve the problem instead of working around it.
2832 2834 continue
2833 2835
2834 2836 if clearfilecache:
2835 2837 del self._filecache[k]
2836 2838 try:
2837 2839 delattr(unfiltered, k)
2838 2840 except AttributeError:
2839 2841 pass
2840 2842 self.invalidatecaches()
2841 2843 if not self.currenttransaction():
2842 2844 # TODO: Changing contents of store outside transaction
2843 2845 # causes inconsistency. We should make in-memory store
2844 2846 # changes detectable, and abort if changed.
2845 2847 self.store.invalidatecaches()
2846 2848
2847 2849 def invalidateall(self):
2848 2850 """Fully invalidates both store and non-store parts, causing the
2849 2851 subsequent operation to reread any outside changes."""
2850 2852 # extension should hook this to invalidate its caches
2851 2853 self.invalidate()
2852 2854 self.invalidatedirstate()
2853 2855
2854 2856 @unfilteredmethod
2855 2857 def _refreshfilecachestats(self, tr):
2856 2858 """Reload stats of cached files so that they are flagged as valid"""
2857 2859 for k, ce in self._filecache.items():
2858 2860 k = pycompat.sysstr(k)
2859 2861 if k == 'dirstate' or k not in self.__dict__:
2860 2862 continue
2861 2863 ce.refresh()
2862 2864
2863 2865 def _lock(
2864 2866 self,
2865 2867 vfs,
2866 2868 lockname,
2867 2869 wait,
2868 2870 releasefn,
2869 2871 acquirefn,
2870 2872 desc,
2871 2873 ):
2872 2874 timeout = 0
2873 2875 warntimeout = 0
2874 2876 if wait:
2875 2877 timeout = self.ui.configint(b"ui", b"timeout")
2876 2878 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2877 2879 # internal config: ui.signal-safe-lock
2878 2880 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2879 2881
2880 2882 l = lockmod.trylock(
2881 2883 self.ui,
2882 2884 vfs,
2883 2885 lockname,
2884 2886 timeout,
2885 2887 warntimeout,
2886 2888 releasefn=releasefn,
2887 2889 acquirefn=acquirefn,
2888 2890 desc=desc,
2889 2891 signalsafe=signalsafe,
2890 2892 )
2891 2893 return l
2892 2894
2893 2895 def _afterlock(self, callback):
2894 2896 """add a callback to be run when the repository is fully unlocked
2895 2897
2896 2898 The callback will be executed when the outermost lock is released
2897 2899 (with wlock being higher level than 'lock')."""
2898 2900 for ref in (self._wlockref, self._lockref):
2899 2901 l = ref and ref()
2900 2902 if l and l.held:
2901 2903 l.postrelease.append(callback)
2902 2904 break
2903 2905 else: # no lock has been found.
2904 2906 callback(True)
2905 2907
2906 2908 def lock(self, wait=True):
2907 2909 """Lock the repository store (.hg/store) and return a weak reference
2908 2910 to the lock. Use this before modifying the store (e.g. committing or
2909 2911 stripping). If you are opening a transaction, get a lock as well.
2910 2912
2911 2913 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2912 2914 'wlock' first to avoid a dead-lock hazard."""
2913 2915 l = self._currentlock(self._lockref)
2914 2916 if l is not None:
2915 2917 l.lock()
2916 2918 return l
2917 2919
2918 2920 l = self._lock(
2919 2921 vfs=self.svfs,
2920 2922 lockname=b"lock",
2921 2923 wait=wait,
2922 2924 releasefn=None,
2923 2925 acquirefn=self.invalidate,
2924 2926 desc=_(b'repository %s') % self.origroot,
2925 2927 )
2926 2928 self._lockref = weakref.ref(l)
2927 2929 return l
2928 2930
2929 2931 def wlock(self, wait=True):
2930 2932 """Lock the non-store parts of the repository (everything under
2931 2933 .hg except .hg/store) and return a weak reference to the lock.
2932 2934
2933 2935 Use this before modifying files in .hg.
2934 2936
2935 2937 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2936 2938 'wlock' first to avoid a dead-lock hazard."""
2937 2939 l = self._wlockref() if self._wlockref else None
2938 2940 if l is not None and l.held:
2939 2941 l.lock()
2940 2942 return l
2941 2943
2942 2944 # We do not need to check for non-waiting lock acquisition. Such
2943 2945 # acquisition would not cause dead-lock as it would just fail.
2944 2946 if wait and (
2945 2947 self.ui.configbool(b'devel', b'all-warnings')
2946 2948 or self.ui.configbool(b'devel', b'check-locks')
2947 2949 ):
2948 2950 if self._currentlock(self._lockref) is not None:
2949 2951 self.ui.develwarn(b'"wlock" acquired after "lock"')
2950 2952
2951 2953 def unlock():
2952 2954 if self.dirstate.pendingparentchange():
2953 2955 self.dirstate.invalidate()
2954 2956 else:
2955 2957 self.dirstate.write(None)
2956 2958
2957 2959 self._filecache[b'dirstate'].refresh()
2958 2960
2959 2961 l = self._lock(
2960 2962 self.vfs,
2961 2963 b"wlock",
2962 2964 wait,
2963 2965 unlock,
2964 2966 self.invalidatedirstate,
2965 2967 _(b'working directory of %s') % self.origroot,
2966 2968 )
2967 2969 self._wlockref = weakref.ref(l)
2968 2970 return l
2969 2971
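# Lock-ordering sketch (illustrative): callers needing both locks take
# 'wlock' before 'lock', exactly as commit() below does:
#
#     with repo.wlock(), repo.lock():
#         ...  # mutate working copy and store together
#
# Taking them in the opposite order triggers the devel 'check-locks'
# warning emitted above.
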
2970 2972 def _currentlock(self, lockref):
2971 2973 """Returns the lock if it's held, or None if it's not."""
2972 2974 if lockref is None:
2973 2975 return None
2974 2976 l = lockref()
2975 2977 if l is None or not l.held:
2976 2978 return None
2977 2979 return l
2978 2980
2979 2981 def currentwlock(self):
2980 2982 """Returns the wlock if it's held, or None if it's not."""
2981 2983 return self._currentlock(self._wlockref)
2982 2984
2983 2985 def checkcommitpatterns(self, wctx, match, status, fail):
2984 2986 """check for commit arguments that aren't committable"""
2985 2987 if match.isexact() or match.prefix():
2986 2988 matched = set(status.modified + status.added + status.removed)
2987 2989
2988 2990 for f in match.files():
2989 2991 f = self.dirstate.normalize(f)
2990 2992 if f == b'.' or f in matched or f in wctx.substate:
2991 2993 continue
2992 2994 if f in status.deleted:
2993 2995 fail(f, _(b'file not found!'))
2994 2996 # Is it a directory that exists or used to exist?
2995 2997 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2996 2998 d = f + b'/'
2997 2999 for mf in matched:
2998 3000 if mf.startswith(d):
2999 3001 break
3000 3002 else:
3001 3003 fail(f, _(b"no match under directory!"))
3002 3004 elif f not in self.dirstate:
3003 3005 fail(f, _(b"file not tracked!"))
3004 3006
3005 3007 @unfilteredmethod
3006 3008 def commit(
3007 3009 self,
3008 3010 text=b"",
3009 3011 user=None,
3010 3012 date=None,
3011 3013 match=None,
3012 3014 force=False,
3013 3015 editor=None,
3014 3016 extra=None,
3015 3017 ):
3016 3018 """Add a new revision to current repository.
3017 3019
3018 3020 Revision information is gathered from the working directory,
3019 3021 match can be used to filter the committed files. If editor is
3020 3022 supplied, it is called to get a commit message.
3021 3023 """
3022 3024 if extra is None:
3023 3025 extra = {}
3024 3026
3025 3027 def fail(f, msg):
3026 3028 raise error.InputError(b'%s: %s' % (f, msg))
3027 3029
3028 3030 if not match:
3029 3031 match = matchmod.always()
3030 3032
3031 3033 if not force:
3032 3034 match.bad = fail
3033 3035
3034 3036 # lock() for recent changelog (see issue4368)
3035 3037 with self.wlock(), self.lock():
3036 3038 wctx = self[None]
3037 3039 merge = len(wctx.parents()) > 1
3038 3040
3039 3041 if not force and merge and not match.always():
3040 3042 raise error.Abort(
3041 3043 _(
3042 3044 b'cannot partially commit a merge '
3043 3045 b'(do not specify files or patterns)'
3044 3046 )
3045 3047 )
3046 3048
3047 3049 status = self.status(match=match, clean=force)
3048 3050 if force:
3049 3051 status.modified.extend(
3050 3052 status.clean
3051 3053 ) # mq may commit clean files
3052 3054
3053 3055 # check subrepos
3054 3056 subs, commitsubs, newstate = subrepoutil.precommit(
3055 3057 self.ui, wctx, status, match, force=force
3056 3058 )
3057 3059
3058 3060 # make sure all explicit patterns are matched
3059 3061 if not force:
3060 3062 self.checkcommitpatterns(wctx, match, status, fail)
3061 3063
3062 3064 cctx = context.workingcommitctx(
3063 3065 self, status, text, user, date, extra
3064 3066 )
3065 3067
3066 3068 ms = mergestatemod.mergestate.read(self)
3067 3069 mergeutil.checkunresolved(ms)
3068 3070
3069 3071 # internal config: ui.allowemptycommit
3070 3072 if cctx.isempty() and not self.ui.configbool(
3071 3073 b'ui', b'allowemptycommit'
3072 3074 ):
3073 3075 self.ui.debug(b'nothing to commit, clearing merge state\n')
3074 3076 ms.reset()
3075 3077 return None
3076 3078
3077 3079 if merge and cctx.deleted():
3078 3080 raise error.Abort(_(b"cannot commit merge with missing files"))
3079 3081
3080 3082 if editor:
3081 3083 cctx._text = editor(self, cctx, subs)
3082 3084 edited = text != cctx._text
3083 3085
3084 3086 # Save commit message in case this transaction gets rolled back
3085 3087 # (e.g. by a pretxncommit hook). Leave the content alone on
3086 3088 # the assumption that the user will use the same editor again.
3087 3089 msgfn = self.savecommitmessage(cctx._text)
3088 3090
3089 3091 # commit subs and write new state
3090 3092 if subs:
3091 3093 uipathfn = scmutil.getuipathfn(self)
3092 3094 for s in sorted(commitsubs):
3093 3095 sub = wctx.sub(s)
3094 3096 self.ui.status(
3095 3097 _(b'committing subrepository %s\n')
3096 3098 % uipathfn(subrepoutil.subrelpath(sub))
3097 3099 )
3098 3100 sr = sub.commit(cctx._text, user, date)
3099 3101 newstate[s] = (newstate[s][0], sr)
3100 3102 subrepoutil.writestate(self, newstate)
3101 3103
3102 3104 p1, p2 = self.dirstate.parents()
3103 3105 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3104 3106 try:
3105 3107 self.hook(
3106 3108 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3107 3109 )
3108 3110 with self.transaction(b'commit'):
3109 3111 ret = self.commitctx(cctx, True)
3110 3112 # update bookmarks, dirstate and mergestate
3111 3113 bookmarks.update(self, [p1, p2], ret)
3112 3114 cctx.markcommitted(ret)
3113 3115 ms.reset()
3114 3116 except: # re-raises
3115 3117 if edited:
3116 3118 self.ui.write(
3117 3119 _(b'note: commit message saved in %s\n') % msgfn
3118 3120 )
3119 3121 self.ui.write(
3120 3122 _(
3121 3123 b"note: use 'hg commit --logfile "
3122 3124 b".hg/last-message.txt --edit' to reuse it\n"
3123 3125 )
3124 3126 )
3125 3127 raise
3126 3128
3127 3129 def commithook(unused_success):
3128 3130 # hack for commands that use a temporary commit (e.g. histedit)
3129 3131 # temporary commit got stripped before hook release
3130 3132 if self.changelog.hasnode(ret):
3131 3133 self.hook(
3132 3134 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3133 3135 )
3134 3136
3135 3137 self._afterlock(commithook)
3136 3138 return ret
3137 3139
3138 3140 @unfilteredmethod
3139 3141 def commitctx(self, ctx, error=False, origctx=None):
3140 3142 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3141 3143
3142 3144 @unfilteredmethod
3143 3145 def destroying(self):
3144 3146 """Inform the repository that nodes are about to be destroyed.
3145 3147 Intended for use by strip and rollback, so there's a common
3146 3148 place for anything that has to be done before destroying history.
3147 3149
3148 3150 This is mostly useful for saving state that is in memory and waiting
3149 3151 to be flushed when the current lock is released. Because a call to
3150 3152 destroyed is imminent, the repo will be invalidated causing those
3151 3153 changes to stay in memory (waiting for the next unlock), or vanish
3152 3154 completely.
3153 3155 """
3154 3156 # When using the same lock to commit and strip, the phasecache is left
3155 3157 # dirty after committing. Then when we strip, the repo is invalidated,
3156 3158 # causing those changes to disappear.
3157 3159 if '_phasecache' in vars(self):
3158 3160 self._phasecache.write()
3159 3161
3160 3162 @unfilteredmethod
3161 3163 def destroyed(self):
3162 3164 """Inform the repository that nodes have been destroyed.
3163 3165 Intended for use by strip and rollback, so there's a common
3164 3166 place for anything that has to be done after destroying history.
3165 3167 """
3166 3168 # When one tries to:
3167 3169 # 1) destroy nodes thus calling this method (e.g. strip)
3168 3170 # 2) use phasecache somewhere (e.g. commit)
3169 3171 #
3170 3172 # then 2) will fail because the phasecache contains nodes that were
3171 3173 # removed. We can either remove phasecache from the filecache,
3172 3174 # causing it to reload next time it is accessed, or simply filter
3173 3175 # the removed nodes now and write the updated cache.
3174 3176 self._phasecache.filterunknown(self)
3175 3177 self._phasecache.write()
3176 3178
3177 3179 # refresh all repository caches
3178 3180 self.updatecaches()
3179 3181
3180 3182 # Ensure the persistent tag cache is updated. Doing it now
3181 3183 # means that the tag cache only has to worry about destroyed
3182 3184 # heads immediately after a strip/rollback. That in turn
3183 3185 # guarantees that "cachetip == currenttip" (comparing both rev
3184 3186 # and node) always means no nodes have been added or destroyed.
3185 3187
3186 3188 # XXX this is suboptimal when qrefresh'ing: we strip the current
3187 3189 # head, refresh the tag cache, then immediately add a new head.
3188 3190 # But I think doing it this way is necessary for the "instant
3189 3191 # tag cache retrieval" case to work.
3190 3192 self.invalidate()
3191 3193
3192 3194 def status(
3193 3195 self,
3194 3196 node1=b'.',
3195 3197 node2=None,
3196 3198 match=None,
3197 3199 ignored=False,
3198 3200 clean=False,
3199 3201 unknown=False,
3200 3202 listsubrepos=False,
3201 3203 ):
3202 3204 '''a convenience method that calls node1.status(node2)'''
3203 3205 return self[node1].status(
3204 3206 node2, match, ignored, clean, unknown, listsubrepos
3205 3207 )
3206 3208
3207 3209 def addpostdsstatus(self, ps):
3208 3210 """Add a callback to run within the wlock, at the point at which status
3209 3211 fixups happen.
3210 3212
3211 3213 On status completion, callback(wctx, status) will be called with the
3212 3214 wlock held, unless the dirstate has changed from underneath or the wlock
3213 3215 couldn't be grabbed.
3214 3216
3215 3217 Callbacks should not capture and use a cached copy of the dirstate --
3216 3218 it might change in the meantime. Instead, they should access the
3217 3219 dirstate via wctx.repo().dirstate.
3218 3220
3219 3221 This list is emptied out after each status run -- extensions should
3220 3222 make sure they add to this list each time dirstate.status is called.
3221 3223 Extensions should also make sure they don't call this for statuses
3222 3224 that don't involve the dirstate.
3223 3225 """
3224 3226
3225 3227 # The list is located here for uniqueness reasons -- it is actually
3226 3228 # managed by the workingctx, but that isn't unique per-repo.
3227 3229 self._postdsstatus.append(ps)
3228 3230
3229 3231 def postdsstatus(self):
3230 3232 """Used by workingctx to get the list of post-dirstate-status hooks."""
3231 3233 return self._postdsstatus
3232 3234
3233 3235 def clearpostdsstatus(self):
3234 3236 """Used by workingctx to clear post-dirstate-status hooks."""
3235 3237 del self._postdsstatus[:]
3236 3238
3237 3239 def heads(self, start=None):
3238 3240 if start is None:
3239 3241 cl = self.changelog
3240 3242 headrevs = reversed(cl.headrevs())
3241 3243 return [cl.node(rev) for rev in headrevs]
3242 3244
3243 3245 heads = self.changelog.heads(start)
3244 3246 # sort the output in rev descending order
3245 3247 return sorted(heads, key=self.changelog.rev, reverse=True)
3246 3248
3247 3249 def branchheads(self, branch=None, start=None, closed=False):
3248 3250 """return a (possibly filtered) list of heads for the given branch
3249 3251
3250 3252 Heads are returned in topological order, from newest to oldest.
3251 3253 If branch is None, use the dirstate branch.
3252 3254 If start is not None, return only heads reachable from start.
3253 3255 If closed is True, return heads that are marked as closed as well.
3254 3256 """
3255 3257 if branch is None:
3256 3258 branch = self[None].branch()
3257 3259 branches = self.branchmap()
3258 3260 if not branches.hasbranch(branch):
3259 3261 return []
3260 3262 # the cache returns heads ordered lowest to highest
3261 3263 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3262 3264 if start is not None:
3263 3265 # filter out the heads that cannot be reached from startrev
3264 3266 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3265 3267 bheads = [h for h in bheads if h in fbheads]
3266 3268 return bheads
3267 3269
3268 3270 def branches(self, nodes):
3269 3271 if not nodes:
3270 3272 nodes = [self.changelog.tip()]
3271 3273 b = []
3272 3274 for n in nodes:
3273 3275 t = n
3274 3276 while True:
3275 3277 p = self.changelog.parents(n)
3276 3278 if p[1] != self.nullid or p[0] == self.nullid:
3277 3279 b.append((t, n, p[0], p[1]))
3278 3280 break
3279 3281 n = p[0]
3280 3282 return b
3281 3283
3282 3284 def between(self, pairs):
3283 3285 r = []
3284 3286
3285 3287 for top, bottom in pairs:
3286 3288 n, l, i = top, [], 0
3287 3289 f = 1
3288 3290
3289 3291 while n != bottom and n != self.nullid:
3290 3292 p = self.changelog.parents(n)[0]
3291 3293 if i == f:
3292 3294 l.append(n)
3293 3295 f = f * 2
3294 3296 n = p
3295 3297 i += 1
3296 3298
3297 3299 r.append(l)
3298 3300
3299 3301 return r
3300 3302
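# The loop above walks the first-parent chain from `top` towards `bottom`
# and keeps the nodes found at exponentially growing distances (1, 2, 4,
# 8, ...), so the returned sample stays small for long ranges. For example,
# with a strictly linear history r0..r10, between([(r10, r0)]) returns
# [[r9, r8, r6, r2]]: the nodes 1, 2, 4 and 8 first-parent steps below r10.
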
3301 3303 def checkpush(self, pushop):
3302 3304 """Extensions can override this function if additional checks have
3303 3305 to be performed before pushing, or call it if they override push
3304 3306 command.
3305 3307 """
3306 3308
3307 3309 @unfilteredpropertycache
3308 3310 def prepushoutgoinghooks(self):
3309 3311 """Return util.hooks consists of a pushop with repo, remote, outgoing
3310 3312 methods, which are called before pushing changesets.
3311 3313 """
3312 3314 return util.hooks()
3313 3315
3314 3316 def pushkey(self, namespace, key, old, new):
3315 3317 try:
3316 3318 tr = self.currenttransaction()
3317 3319 hookargs = {}
3318 3320 if tr is not None:
3319 3321 hookargs.update(tr.hookargs)
3320 3322 hookargs = pycompat.strkwargs(hookargs)
3321 3323 hookargs['namespace'] = namespace
3322 3324 hookargs['key'] = key
3323 3325 hookargs['old'] = old
3324 3326 hookargs['new'] = new
3325 3327 self.hook(b'prepushkey', throw=True, **hookargs)
3326 3328 except error.HookAbort as exc:
3327 3329 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3328 3330 if exc.hint:
3329 3331 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3330 3332 return False
3331 3333 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3332 3334 ret = pushkey.push(self, namespace, key, old, new)
3333 3335
3334 3336 def runhook(unused_success):
3335 3337 self.hook(
3336 3338 b'pushkey',
3337 3339 namespace=namespace,
3338 3340 key=key,
3339 3341 old=old,
3340 3342 new=new,
3341 3343 ret=ret,
3342 3344 )
3343 3345
3344 3346 self._afterlock(runhook)
3345 3347 return ret
3346 3348
3347 3349 def listkeys(self, namespace):
3348 3350 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3349 3351 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3350 3352 values = pushkey.list(self, namespace)
3351 3353 self.hook(b'listkeys', namespace=namespace, values=values)
3352 3354 return values
3353 3355
3354 3356 def debugwireargs(self, one, two, three=None, four=None, five=None):
3355 3357 '''used to test argument passing over the wire'''
3356 3358 return b"%s %s %s %s %s" % (
3357 3359 one,
3358 3360 two,
3359 3361 pycompat.bytestr(three),
3360 3362 pycompat.bytestr(four),
3361 3363 pycompat.bytestr(five),
3362 3364 )
3363 3365
3364 3366 def savecommitmessage(self, text):
3365 3367 fp = self.vfs(b'last-message.txt', b'wb')
3366 3368 try:
3367 3369 fp.write(text)
3368 3370 finally:
3369 3371 fp.close()
3370 3372 return self.pathto(fp.name[len(self.root) + 1 :])
3371 3373
3372 3374 def register_wanted_sidedata(self, category):
3373 3375 if requirementsmod.REVLOGV2_REQUIREMENT not in self.requirements:
3374 3376 # Only revlogv2 repos can want sidedata.
3375 3377 return
3376 3378 self._wanted_sidedata.add(pycompat.bytestr(category))
3377 3379
3378 3380 def register_sidedata_computer(
3379 3381 self, kind, category, keys, computer, flags, replace=False
3380 3382 ):
3381 3383 if kind not in revlogconst.ALL_KINDS:
3382 3384 msg = _(b"unexpected revlog kind '%s'.")
3383 3385 raise error.ProgrammingError(msg % kind)
3384 3386 category = pycompat.bytestr(category)
3385 3387 already_registered = category in self._sidedata_computers.get(kind, [])
3386 3388 if already_registered and not replace:
3387 3389 msg = _(
3388 3390 b"cannot register a sidedata computer twice for category '%s'."
3389 3391 )
3390 3392 raise error.ProgrammingError(msg % category)
3391 3393 if replace and not already_registered:
3392 3394 msg = _(
3393 3395 b"cannot replace a sidedata computer that isn't registered "
3394 3396 b"for category '%s'."
3395 3397 )
3396 3398 raise error.ProgrammingError(msg % category)
3397 3399 self._sidedata_computers.setdefault(kind, {})
3398 3400 self._sidedata_computers[kind][category] = (keys, computer, flags)
3399 3401
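# A hypothetical registration sketch (names are illustrative; the exact
# computer signature and return contract are defined by the sidedata
# framework and are not restated here):
#
#     def compute_extra(repo, store, rev, prev_sidedata):
#         ...  # produce the sidedata payload for `rev`
#
#     repo.register_wanted_sidedata(b'exp-extra')
#     repo.register_sidedata_computer(
#         revlogconst.KIND_CHANGELOG,  # assumed kind constant from ALL_KINDS
#         b'exp-extra',                # category
#         (b'exp-extra-key',),         # sidedata keys this computer produces
#         compute_extra,
#         0,                           # no extra revision flags
#     )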
3400 3402
3401 3403 # used to avoid circular references so destructors work
3402 3404 def aftertrans(files):
3403 3405 renamefiles = [tuple(t) for t in files]
3404 3406
3405 3407 def a():
3406 3408 for vfs, src, dest in renamefiles:
3407 3409 # if src and dest refer to the same file, vfs.rename is a no-op,
3408 3410 # leaving both src and dest on disk. Delete dest to make sure
3409 3411 # the rename couldn't be such a no-op.
3410 3412 vfs.tryunlink(dest)
3411 3413 try:
3412 3414 vfs.rename(src, dest)
3413 3415 except OSError: # journal file does not yet exist
3414 3416 pass
3415 3417
3416 3418 return a
3417 3419
3418 3420
3419 3421 def undoname(fn):
3420 3422 base, name = os.path.split(fn)
3421 3423 assert name.startswith(b'journal')
3422 3424 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3423 3425
3424 3426
3425 3427 def instance(ui, path, create, intents=None, createopts=None):
3426 3428 localpath = urlutil.urllocalpath(path)
3427 3429 if create:
3428 3430 createrepository(ui, localpath, createopts=createopts)
3429 3431
3430 3432 return makelocalrepository(ui, localpath, intents=intents)
3431 3433
3432 3434
3433 3435 def islocal(path):
3434 3436 return True
3435 3437
3436 3438
3437 3439 def defaultcreateopts(ui, createopts=None):
3438 3440 """Populate the default creation options for a repository.
3439 3441
3440 3442 A dictionary of explicitly requested creation options can be passed
3441 3443 in. Missing keys will be populated.
3442 3444 """
3443 3445 createopts = dict(createopts or {})
3444 3446
3445 3447 if b'backend' not in createopts:
3446 3448 # experimental config: storage.new-repo-backend
3447 3449 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3448 3450
3449 3451 return createopts
3450 3452
3451 3453
3452 3454 def newreporequirements(ui, createopts):
3453 3455 """Determine the set of requirements for a new local repository.
3454 3456
3455 3457 Extensions can wrap this function to specify custom requirements for
3456 3458 new repositories.
3457 3459 """
3458 3460 # If the repo is being created from a shared repository, we copy
3459 3461 # its requirements.
3460 3462 if b'sharedrepo' in createopts:
3461 3463 requirements = set(createopts[b'sharedrepo'].requirements)
3462 3464 if createopts.get(b'sharedrelative'):
3463 3465 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3464 3466 else:
3465 3467 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3466 3468
3467 3469 return requirements
3468 3470
3469 3471 if b'backend' not in createopts:
3470 3472 raise error.ProgrammingError(
3471 3473 b'backend key not present in createopts; '
3472 3474 b'was defaultcreateopts() called?'
3473 3475 )
3474 3476
3475 3477 if createopts[b'backend'] != b'revlogv1':
3476 3478 raise error.Abort(
3477 3479 _(
3478 3480 b'unable to determine repository requirements for '
3479 3481 b'storage backend: %s'
3480 3482 )
3481 3483 % createopts[b'backend']
3482 3484 )
3483 3485
3484 3486 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3485 3487 if ui.configbool(b'format', b'usestore'):
3486 3488 requirements.add(requirementsmod.STORE_REQUIREMENT)
3487 3489 if ui.configbool(b'format', b'usefncache'):
3488 3490 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3489 3491 if ui.configbool(b'format', b'dotencode'):
3490 3492 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3491 3493
3492 3494 compengines = ui.configlist(b'format', b'revlog-compression')
3493 3495 for compengine in compengines:
3494 3496 if compengine in util.compengines:
3495 3497 engine = util.compengines[compengine]
3496 3498 if engine.available() and engine.revlogheader():
3497 3499 break
3498 3500 else:
3499 3501 raise error.Abort(
3500 3502 _(
3501 3503 b'compression engines %s defined by '
3502 3504 b'format.revlog-compression not available'
3503 3505 )
3504 3506 % b', '.join(b'"%s"' % e for e in compengines),
3505 3507 hint=_(
3506 3508 b'run "hg debuginstall" to list available '
3507 3509 b'compression engines'
3508 3510 ),
3509 3511 )
3510 3512
3511 3513 # zlib is the historical default and doesn't need an explicit requirement.
3512 3514 if compengine == b'zstd':
3513 3515 requirements.add(b'revlog-compression-zstd')
3514 3516 elif compengine != b'zlib':
3515 3517 requirements.add(b'exp-compression-%s' % compengine)
3516 3518
3517 3519 if scmutil.gdinitconfig(ui):
3518 3520 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3519 3521 if ui.configbool(b'format', b'sparse-revlog'):
3520 3522 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3521 3523
3522 3524 # experimental config: format.exp-use-copies-side-data-changeset
3523 3525 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3524 3526 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3525 3527 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3526 3528 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3527 3529 if ui.configbool(b'experimental', b'treemanifest'):
3528 3530 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3529 3531
3530 3532 revlogv2 = ui.config(b'experimental', b'revlogv2')
3531 3533 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3532 3534 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3533 3535 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3534 3536 # experimental config: format.internal-phase
3535 3537 if ui.configbool(b'format', b'internal-phase'):
3536 3538 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3537 3539
3538 3540 if createopts.get(b'narrowfiles'):
3539 3541 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3540 3542
3541 3543 if createopts.get(b'lfs'):
3542 3544 requirements.add(b'lfs')
3543 3545
3544 3546 if ui.configbool(b'format', b'bookmarks-in-store'):
3545 3547 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3546 3548
3547 3549 if ui.configbool(b'format', b'use-persistent-nodemap'):
3548 3550 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3549 3551
3550 3552 # if share-safe is enabled, let's create the new repository with the new
3551 3553 # requirement
3552 3554 if ui.configbool(b'format', b'use-share-safe'):
3553 3555 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3554 3556
3555 3557 return requirements
3556 3558
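# For illustration, creating a repository while the options below are set
# picks up the matching requirements added in the function above (assuming
# defaults for everything else):
#
#     [format]
#     use-persistent-nodemap = yes   ->  requirementsmod.NODEMAP_REQUIREMENT
#     bookmarks-in-store = yes       ->  bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT
#     use-share-safe = yes           ->  requirementsmod.SHARESAFE_REQUIREMENT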
3557 3559
3558 3560 def checkrequirementscompat(ui, requirements):
3559 3561 """Checks compatibility of repository requirements enabled and disabled.
3560 3562
3561 3563 Returns a set of requirements which need to be dropped because dependent
3562 3564 requirements are not enabled. Also warns users about it.
3563 3565
3564 3566 dropped = set()
3565 3567
3566 3568 if requirementsmod.STORE_REQUIREMENT not in requirements:
3567 3569 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3568 3570 ui.warn(
3569 3571 _(
3570 3572 b'ignoring enabled \'format.bookmarks-in-store\' config '
3571 3573 b'because it is incompatible with disabled '
3572 3574 b'\'format.usestore\' config\n'
3573 3575 )
3574 3576 )
3575 3577 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3576 3578
3577 3579 if (
3578 3580 requirementsmod.SHARED_REQUIREMENT in requirements
3579 3581 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3580 3582 ):
3581 3583 raise error.Abort(
3582 3584 _(
3583 3585 b"cannot create shared repository as source was created"
3584 3586 b" with 'format.usestore' config disabled"
3585 3587 )
3586 3588 )
3587 3589
3588 3590 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3589 3591 ui.warn(
3590 3592 _(
3591 3593 b"ignoring enabled 'format.use-share-safe' config because "
3592 3594 b"it is incompatible with disabled 'format.usestore'"
3593 3595 b" config\n"
3594 3596 )
3595 3597 )
3596 3598 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3597 3599
3598 3600 return dropped
3599 3601
3600 3602
3601 3603 def filterknowncreateopts(ui, createopts):
3602 3604 """Filters a dict of repo creation options against options that are known.
3603 3605
3604 3606 Receives a dict of repo creation options and returns a dict of those
3605 3607 options that we don't know how to handle.
3606 3608
3607 3609 This function is called as part of repository creation. If the
3608 3610 returned dict contains any items, repository creation will not
3609 3611 be allowed, as it means there was a request to create a repository
3610 3612 with options not recognized by loaded code.
3611 3613
3612 3614 Extensions can wrap this function to filter out creation options
3613 3615 they know how to handle.
3614 3616 """
3615 3617 known = {
3616 3618 b'backend',
3617 3619 b'lfs',
3618 3620 b'narrowfiles',
3619 3621 b'sharedrepo',
3620 3622 b'sharedrelative',
3621 3623 b'shareditems',
3622 3624 b'shallowfilestore',
3623 3625 }
3624 3626
3625 3627 return {k: v for k, v in createopts.items() if k not in known}
3626 3628
3627 3629
3628 3630 def createrepository(ui, path, createopts=None):
3629 3631 """Create a new repository in a vfs.
3630 3632
3631 3633 ``path`` path to the new repo's working directory.
3632 3634 ``createopts`` options for the new repository.
3633 3635
3634 3636 The following keys for ``createopts`` are recognized:
3635 3637
3636 3638 backend
3637 3639 The storage backend to use.
3638 3640 lfs
3639 3641 Repository will be created with ``lfs`` requirement. The lfs extension
3640 3642 will automatically be loaded when the repository is accessed.
3641 3643 narrowfiles
3642 3644 Set up repository to support narrow file storage.
3643 3645 sharedrepo
3644 3646 Repository object from which storage should be shared.
3645 3647 sharedrelative
3646 3648 Boolean indicating if the path to the shared repo should be
3647 3649 stored as relative. By default, the pointer to the "parent" repo
3648 3650 is stored as an absolute path.
3649 3651 shareditems
3650 3652 Set of items to share to the new repository (in addition to storage).
3651 3653 shallowfilestore
3652 3654 Indicates that storage for files should be shallow (not all ancestor
3653 3655 revisions are known).
3654 3656 """
3655 3657 createopts = defaultcreateopts(ui, createopts=createopts)
3656 3658
3657 3659 unknownopts = filterknowncreateopts(ui, createopts)
3658 3660
3659 3661 if not isinstance(unknownopts, dict):
3660 3662 raise error.ProgrammingError(
3661 3663 b'filterknowncreateopts() did not return a dict'
3662 3664 )
3663 3665
3664 3666 if unknownopts:
3665 3667 raise error.Abort(
3666 3668 _(
3667 3669 b'unable to create repository because of unknown '
3668 3670 b'creation option: %s'
3669 3671 )
3670 3672 % b', '.join(sorted(unknownopts)),
3671 3673 hint=_(b'is a required extension not loaded?'),
3672 3674 )
3673 3675
3674 3676 requirements = newreporequirements(ui, createopts=createopts)
3675 3677 requirements -= checkrequirementscompat(ui, requirements)
3676 3678
3677 3679 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3678 3680
3679 3681 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3680 3682 if hgvfs.exists():
3681 3683 raise error.RepoError(_(b'repository %s already exists') % path)
3682 3684
3683 3685 if b'sharedrepo' in createopts:
3684 3686 sharedpath = createopts[b'sharedrepo'].sharedpath
3685 3687
3686 3688 if createopts.get(b'sharedrelative'):
3687 3689 try:
3688 3690 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3689 3691 sharedpath = util.pconvert(sharedpath)
3690 3692 except (IOError, ValueError) as e:
3691 3693 # ValueError is raised on Windows if the drive letters differ
3692 3694 # on each path.
3693 3695 raise error.Abort(
3694 3696 _(b'cannot calculate relative path'),
3695 3697 hint=stringutil.forcebytestr(e),
3696 3698 )
3697 3699
3698 3700 if not wdirvfs.exists():
3699 3701 wdirvfs.makedirs()
3700 3702
3701 3703 hgvfs.makedir(notindexed=True)
3702 3704 if b'sharedrepo' not in createopts:
3703 3705 hgvfs.mkdir(b'cache')
3704 3706 hgvfs.mkdir(b'wcache')
3705 3707
3706 3708 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3707 3709 if has_store and b'sharedrepo' not in createopts:
3708 3710 hgvfs.mkdir(b'store')
3709 3711
3710 3712 # We create an invalid changelog outside the store so very old
3711 3713 # Mercurial versions (which didn't know about the requirements
3712 3714 # file) encounter an error on reading the changelog. This
3713 3715 # effectively locks out old clients and prevents them from
3714 3716 # mucking with a repo in an unknown format.
3715 3717 #
3716 3718 # The revlog header has version 65535, which won't be recognized by
3717 3719 # such old clients.
3718 3720 hgvfs.append(
3719 3721 b'00changelog.i',
3720 3722 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3721 3723 b'layout',
3722 3724 )
3723 3725
3724 3726 # Filter the requirements into working copy and store ones
3725 3727 wcreq, storereq = scmutil.filterrequirements(requirements)
3726 3728 # write working copy ones
3727 3729 scmutil.writerequires(hgvfs, wcreq)
3728 3730 # If there are store requirements and the current repository
3729 3731 # is not a shared one, write stored requirements
3730 3732 # For new shared repository, we don't need to write the store
3731 3733 # requirements as they are already present in store requires
3732 3734 if storereq and b'sharedrepo' not in createopts:
3733 3735 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3734 3736 scmutil.writerequires(storevfs, storereq)
3735 3737
3736 3738 # Write out file telling readers where to find the shared store.
3737 3739 if b'sharedrepo' in createopts:
3738 3740 hgvfs.write(b'sharedpath', sharedpath)
3739 3741
3740 3742 if createopts.get(b'shareditems'):
3741 3743 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3742 3744 hgvfs.write(b'shared', shared)
3743 3745
3744 3746
3745 3747 def poisonrepository(repo):
3746 3748 """Poison a repository instance so it can no longer be used."""
3747 3749 # Perform any cleanup on the instance.
3748 3750 repo.close()
3749 3751
3750 3752 # Our strategy is to replace the type of the object with one that
3751 3753 # has all attribute lookups result in error.
3752 3754 #
3753 3755 # But we have to allow the close() method because some constructors
3754 3756 # of repos call close() on repo references.
3755 3757 class poisonedrepository(object):
3756 3758 def __getattribute__(self, item):
3757 3759 if item == 'close':
3758 3760 return object.__getattribute__(self, item)
3759 3761
3760 3762 raise error.ProgrammingError(
3761 3763 b'repo instances should not be used after unshare'
3762 3764 )
3763 3765
3764 3766 def close(self):
3765 3767 pass
3766 3768
3767 3769 # We may have a repoview, which intercepts __setattr__. So be sure
3768 3770 # we operate at the lowest level possible.
3769 3771 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,3214 +1,3249 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import binascii
17 17 import collections
18 18 import contextlib
19 19 import errno
20 20 import io
21 21 import os
22 22 import struct
23 23 import zlib
24 24
25 25 # import stuff from node for others to import from revlog
26 26 from .node import (
27 27 bin,
28 28 hex,
29 29 nullrev,
30 30 sha1nodeconstants,
31 31 short,
32 32 wdirrev,
33 33 )
34 34 from .i18n import _
35 35 from .pycompat import getattr
36 36 from .revlogutils.constants import (
37 37 ALL_KINDS,
38 38 FEATURES_BY_VERSION,
39 39 FLAG_GENERALDELTA,
40 40 FLAG_INLINE_DATA,
41 41 INDEX_HEADER,
42 42 REVLOGV0,
43 43 REVLOGV1,
44 44 REVLOGV1_FLAGS,
45 45 REVLOGV2,
46 46 REVLOGV2_FLAGS,
47 47 REVLOG_DEFAULT_FLAGS,
48 48 REVLOG_DEFAULT_FORMAT,
49 49 REVLOG_DEFAULT_VERSION,
50 50 SUPPORTED_FLAGS,
51 51 )
52 52 from .revlogutils.flagutil import (
53 53 REVIDX_DEFAULT_FLAGS,
54 54 REVIDX_ELLIPSIS,
55 55 REVIDX_EXTSTORED,
56 56 REVIDX_FLAGS_ORDER,
57 57 REVIDX_HASCOPIESINFO,
58 58 REVIDX_ISCENSORED,
59 59 REVIDX_RAWTEXT_CHANGING_FLAGS,
60 60 )
61 61 from .thirdparty import attr
62 62 from . import (
63 63 ancestor,
64 64 dagop,
65 65 error,
66 66 mdiff,
67 67 policy,
68 68 pycompat,
69 69 templatefilters,
70 70 util,
71 71 )
72 72 from .interfaces import (
73 73 repository,
74 74 util as interfaceutil,
75 75 )
76 76 from .revlogutils import (
77 77 deltas as deltautil,
78 78 docket as docketutil,
79 79 flagutil,
80 80 nodemap as nodemaputil,
81 81 revlogv0,
82 82 sidedata as sidedatautil,
83 83 )
84 84 from .utils import (
85 85 storageutil,
86 86 stringutil,
87 87 )
88 88
89 89 # blanked usage of all the name to prevent pyflakes constraints
90 90 # We need these name available in the module for extensions.
91 91
92 92 REVLOGV0
93 93 REVLOGV1
94 94 REVLOGV2
95 95 FLAG_INLINE_DATA
96 96 FLAG_GENERALDELTA
97 97 REVLOG_DEFAULT_FLAGS
98 98 REVLOG_DEFAULT_FORMAT
99 99 REVLOG_DEFAULT_VERSION
100 100 REVLOGV1_FLAGS
101 101 REVLOGV2_FLAGS
102 102 REVIDX_ISCENSORED
103 103 REVIDX_ELLIPSIS
104 104 REVIDX_HASCOPIESINFO
105 105 REVIDX_EXTSTORED
106 106 REVIDX_DEFAULT_FLAGS
107 107 REVIDX_FLAGS_ORDER
108 108 REVIDX_RAWTEXT_CHANGING_FLAGS
109 109
110 110 parsers = policy.importmod('parsers')
111 111 rustancestor = policy.importrust('ancestor')
112 112 rustdagop = policy.importrust('dagop')
113 113 rustrevlog = policy.importrust('revlog')
114 114
115 115 # Aliased for performance.
116 116 _zlibdecompress = zlib.decompress
117 117
118 118 # max size of revlog with inline data
119 119 _maxinline = 131072
120 120 _chunksize = 1048576
121 121
122 122 # Flag processors for REVIDX_ELLIPSIS.
123 123 def ellipsisreadprocessor(rl, text):
124 124 return text, False
125 125
126 126
127 127 def ellipsiswriteprocessor(rl, text):
128 128 return text, False
129 129
130 130
131 131 def ellipsisrawprocessor(rl, text):
132 132 return False
133 133
134 134
135 135 ellipsisprocessor = (
136 136 ellipsisreadprocessor,
137 137 ellipsiswriteprocessor,
138 138 ellipsisrawprocessor,
139 139 )
140 140
141 141
142 142 def offset_type(offset, type):
143 143 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
144 144 raise ValueError(b'unknown revlog index flags')
145 145 return int(int(offset) << 16 | type)
146 146
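# For example, offset_type(4096, REVIDX_ISCENSORED) packs the byte offset
# into the high bits and the censor flag into the low 16 bits:
# (4096 << 16) | REVIDX_ISCENSORED.
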
147 147
148 148 def _verify_revision(rl, skipflags, state, node):
149 149 """Verify the integrity of the given revlog ``node`` while providing a hook
150 150 point for extensions to influence the operation."""
151 151 if skipflags:
152 152 state[b'skipread'].add(node)
153 153 else:
154 154 # Side-effect: read content and verify hash.
155 155 rl.revision(node)
156 156
157 157
158 158 # True if a fast implementation for persistent-nodemap is available
159 159 #
160 160 # We also consider we have a "fast" implementation in "pure" python because
161 161 # people using pure don't really have performance considerations (and a
162 162 # wheelbarrow of other slowness sources)
163 163 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
164 164 parsers, 'BaseIndexObject'
165 165 )
166 166
167 167
168 168 @attr.s(slots=True, frozen=True)
169 169 class _revisioninfo(object):
170 170 """Information about a revision that allows building its fulltext
171 171 node: expected hash of the revision
172 172 p1, p2: parent revs of the revision
173 173 btext: built text cache consisting of a one-element list
174 174 cachedelta: (baserev, uncompressed_delta) or None
175 175 flags: flags associated to the revision storage
176 176
177 177 One of btext[0] or cachedelta must be set.
178 178 """
179 179
180 180 node = attr.ib()
181 181 p1 = attr.ib()
182 182 p2 = attr.ib()
183 183 btext = attr.ib()
184 184 textlen = attr.ib()
185 185 cachedelta = attr.ib()
186 186 flags = attr.ib()
187 187
188 188
189 189 @interfaceutil.implementer(repository.irevisiondelta)
190 190 @attr.s(slots=True)
191 191 class revlogrevisiondelta(object):
192 192 node = attr.ib()
193 193 p1node = attr.ib()
194 194 p2node = attr.ib()
195 195 basenode = attr.ib()
196 196 flags = attr.ib()
197 197 baserevisionsize = attr.ib()
198 198 revision = attr.ib()
199 199 delta = attr.ib()
200 200 sidedata = attr.ib()
201 201 protocol_flags = attr.ib()
202 202 linknode = attr.ib(default=None)
203 203
204 204
205 205 @interfaceutil.implementer(repository.iverifyproblem)
206 206 @attr.s(frozen=True)
207 207 class revlogproblem(object):
208 208 warning = attr.ib(default=None)
209 209 error = attr.ib(default=None)
210 210 node = attr.ib(default=None)
211 211
212 212
213 213 def parse_index_v1(data, inline):
214 214 # call the C implementation to parse the index data
215 215 index, cache = parsers.parse_index2(data, inline)
216 216 return index, cache
217 217
218 218
219 219 def parse_index_v2(data, inline):
220 220 # call the C implementation to parse the index data
221 221 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
222 222 return index, cache
223 223
224 224
225 225 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
226 226
227 227 def parse_index_v1_nodemap(data, inline):
228 228 index, cache = parsers.parse_index_devel_nodemap(data, inline)
229 229 return index, cache
230 230
231 231
232 232 else:
233 233 parse_index_v1_nodemap = None
234 234
235 235
236 236 def parse_index_v1_mixed(data, inline):
237 237 index, cache = parse_index_v1(data, inline)
238 238 return rustrevlog.MixedIndex(index), cache
239 239
240 240
241 241 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
242 242 # signed integer)
243 243 _maxentrysize = 0x7FFFFFFF
244 244
245 245
246 246 class revlog(object):
247 247 """
248 248 the underlying revision storage object
249 249
250 250 A revlog consists of two parts, an index and the revision data.
251 251
252 252 The index is a file with a fixed record size containing
253 253 information on each revision, including its nodeid (hash), the
254 254 nodeids of its parents, the position and offset of its data within
255 255 the data file, and the revision it's based on. Finally, each entry
256 256 contains a linkrev entry that can serve as a pointer to external
257 257 data.
258 258
259 259 The revision data itself is a linear collection of data chunks.
260 260 Each chunk represents a revision and is usually represented as a
261 261 delta against the previous chunk. To bound lookup time, runs of
262 262 deltas are limited to about 2 times the length of the original
263 263 version data. This makes retrieval of a version proportional to
264 264 its size, or O(1) relative to the number of revisions.
265 265
266 266 Both pieces of the revlog are written to in an append-only
267 267 fashion, which means we never need to rewrite a file to insert or
268 268 remove data, and can use some simple techniques to avoid the need
269 269 for locking while reading.
270 270
271 271 If checkambig, indexfile is opened with checkambig=True at
272 272 writing, to avoid file stat ambiguity.
273 273
274 274 If mmaplargeindex is True, and an mmapindexthreshold is set, the
275 275 index will be mmapped rather than read if it is larger than the
276 276 configured threshold.
277 277
278 278 If censorable is True, the revlog can have censored revisions.
279 279
280 280 If `upperboundcomp` is not None, this is the expected maximal gain from
281 281 compression for the data content.
282 282
283 283 `concurrencychecker` is an optional function that receives 3 arguments: a
284 284 file handle, a filename, and an expected position. It should check whether
285 285 the current position in the file handle is valid, and log/warn/fail (by
286 286 raising).
287 287 """
288 288
289 289 _flagserrorclass = error.RevlogError
290 290
291 291 def __init__(
292 292 self,
293 293 opener,
294 294 target,
295 295 radix,
296 296 postfix=None,
297 297 checkambig=False,
298 298 mmaplargeindex=False,
299 299 censorable=False,
300 300 upperboundcomp=None,
301 301 persistentnodemap=False,
302 302 concurrencychecker=None,
303 303 ):
304 304 """
305 305 create a revlog object
306 306
307 307 opener is a function that abstracts the file opening operation
308 308 and can be used to implement COW semantics or the like.
309 309
310 310 `target`: a (KIND, ID) tuple that identify the content stored in
311 311 this revlog. It help the rest of the code to understand what the revlog
312 312 is about without having to resort to heuristic and index filename
313 313 analysis. Note: that this must be reliably be set by normal code, but
314 314 that test, debug, or performance measurement code might not set this to
315 315 accurate value.
316 316 """
317 317 self.upperboundcomp = upperboundcomp
318 318
319 319 self.radix = radix
320 320
321 321 self._docket_file = None
322 322 self._indexfile = None
323 323 self._datafile = None
324 324 self._nodemap_file = None
325 325 self.postfix = postfix
326 326 self.opener = opener
327 327 if persistentnodemap:
328 328 self._nodemap_file = nodemaputil.get_nodemap_file(self)
329 329
330 330 assert target[0] in ALL_KINDS
331 331 assert len(target) == 2
332 332 self.target = target
333 333 # When True, indexfile is opened with checkambig=True at writing, to
334 334 # avoid file stat ambiguity.
335 335 self._checkambig = checkambig
336 336 self._mmaplargeindex = mmaplargeindex
337 337 self._censorable = censorable
338 338 # 3-tuple of (node, rev, text) for a raw revision.
339 339 self._revisioncache = None
340 340 # Maps rev to chain base rev.
341 341 self._chainbasecache = util.lrucachedict(100)
342 342 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
343 343 self._chunkcache = (0, b'')
344 344 # How much data to read and cache into the raw revlog data cache.
345 345 self._chunkcachesize = 65536
346 346 self._maxchainlen = None
347 347 self._deltabothparents = True
348 348 self.index = None
349 349 self._docket = None
350 350 self._nodemap_docket = None
351 351 # Mapping of partial identifiers to full nodes.
352 352 self._pcache = {}
353 353 # Mapping of revision integer to full node.
354 354 self._compengine = b'zlib'
355 355 self._compengineopts = {}
356 356 self._maxdeltachainspan = -1
357 357 self._withsparseread = False
358 358 self._sparserevlog = False
359 359 self.hassidedata = False
360 360 self._srdensitythreshold = 0.50
361 361 self._srmingapsize = 262144
362 362
363 363 # Make copy of flag processors so each revlog instance can support
364 364 # custom flags.
365 365 self._flagprocessors = dict(flagutil.flagprocessors)
366 366
367 367 # 2-tuple of file handles being used for active writing.
368 368 self._writinghandles = None
369 369 # prevent nesting of addgroup
370 370 self._adding_group = None
371 371
372 372 self._loadindex()
373 373
374 374 self._concurrencychecker = concurrencychecker
375 375
376 376 def _init_opts(self):
377 377 """process options (from above/config) to setup associated default revlog mode
378 378
379 379 These values might be affected when actually reading on disk information.
380 380
381 381 The relevant values are returned for use in _loadindex().
382 382
383 383 * newversionflags:
384 384 version header to use if we need to create a new revlog
385 385
386 386 * mmapindexthreshold:
387 387 minimal index size for start to use mmap
388 388
389 389 * force_nodemap:
390 390 force the usage of a "development" version of the nodemap code
391 391 """
392 392 mmapindexthreshold = None
393 393 opts = self.opener.options
394 394
395 395 if b'revlogv2' in opts:
396 396 new_header = REVLOGV2 | FLAG_INLINE_DATA
397 397 elif b'revlogv1' in opts:
398 398 new_header = REVLOGV1 | FLAG_INLINE_DATA
399 399 if b'generaldelta' in opts:
400 400 new_header |= FLAG_GENERALDELTA
401 401 elif b'revlogv0' in self.opener.options:
402 402 new_header = REVLOGV0
403 403 else:
404 404 new_header = REVLOG_DEFAULT_VERSION
405 405
406 406 if b'chunkcachesize' in opts:
407 407 self._chunkcachesize = opts[b'chunkcachesize']
408 408 if b'maxchainlen' in opts:
409 409 self._maxchainlen = opts[b'maxchainlen']
410 410 if b'deltabothparents' in opts:
411 411 self._deltabothparents = opts[b'deltabothparents']
412 412 self._lazydelta = bool(opts.get(b'lazydelta', True))
413 413 self._lazydeltabase = False
414 414 if self._lazydelta:
415 415 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
416 416 if b'compengine' in opts:
417 417 self._compengine = opts[b'compengine']
418 418 if b'zlib.level' in opts:
419 419 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
420 420 if b'zstd.level' in opts:
421 421 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
422 422 if b'maxdeltachainspan' in opts:
423 423 self._maxdeltachainspan = opts[b'maxdeltachainspan']
424 424 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
425 425 mmapindexthreshold = opts[b'mmapindexthreshold']
426 426 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
427 427 withsparseread = bool(opts.get(b'with-sparse-read', False))
428 428 # sparse-revlog forces sparse-read
429 429 self._withsparseread = self._sparserevlog or withsparseread
430 430 if b'sparse-read-density-threshold' in opts:
431 431 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
432 432 if b'sparse-read-min-gap-size' in opts:
433 433 self._srmingapsize = opts[b'sparse-read-min-gap-size']
434 434 if opts.get(b'enableellipsis'):
435 435 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
436 436
437 437 # revlog v0 doesn't have flag processors
438 438 for flag, processor in pycompat.iteritems(
439 439 opts.get(b'flagprocessors', {})
440 440 ):
441 441 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
442 442
443 443 if self._chunkcachesize <= 0:
444 444 raise error.RevlogError(
445 445 _(b'revlog chunk cache size %r is not greater than 0')
446 446 % self._chunkcachesize
447 447 )
448 448 elif self._chunkcachesize & (self._chunkcachesize - 1):
449 449 raise error.RevlogError(
450 450 _(b'revlog chunk cache size %r is not a power of 2')
451 451 % self._chunkcachesize
452 452 )
453 453 force_nodemap = opts.get(b'devel-force-nodemap', False)
454 454 return new_header, mmapindexthreshold, force_nodemap
455 455
456 def _get_data(self, filepath, mmap_threshold):
456 def _get_data(self, filepath, mmap_threshold, size=None):
457 457 """return a file content with or without mmap
458 458
459 459 If the file is missing return the empty string"""
460 460 try:
461 461 with self.opener(filepath) as fp:
462 462 if mmap_threshold is not None:
463 463 file_size = self.opener.fstat(fp).st_size
464 464 if file_size >= mmap_threshold:
465 if size is not None:
466 # avoid a potential mmap crash
467 size = min(file_size, size)
465 468 # TODO: should .close() to release resources without
466 469 # relying on Python GC
470 if size is None:
467 471 return util.buffer(util.mmapread(fp))
472 else:
473 return util.buffer(util.mmapread(fp, size))
474 if size is None:
468 475 return fp.read()
476 else:
477 return fp.read(size)
469 478 except IOError as inst:
470 479 if inst.errno != errno.ENOENT:
471 480 raise
472 481 return b''
473 482
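# A standalone sketch (assumed, not part of this module) of why the `size`
# clamp above matters for the mmap path -- mapping no more than the file's
# length avoids touching bytes past the documented end of the data:
#
#     import mmap, os
#     with open(index_path, 'rb') as fp:   # index_path is illustrative
#         length = min(os.fstat(fp.fileno()).st_size, size)
#         buf = mmap.mmap(fp.fileno(), length, access=mmap.ACCESS_READ)
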
474 483 def _loadindex(self):
475 484
476 485 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
477 486
478 487 if self.postfix is None:
479 488 entry_point = b'%s.i' % self.radix
480 489 else:
481 490 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
482 491
483 492 entry_data = b''
484 493 self._initempty = True
485 494 entry_data = self._get_data(entry_point, mmapindexthreshold)
486 495 if len(entry_data) > 0:
487 496 header = INDEX_HEADER.unpack(entry_data[:4])[0]
488 497 self._initempty = False
489 498 else:
490 499 header = new_header
491 500
492 501 self._format_flags = header & ~0xFFFF
493 502 self._format_version = header & 0xFFFF
494 503
495 504 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
496 505 if supported_flags is None:
497 506 msg = _(b'unknown version (%d) in revlog %s')
498 507 msg %= (self._format_version, self.display_id)
499 508 raise error.RevlogError(msg)
500 509 elif self._format_flags & ~supported_flags:
501 510 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
502 511 display_flag = self._format_flags >> 16
503 512 msg %= (display_flag, self._format_version, self.display_id)
504 513 raise error.RevlogError(msg)
505 514
506 515 features = FEATURES_BY_VERSION[self._format_version]
507 516 self._inline = features[b'inline'](self._format_flags)
508 517 self._generaldelta = features[b'generaldelta'](self._format_flags)
509 518 self.hassidedata = features[b'sidedata']
510 519
511 520 if not features[b'docket']:
512 521 self._indexfile = entry_point
513 522 index_data = entry_data
514 523 else:
515 524 self._docket_file = entry_point
516 525 if self._initempty:
517 526 self._docket = docketutil.default_docket(self, header)
518 527 else:
519 528 self._docket = docketutil.parse_docket(self, entry_data)
520 529 self._indexfile = self._docket.index_filepath()
521 index_data = self._get_data(self._indexfile, mmapindexthreshold)
530 index_data = b''
531 index_size = self._docket.index_end
532 if index_size > 0:
533 index_data = self._get_data(
534 self._indexfile, mmapindexthreshold, size=index_size
535 )
536 if len(index_data) < index_size:
537 msg = _(b'too little index data for %s: got %d, expected %d')
538 msg %= (self.display_id, len(index_data), index_size)
539 raise error.RevlogError(msg)
540
522 541 self._inline = False
523 542 # generaldelta implied by version 2 revlogs.
524 543 self._generaldelta = True
525 544 # the logic for persistent nodemap will be dealt with within the
526 545 # main docket, so disable it for now.
527 546 self._nodemap_file = None
528 547
529 548 if self.postfix is None or self.postfix == b'a':
530 549 self._datafile = b'%s.d' % self.radix
531 550 else:
532 551 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
533 552
534 553 self.nodeconstants = sha1nodeconstants
535 554 self.nullid = self.nodeconstants.nullid
536 555
537 556 # sparse-revlog can't be on without general-delta (issue6056)
538 557 if not self._generaldelta:
539 558 self._sparserevlog = False
540 559
541 560 self._storedeltachains = True
542 561
543 562 devel_nodemap = (
544 563 self._nodemap_file
545 564 and force_nodemap
546 565 and parse_index_v1_nodemap is not None
547 566 )
548 567
549 568 use_rust_index = False
550 569 if rustrevlog is not None:
551 570 if self._nodemap_file is not None:
552 571 use_rust_index = True
553 572 else:
554 573 use_rust_index = self.opener.options.get(b'rust.index')
555 574
556 575 self._parse_index = parse_index_v1
557 576 if self._format_version == REVLOGV0:
558 577 self._parse_index = revlogv0.parse_index_v0
559 578 elif self._format_version == REVLOGV2:
560 579 self._parse_index = parse_index_v2
561 580 elif devel_nodemap:
562 581 self._parse_index = parse_index_v1_nodemap
563 582 elif use_rust_index:
564 583 self._parse_index = parse_index_v1_mixed
565 584 try:
566 585 d = self._parse_index(index_data, self._inline)
567 586 index, _chunkcache = d
568 587 use_nodemap = (
569 588 not self._inline
570 589 and self._nodemap_file is not None
571 590 and util.safehasattr(index, 'update_nodemap_data')
572 591 )
573 592 if use_nodemap:
574 593 nodemap_data = nodemaputil.persisted_data(self)
575 594 if nodemap_data is not None:
576 595 docket = nodemap_data[0]
577 596 if (
578 597 len(d[0]) > docket.tip_rev
579 598 and d[0][docket.tip_rev][7] == docket.tip_node
580 599 ):
581 600 # no changelog tampering
582 601 self._nodemap_docket = docket
583 602 index.update_nodemap_data(*nodemap_data)
584 603 except (ValueError, IndexError):
585 604 raise error.RevlogError(
586 605 _(b"index %s is corrupted") % self.display_id
587 606 )
588 607 self.index, self._chunkcache = d
589 608 if not self._chunkcache:
590 609 self._chunkclear()
591 610 # revnum -> (chain-length, sum-delta-length)
592 611 self._chaininfocache = util.lrucachedict(500)
593 612 # revlog header -> revlog compressor
594 613 self._decompressors = {}
595 614
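# A minimal sketch of the docket-based loading performed above: revlogv2
# does not trust the on-disk size of the index file; it reads exactly
# ``docket.index_end`` bytes and treats anything shorter as corruption
# (``read_file`` below is a stand-in for ``self._get_data``).
#
#   index_size = docket.index_end            # size recorded in the docket
#   data = read_file(index_path, size=index_size)
#   if len(data) < index_size:
#       raise error.RevlogError(b'index is truncated')
#   # any bytes past ``index_size`` are simply never looked at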
596 615 @util.propertycache
597 616 def revlog_kind(self):
598 617 return self.target[0]
599 618
600 619 @util.propertycache
601 620 def display_id(self):
602 621 """The public facing "ID" of the revlog that we use in message"""
603 622 # Maybe we should build a user facing representation of
604 623 # revlog.target instead of using `self.radix`
605 624 return self.radix
606 625
607 626 @util.propertycache
608 627 def _compressor(self):
609 628 engine = util.compengines[self._compengine]
610 629 return engine.revlogcompressor(self._compengineopts)
611 630
612 631 def _indexfp(self):
613 632 """file object for the revlog's index file"""
614 633 return self.opener(self._indexfile, mode=b"r")
615 634
616 635 def __index_write_fp(self):
617 636 # You should not use this directly; use `_writing` instead
618 637 try:
619 638 f = self.opener(
620 639 self._indexfile, mode=b"r+", checkambig=self._checkambig
621 640 )
641 if self._docket is None:
622 642 f.seek(0, os.SEEK_END)
643 else:
644 f.seek(self._docket.index_end, os.SEEK_SET)
623 645 return f
624 646 except IOError as inst:
625 647 if inst.errno != errno.ENOENT:
626 648 raise
627 649 return self.opener(
628 650 self._indexfile, mode=b"w+", checkambig=self._checkambig
629 651 )
630 652
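# Note on the docket branch above: with a docket, writes resume at the
# recorded ``index_end`` rather than at the physical end of the file, so
# any stale bytes that an unfinished write may have left past that point
# are overwritten instead of being appended after.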
631 653 def __index_new_fp(self):
632 654 # You should not use this unless you are upgrading from an inline revlog
633 655 return self.opener(
634 656 self._indexfile,
635 657 mode=b"w",
636 658 checkambig=self._checkambig,
637 659 atomictemp=True,
638 660 )
639 661
640 662 def _datafp(self, mode=b'r'):
641 663 """file object for the revlog's data file"""
642 664 return self.opener(self._datafile, mode=mode)
643 665
644 666 @contextlib.contextmanager
645 667 def _datareadfp(self, existingfp=None):
646 668 """file object suitable to read data"""
647 669 # Use explicit file handle, if given.
648 670 if existingfp is not None:
649 671 yield existingfp
650 672
651 673 # Use a file handle being actively used for writes, if available.
652 674 # There is some danger to doing this because reads will seek the
653 675 # file. However, _writeentry() performs a SEEK_END before all writes,
654 676 # so we should be safe.
655 677 elif self._writinghandles:
656 678 if self._inline:
657 679 yield self._writinghandles[0]
658 680 else:
659 681 yield self._writinghandles[1]
660 682
661 683 # Otherwise open a new file handle.
662 684 else:
663 685 if self._inline:
664 686 func = self._indexfp
665 687 else:
666 688 func = self._datafp
667 689 with func() as fp:
668 690 yield fp
669 691
670 692 def tiprev(self):
671 693 return len(self.index) - 1
672 694
673 695 def tip(self):
674 696 return self.node(self.tiprev())
675 697
676 698 def __contains__(self, rev):
677 699 return 0 <= rev < len(self)
678 700
679 701 def __len__(self):
680 702 return len(self.index)
681 703
682 704 def __iter__(self):
683 705 return iter(pycompat.xrange(len(self)))
684 706
685 707 def revs(self, start=0, stop=None):
686 708 """iterate over all revs in this revlog (from start to stop)"""
687 709 return storageutil.iterrevs(len(self), start=start, stop=stop)
688 710
689 711 @property
690 712 def nodemap(self):
691 713 msg = (
692 714 b"revlog.nodemap is deprecated, "
693 715 b"use revlog.index.[has_node|rev|get_rev]"
694 716 )
695 717 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
696 718 return self.index.nodemap
697 719
698 720 @property
699 721 def _nodecache(self):
700 722 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
701 723 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
702 724 return self.index.nodemap
703 725
704 726 def hasnode(self, node):
705 727 try:
706 728 self.rev(node)
707 729 return True
708 730 except KeyError:
709 731 return False
710 732
711 733 def candelta(self, baserev, rev):
712 734 """whether two revisions (baserev, rev) can be delta-ed or not"""
713 735 # Disable delta if either rev requires a content-changing flag
714 736 # processor (ex. LFS). This is because such a flag processor can alter
715 737 # the rawtext content that the delta will be based on, and two clients
716 738 # could have the same revlog node with different flags (i.e. different
717 739 # rawtext contents) and the delta could be incompatible.
718 740 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
719 741 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
720 742 ):
721 743 return False
722 744 return True
723 745
724 746 def update_caches(self, transaction):
725 747 if self._nodemap_file is not None:
726 748 if transaction is None:
727 749 nodemaputil.update_persistent_nodemap(self)
728 750 else:
729 751 nodemaputil.setup_persistent_nodemap(transaction, self)
730 752
731 753 def clearcaches(self):
732 754 self._revisioncache = None
733 755 self._chainbasecache.clear()
734 756 self._chunkcache = (0, b'')
735 757 self._pcache = {}
736 758 self._nodemap_docket = None
737 759 self.index.clearcaches()
738 760 # The python code is the one responsible for validating the docket, so we
739 761 # end up having to refresh it here.
740 762 use_nodemap = (
741 763 not self._inline
742 764 and self._nodemap_file is not None
743 765 and util.safehasattr(self.index, 'update_nodemap_data')
744 766 )
745 767 if use_nodemap:
746 768 nodemap_data = nodemaputil.persisted_data(self)
747 769 if nodemap_data is not None:
748 770 self._nodemap_docket = nodemap_data[0]
749 771 self.index.update_nodemap_data(*nodemap_data)
750 772
751 773 def rev(self, node):
752 774 try:
753 775 return self.index.rev(node)
754 776 except TypeError:
755 777 raise
756 778 except error.RevlogError:
757 779 # parsers.c radix tree lookup failed
758 780 if (
759 781 node == self.nodeconstants.wdirid
760 782 or node in self.nodeconstants.wdirfilenodeids
761 783 ):
762 784 raise error.WdirUnsupported
763 785 raise error.LookupError(node, self.display_id, _(b'no node'))
764 786
765 787 # Accessors for index entries.
766 788
767 789 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
768 790 # are flags.
769 791 def start(self, rev):
770 792 return int(self.index[rev][0] >> 16)
771 793
772 794 def flags(self, rev):
773 795 return self.index[rev][0] & 0xFFFF
774 796
775 797 def length(self, rev):
776 798 return self.index[rev][1]
777 799
778 800 def sidedata_length(self, rev):
779 801 if not self.hassidedata:
780 802 return 0
781 803 return self.index[rev][9]
782 804
783 805 def rawsize(self, rev):
784 806 """return the length of the uncompressed text for a given revision"""
785 807 l = self.index[rev][2]
786 808 if l >= 0:
787 809 return l
788 810
789 811 t = self.rawdata(rev)
790 812 return len(t)
791 813
792 814 def size(self, rev):
793 815 """length of non-raw text (processed by a "read" flag processor)"""
794 816 # fast path: if no "read" flag processor could change the content,
795 817 # size is rawsize. note: ELLIPSIS is known to not change the content.
796 818 flags = self.flags(rev)
797 819 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
798 820 return self.rawsize(rev)
799 821
800 822 return len(self.revision(rev, raw=False))
801 823
802 824 def chainbase(self, rev):
803 825 base = self._chainbasecache.get(rev)
804 826 if base is not None:
805 827 return base
806 828
807 829 index = self.index
808 830 iterrev = rev
809 831 base = index[iterrev][3]
810 832 while base != iterrev:
811 833 iterrev = base
812 834 base = index[iterrev][3]
813 835
814 836 self._chainbasecache[rev] = base
815 837 return base
816 838
817 839 def linkrev(self, rev):
818 840 return self.index[rev][4]
819 841
820 842 def parentrevs(self, rev):
821 843 try:
822 844 entry = self.index[rev]
823 845 except IndexError:
824 846 if rev == wdirrev:
825 847 raise error.WdirUnsupported
826 848 raise
827 849 if entry[5] == nullrev:
828 850 return entry[6], entry[5]
829 851 else:
830 852 return entry[5], entry[6]
831 853
832 854 # fast parentrevs(rev) where rev isn't filtered
833 855 _uncheckedparentrevs = parentrevs
834 856
835 857 def node(self, rev):
836 858 try:
837 859 return self.index[rev][7]
838 860 except IndexError:
839 861 if rev == wdirrev:
840 862 raise error.WdirUnsupported
841 863 raise
842 864
843 865 # Derived from index values.
844 866
845 867 def end(self, rev):
846 868 return self.start(rev) + self.length(rev)
847 869
848 870 def parents(self, node):
849 871 i = self.index
850 872 d = i[self.rev(node)]
851 873 # inline node() to avoid function call overhead
852 874 if d[5] == self.nullid:
853 875 return i[d[6]][7], i[d[5]][7]
854 876 else:
855 877 return i[d[5]][7], i[d[6]][7]
856 878
857 879 def chainlen(self, rev):
858 880 return self._chaininfo(rev)[0]
859 881
860 882 def _chaininfo(self, rev):
861 883 chaininfocache = self._chaininfocache
862 884 if rev in chaininfocache:
863 885 return chaininfocache[rev]
864 886 index = self.index
865 887 generaldelta = self._generaldelta
866 888 iterrev = rev
867 889 e = index[iterrev]
868 890 clen = 0
869 891 compresseddeltalen = 0
870 892 while iterrev != e[3]:
871 893 clen += 1
872 894 compresseddeltalen += e[1]
873 895 if generaldelta:
874 896 iterrev = e[3]
875 897 else:
876 898 iterrev -= 1
877 899 if iterrev in chaininfocache:
878 900 t = chaininfocache[iterrev]
879 901 clen += t[0]
880 902 compresseddeltalen += t[1]
881 903 break
882 904 e = index[iterrev]
883 905 else:
884 906 # Add text length of base since decompressing that also takes
885 907 # work. For cache hits the length is already included.
886 908 compresseddeltalen += e[1]
887 909 r = (clen, compresseddeltalen)
888 910 chaininfocache[rev] = r
889 911 return r
890 912
891 913 def _deltachain(self, rev, stoprev=None):
892 914 """Obtain the delta chain for a revision.
893 915
894 916 ``stoprev`` specifies a revision to stop at. If not specified, we
895 917 stop at the base of the chain.
896 918
897 919 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
898 920 revs in ascending order and ``stopped`` is a bool indicating whether
899 921 ``stoprev`` was hit.
900 922 """
901 923 # Try C implementation.
902 924 try:
903 925 return self.index.deltachain(rev, stoprev, self._generaldelta)
904 926 except AttributeError:
905 927 pass
906 928
907 929 chain = []
908 930
909 931 # Alias to prevent attribute lookup in tight loop.
910 932 index = self.index
911 933 generaldelta = self._generaldelta
912 934
913 935 iterrev = rev
914 936 e = index[iterrev]
915 937 while iterrev != e[3] and iterrev != stoprev:
916 938 chain.append(iterrev)
917 939 if generaldelta:
918 940 iterrev = e[3]
919 941 else:
920 942 iterrev -= 1
921 943 e = index[iterrev]
922 944
923 945 if iterrev == stoprev:
924 946 stopped = True
925 947 else:
926 948 chain.append(iterrev)
927 949 stopped = False
928 950
929 951 chain.reverse()
930 952 return chain, stopped
931 953
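# Sketch of the typical caller pattern (``_rawtext`` below does this):
#
#   chain, stopped = self._deltachain(rev, stoprev=cachedrev)
#   # ``chain`` holds the revisions whose chunks must be applied, in
#   # ascending order; ``stopped`` is True when the walk ended at
#   # ``cachedrev``, whose cached text can then serve as the base.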
932 954 def ancestors(self, revs, stoprev=0, inclusive=False):
933 955 """Generate the ancestors of 'revs' in reverse revision order.
934 956 Does not generate revs lower than stoprev.
935 957
936 958 See the documentation for ancestor.lazyancestors for more details."""
937 959
938 960 # first, make sure start revisions aren't filtered
939 961 revs = list(revs)
940 962 checkrev = self.node
941 963 for r in revs:
942 964 checkrev(r)
943 965 # and we're sure ancestors aren't filtered as well
944 966
945 967 if rustancestor is not None:
946 968 lazyancestors = rustancestor.LazyAncestors
947 969 arg = self.index
948 970 else:
949 971 lazyancestors = ancestor.lazyancestors
950 972 arg = self._uncheckedparentrevs
951 973 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
952 974
953 975 def descendants(self, revs):
954 976 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
955 977
956 978 def findcommonmissing(self, common=None, heads=None):
957 979 """Return a tuple of the ancestors of common and the ancestors of heads
958 980 that are not ancestors of common. In revset terminology, we return the
959 981 tuple:
960 982
961 983 ::common, (::heads) - (::common)
962 984
963 985 The list is sorted by revision number, meaning it is
964 986 topologically sorted.
965 987
966 988 'heads' and 'common' are both lists of node IDs. If heads is
967 989 not supplied, uses all of the revlog's heads. If common is not
968 990 supplied, uses nullid."""
969 991 if common is None:
970 992 common = [self.nullid]
971 993 if heads is None:
972 994 heads = self.heads()
973 995
974 996 common = [self.rev(n) for n in common]
975 997 heads = [self.rev(n) for n in heads]
976 998
977 999 # we want the ancestors, but inclusive
978 1000 class lazyset(object):
979 1001 def __init__(self, lazyvalues):
980 1002 self.addedvalues = set()
981 1003 self.lazyvalues = lazyvalues
982 1004
983 1005 def __contains__(self, value):
984 1006 return value in self.addedvalues or value in self.lazyvalues
985 1007
986 1008 def __iter__(self):
987 1009 added = self.addedvalues
988 1010 for r in added:
989 1011 yield r
990 1012 for r in self.lazyvalues:
991 1013 if not r in added:
992 1014 yield r
993 1015
994 1016 def add(self, value):
995 1017 self.addedvalues.add(value)
996 1018
997 1019 def update(self, values):
998 1020 self.addedvalues.update(values)
999 1021
1000 1022 has = lazyset(self.ancestors(common))
1001 1023 has.add(nullrev)
1002 1024 has.update(common)
1003 1025
1004 1026 # take all ancestors from heads that aren't in has
1005 1027 missing = set()
1006 1028 visit = collections.deque(r for r in heads if r not in has)
1007 1029 while visit:
1008 1030 r = visit.popleft()
1009 1031 if r in missing:
1010 1032 continue
1011 1033 else:
1012 1034 missing.add(r)
1013 1035 for p in self.parentrevs(r):
1014 1036 if p not in has:
1015 1037 visit.append(p)
1016 1038 missing = list(missing)
1017 1039 missing.sort()
1018 1040 return has, [self.node(miss) for miss in missing]
1019 1041
1020 1042 def incrementalmissingrevs(self, common=None):
1021 1043 """Return an object that can be used to incrementally compute the
1022 1044 revision numbers of the ancestors of arbitrary sets that are not
1023 1045 ancestors of common. This is an ancestor.incrementalmissingancestors
1024 1046 object.
1025 1047
1026 1048 'common' is a list of revision numbers. If common is not supplied, uses
1027 1049 nullrev.
1028 1050 """
1029 1051 if common is None:
1030 1052 common = [nullrev]
1031 1053
1032 1054 if rustancestor is not None:
1033 1055 return rustancestor.MissingAncestors(self.index, common)
1034 1056 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1035 1057
1036 1058 def findmissingrevs(self, common=None, heads=None):
1037 1059 """Return the revision numbers of the ancestors of heads that
1038 1060 are not ancestors of common.
1039 1061
1040 1062 More specifically, return a list of revision numbers corresponding to
1041 1063 nodes N such that every N satisfies the following constraints:
1042 1064
1043 1065 1. N is an ancestor of some node in 'heads'
1044 1066 2. N is not an ancestor of any node in 'common'
1045 1067
1046 1068 The list is sorted by revision number, meaning it is
1047 1069 topologically sorted.
1048 1070
1049 1071 'heads' and 'common' are both lists of revision numbers. If heads is
1050 1072 not supplied, uses all of the revlog's heads. If common is not
1051 1073 supplied, uses nullid."""
1052 1074 if common is None:
1053 1075 common = [nullrev]
1054 1076 if heads is None:
1055 1077 heads = self.headrevs()
1056 1078
1057 1079 inc = self.incrementalmissingrevs(common=common)
1058 1080 return inc.missingancestors(heads)
1059 1081
1060 1082 def findmissing(self, common=None, heads=None):
1061 1083 """Return the ancestors of heads that are not ancestors of common.
1062 1084
1063 1085 More specifically, return a list of nodes N such that every N
1064 1086 satisfies the following constraints:
1065 1087
1066 1088 1. N is an ancestor of some node in 'heads'
1067 1089 2. N is not an ancestor of any node in 'common'
1068 1090
1069 1091 The list is sorted by revision number, meaning it is
1070 1092 topologically sorted.
1071 1093
1072 1094 'heads' and 'common' are both lists of node IDs. If heads is
1073 1095 not supplied, uses all of the revlog's heads. If common is not
1074 1096 supplied, uses nullid."""
1075 1097 if common is None:
1076 1098 common = [self.nullid]
1077 1099 if heads is None:
1078 1100 heads = self.heads()
1079 1101
1080 1102 common = [self.rev(n) for n in common]
1081 1103 heads = [self.rev(n) for n in heads]
1082 1104
1083 1105 inc = self.incrementalmissingrevs(common=common)
1084 1106 return [self.node(r) for r in inc.missingancestors(heads)]
1085 1107
1086 1108 def nodesbetween(self, roots=None, heads=None):
1087 1109 """Return a topological path from 'roots' to 'heads'.
1088 1110
1089 1111 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1090 1112 topologically sorted list of all nodes N that satisfy both of
1091 1113 these constraints:
1092 1114
1093 1115 1. N is a descendant of some node in 'roots'
1094 1116 2. N is an ancestor of some node in 'heads'
1095 1117
1096 1118 Every node is considered to be both a descendant and an ancestor
1097 1119 of itself, so every reachable node in 'roots' and 'heads' will be
1098 1120 included in 'nodes'.
1099 1121
1100 1122 'outroots' is the list of reachable nodes in 'roots', i.e., the
1101 1123 subset of 'roots' that is returned in 'nodes'. Likewise,
1102 1124 'outheads' is the subset of 'heads' that is also in 'nodes'.
1103 1125
1104 1126 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1105 1127 unspecified, uses nullid as the only root. If 'heads' is
1106 1128 unspecified, uses list of all of the revlog's heads."""
1107 1129 nonodes = ([], [], [])
1108 1130 if roots is not None:
1109 1131 roots = list(roots)
1110 1132 if not roots:
1111 1133 return nonodes
1112 1134 lowestrev = min([self.rev(n) for n in roots])
1113 1135 else:
1114 1136 roots = [self.nullid] # Everybody's a descendant of nullid
1115 1137 lowestrev = nullrev
1116 1138 if (lowestrev == nullrev) and (heads is None):
1117 1139 # We want _all_ the nodes!
1118 1140 return (
1119 1141 [self.node(r) for r in self],
1120 1142 [self.nullid],
1121 1143 list(self.heads()),
1122 1144 )
1123 1145 if heads is None:
1124 1146 # All nodes are ancestors, so the latest ancestor is the last
1125 1147 # node.
1126 1148 highestrev = len(self) - 1
1127 1149 # Set ancestors to None to signal that every node is an ancestor.
1128 1150 ancestors = None
1129 1151 # Set heads to an empty dictionary for later discovery of heads
1130 1152 heads = {}
1131 1153 else:
1132 1154 heads = list(heads)
1133 1155 if not heads:
1134 1156 return nonodes
1135 1157 ancestors = set()
1136 1158 # Turn heads into a dictionary so we can remove 'fake' heads.
1137 1159 # Also, later we will be using it to filter out the heads we can't
1138 1160 # find from roots.
1139 1161 heads = dict.fromkeys(heads, False)
1140 1162 # Start at the top and keep marking parents until we're done.
1141 1163 nodestotag = set(heads)
1142 1164 # Remember where the top was so we can use it as a limit later.
1143 1165 highestrev = max([self.rev(n) for n in nodestotag])
1144 1166 while nodestotag:
1145 1167 # grab a node to tag
1146 1168 n = nodestotag.pop()
1147 1169 # Never tag nullid
1148 1170 if n == self.nullid:
1149 1171 continue
1150 1172 # A node's revision number represents its place in a
1151 1173 # topologically sorted list of nodes.
1152 1174 r = self.rev(n)
1153 1175 if r >= lowestrev:
1154 1176 if n not in ancestors:
1155 1177 # If we are possibly a descendant of one of the roots
1156 1178 # and we haven't already been marked as an ancestor
1157 1179 ancestors.add(n) # Mark as ancestor
1158 1180 # Add non-nullid parents to list of nodes to tag.
1159 1181 nodestotag.update(
1160 1182 [p for p in self.parents(n) if p != self.nullid]
1161 1183 )
1162 1184 elif n in heads: # We've seen it before, is it a fake head?
1163 1185 # So it is, real heads should not be the ancestors of
1164 1186 # any other heads.
1165 1187 heads.pop(n)
1166 1188 if not ancestors:
1167 1189 return nonodes
1168 1190 # Now that we have our set of ancestors, we want to remove any
1169 1191 # roots that are not ancestors.
1170 1192
1171 1193 # If one of the roots was nullid, everything is included anyway.
1172 1194 if lowestrev > nullrev:
1173 1195 # But, since we weren't, let's recompute the lowest rev to not
1174 1196 # include roots that aren't ancestors.
1175 1197
1176 1198 # Filter out roots that aren't ancestors of heads
1177 1199 roots = [root for root in roots if root in ancestors]
1178 1200 # Recompute the lowest revision
1179 1201 if roots:
1180 1202 lowestrev = min([self.rev(root) for root in roots])
1181 1203 else:
1182 1204 # No more roots? Return empty list
1183 1205 return nonodes
1184 1206 else:
1185 1207 # We are descending from nullid, and don't need to care about
1186 1208 # any other roots.
1187 1209 lowestrev = nullrev
1188 1210 roots = [self.nullid]
1189 1211 # Transform our roots list into a set.
1190 1212 descendants = set(roots)
1191 1213 # Also, keep the original roots so we can filter out roots that aren't
1192 1214 # 'real' roots (i.e. are descended from other roots).
1193 1215 roots = descendants.copy()
1194 1216 # Our topologically sorted list of output nodes.
1195 1217 orderedout = []
1196 1218 # Don't start at nullid since we don't want nullid in our output list,
1197 1219 # and if nullid shows up in descendants, empty parents will look like
1198 1220 # they're descendants.
1199 1221 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1200 1222 n = self.node(r)
1201 1223 isdescendant = False
1202 1224 if lowestrev == nullrev: # Everybody is a descendant of nullid
1203 1225 isdescendant = True
1204 1226 elif n in descendants:
1205 1227 # n is already a descendant
1206 1228 isdescendant = True
1207 1229 # This check only needs to be done here because all the roots
1208 1230 # will start being marked as descendants before the loop.
1209 1231 if n in roots:
1210 1232 # If n was a root, check if it's a 'real' root.
1211 1233 p = tuple(self.parents(n))
1212 1234 # If any of its parents are descendants, it's not a root.
1213 1235 if (p[0] in descendants) or (p[1] in descendants):
1214 1236 roots.remove(n)
1215 1237 else:
1216 1238 p = tuple(self.parents(n))
1217 1239 # A node is a descendant if either of its parents is a
1218 1240 # descendant. (We seeded the descendants set with the roots
1219 1241 # up there, remember?)
1220 1242 if (p[0] in descendants) or (p[1] in descendants):
1221 1243 descendants.add(n)
1222 1244 isdescendant = True
1223 1245 if isdescendant and ((ancestors is None) or (n in ancestors)):
1224 1246 # Only include nodes that are both descendants and ancestors.
1225 1247 orderedout.append(n)
1226 1248 if (ancestors is not None) and (n in heads):
1227 1249 # We're trying to figure out which heads are reachable
1228 1250 # from roots.
1229 1251 # Mark this head as having been reached
1230 1252 heads[n] = True
1231 1253 elif ancestors is None:
1232 1254 # Otherwise, we're trying to discover the heads.
1233 1255 # Assume this is a head because if it isn't, the next step
1234 1256 # will eventually remove it.
1235 1257 heads[n] = True
1236 1258 # But, obviously its parents aren't.
1237 1259 for p in self.parents(n):
1238 1260 heads.pop(p, None)
1239 1261 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1240 1262 roots = list(roots)
1241 1263 assert orderedout
1242 1264 assert roots
1243 1265 assert heads
1244 1266 return (orderedout, roots, heads)
1245 1267
1246 1268 def headrevs(self, revs=None):
1247 1269 if revs is None:
1248 1270 try:
1249 1271 return self.index.headrevs()
1250 1272 except AttributeError:
1251 1273 return self._headrevs()
1252 1274 if rustdagop is not None:
1253 1275 return rustdagop.headrevs(self.index, revs)
1254 1276 return dagop.headrevs(revs, self._uncheckedparentrevs)
1255 1277
1256 1278 def computephases(self, roots):
1257 1279 return self.index.computephasesmapsets(roots)
1258 1280
1259 1281 def _headrevs(self):
1260 1282 count = len(self)
1261 1283 if not count:
1262 1284 return [nullrev]
1263 1285 # we won't iter over filtered rev so nobody is a head at start
1264 1286 ishead = [0] * (count + 1)
1265 1287 index = self.index
1266 1288 for r in self:
1267 1289 ishead[r] = 1 # I may be a head
1268 1290 e = index[r]
1269 1291 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1270 1292 return [r for r, val in enumerate(ishead) if val]
1271 1293
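# Worked example for the pure-python fallback above: in a linear history
# 0 <- 1 <- 2, every revision is first marked as a potential head, then
# the parent slots (e[5], e[6]) clear revisions 0 and 1, so only [2] is
# returned; a merge clears both of its parents in the same single pass.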
1272 1294 def heads(self, start=None, stop=None):
1273 1295 """return the list of all nodes that have no children
1274 1296
1275 1297 if start is specified, only heads that are descendants of
1276 1298 start will be returned
1277 1299 if stop is specified, it will consider all the revs from stop
1278 1300 as if they had no children
1279 1301 """
1280 1302 if start is None and stop is None:
1281 1303 if not len(self):
1282 1304 return [self.nullid]
1283 1305 return [self.node(r) for r in self.headrevs()]
1284 1306
1285 1307 if start is None:
1286 1308 start = nullrev
1287 1309 else:
1288 1310 start = self.rev(start)
1289 1311
1290 1312 stoprevs = {self.rev(n) for n in stop or []}
1291 1313
1292 1314 revs = dagop.headrevssubset(
1293 1315 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1294 1316 )
1295 1317
1296 1318 return [self.node(rev) for rev in revs]
1297 1319
1298 1320 def children(self, node):
1299 1321 """find the children of a given node"""
1300 1322 c = []
1301 1323 p = self.rev(node)
1302 1324 for r in self.revs(start=p + 1):
1303 1325 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1304 1326 if prevs:
1305 1327 for pr in prevs:
1306 1328 if pr == p:
1307 1329 c.append(self.node(r))
1308 1330 elif p == nullrev:
1309 1331 c.append(self.node(r))
1310 1332 return c
1311 1333
1312 1334 def commonancestorsheads(self, a, b):
1313 1335 """calculate all the heads of the common ancestors of nodes a and b"""
1314 1336 a, b = self.rev(a), self.rev(b)
1315 1337 ancs = self._commonancestorsheads(a, b)
1316 1338 return pycompat.maplist(self.node, ancs)
1317 1339
1318 1340 def _commonancestorsheads(self, *revs):
1319 1341 """calculate all the heads of the common ancestors of revs"""
1320 1342 try:
1321 1343 ancs = self.index.commonancestorsheads(*revs)
1322 1344 except (AttributeError, OverflowError): # C implementation failed
1323 1345 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1324 1346 return ancs
1325 1347
1326 1348 def isancestor(self, a, b):
1327 1349 """return True if node a is an ancestor of node b
1328 1350
1329 1351 A revision is considered an ancestor of itself."""
1330 1352 a, b = self.rev(a), self.rev(b)
1331 1353 return self.isancestorrev(a, b)
1332 1354
1333 1355 def isancestorrev(self, a, b):
1334 1356 """return True if revision a is an ancestor of revision b
1335 1357
1336 1358 A revision is considered an ancestor of itself.
1337 1359
1338 1360 The implementation of this is trivial but the use of
1339 1361 reachableroots is not."""
1340 1362 if a == nullrev:
1341 1363 return True
1342 1364 elif a == b:
1343 1365 return True
1344 1366 elif a > b:
1345 1367 return False
1346 1368 return bool(self.reachableroots(a, [b], [a], includepath=False))
1347 1369
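# After the trivial orderings handled above, the general case delegates
# to ``reachableroots``: with roots=[a] and heads=[b] it computes
# heads(::(a and a::b)), which is non-empty exactly when ``a`` is an
# ancestor of ``b``.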
1348 1370 def reachableroots(self, minroot, heads, roots, includepath=False):
1349 1371 """return (heads(::(<roots> and <roots>::<heads>)))
1350 1372
1351 1373 If includepath is True, return (<roots>::<heads>)."""
1352 1374 try:
1353 1375 return self.index.reachableroots2(
1354 1376 minroot, heads, roots, includepath
1355 1377 )
1356 1378 except AttributeError:
1357 1379 return dagop._reachablerootspure(
1358 1380 self.parentrevs, minroot, roots, heads, includepath
1359 1381 )
1360 1382
1361 1383 def ancestor(self, a, b):
1362 1384 """calculate the "best" common ancestor of nodes a and b"""
1363 1385
1364 1386 a, b = self.rev(a), self.rev(b)
1365 1387 try:
1366 1388 ancs = self.index.ancestors(a, b)
1367 1389 except (AttributeError, OverflowError):
1368 1390 ancs = ancestor.ancestors(self.parentrevs, a, b)
1369 1391 if ancs:
1370 1392 # choose a consistent winner when there's a tie
1371 1393 return min(map(self.node, ancs))
1372 1394 return self.nullid
1373 1395
1374 1396 def _match(self, id):
1375 1397 if isinstance(id, int):
1376 1398 # rev
1377 1399 return self.node(id)
1378 1400 if len(id) == self.nodeconstants.nodelen:
1379 1401 # possibly a binary node
1380 1402 # odds of a binary node being all hex in ASCII are 1 in 10**25
1381 1403 try:
1382 1404 node = id
1383 1405 self.rev(node) # quick search the index
1384 1406 return node
1385 1407 except error.LookupError:
1386 1408 pass # may be partial hex id
1387 1409 try:
1388 1410 # str(rev)
1389 1411 rev = int(id)
1390 1412 if b"%d" % rev != id:
1391 1413 raise ValueError
1392 1414 if rev < 0:
1393 1415 rev = len(self) + rev
1394 1416 if rev < 0 or rev >= len(self):
1395 1417 raise ValueError
1396 1418 return self.node(rev)
1397 1419 except (ValueError, OverflowError):
1398 1420 pass
1399 1421 if len(id) == 2 * self.nodeconstants.nodelen:
1400 1422 try:
1401 1423 # a full hex nodeid?
1402 1424 node = bin(id)
1403 1425 self.rev(node)
1404 1426 return node
1405 1427 except (TypeError, error.LookupError):
1406 1428 pass
1407 1429
1408 1430 def _partialmatch(self, id):
1409 1431 # we don't care about wdirfilenodeids as they should always be full hashes
1410 1432 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1411 1433 try:
1412 1434 partial = self.index.partialmatch(id)
1413 1435 if partial and self.hasnode(partial):
1414 1436 if maybewdir:
1415 1437 # single 'ff...' match in radix tree, ambiguous with wdir
1416 1438 raise error.RevlogError
1417 1439 return partial
1418 1440 if maybewdir:
1419 1441 # no 'ff...' match in radix tree, wdir identified
1420 1442 raise error.WdirUnsupported
1421 1443 return None
1422 1444 except error.RevlogError:
1423 1445 # parsers.c radix tree lookup gave multiple matches
1424 1446 # fast path: for unfiltered changelog, radix tree is accurate
1425 1447 if not getattr(self, 'filteredrevs', None):
1426 1448 raise error.AmbiguousPrefixLookupError(
1427 1449 id, self.display_id, _(b'ambiguous identifier')
1428 1450 )
1429 1451 # fall through to slow path that filters hidden revisions
1430 1452 except (AttributeError, ValueError):
1431 1453 # we are pure python, or key was too short to search radix tree
1432 1454 pass
1433 1455
1434 1456 if id in self._pcache:
1435 1457 return self._pcache[id]
1436 1458
1437 1459 if len(id) <= 40:
1438 1460 try:
1439 1461 # hex(node)[:...]
1440 1462 l = len(id) // 2 # grab an even number of digits
1441 1463 prefix = bin(id[: l * 2])
1442 1464 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1443 1465 nl = [
1444 1466 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1445 1467 ]
1446 1468 if self.nodeconstants.nullhex.startswith(id):
1447 1469 nl.append(self.nullid)
1448 1470 if len(nl) > 0:
1449 1471 if len(nl) == 1 and not maybewdir:
1450 1472 self._pcache[id] = nl[0]
1451 1473 return nl[0]
1452 1474 raise error.AmbiguousPrefixLookupError(
1453 1475 id, self.display_id, _(b'ambiguous identifier')
1454 1476 )
1455 1477 if maybewdir:
1456 1478 raise error.WdirUnsupported
1457 1479 return None
1458 1480 except TypeError:
1459 1481 pass
1460 1482
1461 1483 def lookup(self, id):
1462 1484 """locate a node based on:
1463 1485 - revision number or str(revision number)
1464 1486 - nodeid or subset of hex nodeid
1465 1487 """
1466 1488 n = self._match(id)
1467 1489 if n is not None:
1468 1490 return n
1469 1491 n = self._partialmatch(id)
1470 1492 if n:
1471 1493 return n
1472 1494
1473 1495 raise error.LookupError(id, self.display_id, _(b'no match found'))
1474 1496
1475 1497 def shortest(self, node, minlength=1):
1476 1498 """Find the shortest unambiguous prefix that matches node."""
1477 1499
1478 1500 def isvalid(prefix):
1479 1501 try:
1480 1502 matchednode = self._partialmatch(prefix)
1481 1503 except error.AmbiguousPrefixLookupError:
1482 1504 return False
1483 1505 except error.WdirUnsupported:
1484 1506 # single 'ff...' match
1485 1507 return True
1486 1508 if matchednode is None:
1487 1509 raise error.LookupError(node, self.display_id, _(b'no node'))
1488 1510 return True
1489 1511
1490 1512 def maybewdir(prefix):
1491 1513 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1492 1514
1493 1515 hexnode = hex(node)
1494 1516
1495 1517 def disambiguate(hexnode, minlength):
1496 1518 """Disambiguate against wdirid."""
1497 1519 for length in range(minlength, len(hexnode) + 1):
1498 1520 prefix = hexnode[:length]
1499 1521 if not maybewdir(prefix):
1500 1522 return prefix
1501 1523
1502 1524 if not getattr(self, 'filteredrevs', None):
1503 1525 try:
1504 1526 length = max(self.index.shortest(node), minlength)
1505 1527 return disambiguate(hexnode, length)
1506 1528 except error.RevlogError:
1507 1529 if node != self.nodeconstants.wdirid:
1508 1530 raise error.LookupError(
1509 1531 node, self.display_id, _(b'no node')
1510 1532 )
1511 1533 except AttributeError:
1512 1534 # Fall through to pure code
1513 1535 pass
1514 1536
1515 1537 if node == self.nodeconstants.wdirid:
1516 1538 for length in range(minlength, len(hexnode) + 1):
1517 1539 prefix = hexnode[:length]
1518 1540 if isvalid(prefix):
1519 1541 return prefix
1520 1542
1521 1543 for length in range(minlength, len(hexnode) + 1):
1522 1544 prefix = hexnode[:length]
1523 1545 if isvalid(prefix):
1524 1546 return disambiguate(hexnode, length)
1525 1547
1526 1548 def cmp(self, node, text):
1527 1549 """compare text with a given file revision
1528 1550
1529 1551 returns True if text is different than what is stored.
1530 1552 """
1531 1553 p1, p2 = self.parents(node)
1532 1554 return storageutil.hashrevisionsha1(text, p1, p2) != node
1533 1555
1534 1556 def _cachesegment(self, offset, data):
1535 1557 """Add a segment to the revlog cache.
1536 1558
1537 1559 Accepts an absolute offset and the data that is at that location.
1538 1560 """
1539 1561 o, d = self._chunkcache
1540 1562 # try to add to existing cache
1541 1563 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1542 1564 self._chunkcache = o, d + data
1543 1565 else:
1544 1566 self._chunkcache = offset, data
1545 1567
1546 1568 def _readsegment(self, offset, length, df=None):
1547 1569 """Load a segment of raw data from the revlog.
1548 1570
1549 1571 Accepts an absolute offset, length to read, and an optional existing
1550 1572 file handle to read from.
1551 1573
1552 1574 If an existing file handle is passed, it will be seeked and the
1553 1575 original seek position will NOT be restored.
1554 1576
1555 1577 Returns a str or buffer of raw byte data.
1556 1578
1557 1579 Raises if the requested number of bytes could not be read.
1558 1580 """
1559 1581 # Cache data both forward and backward around the requested
1560 1582 # data, in a fixed size window. This helps speed up operations
1561 1583 # involving reading the revlog backwards.
1562 1584 cachesize = self._chunkcachesize
1563 1585 realoffset = offset & ~(cachesize - 1)
1564 1586 reallength = (
1565 1587 (offset + length + cachesize) & ~(cachesize - 1)
1566 1588 ) - realoffset
1567 1589 with self._datareadfp(df) as df:
1568 1590 df.seek(realoffset)
1569 1591 d = df.read(reallength)
1570 1592
1571 1593 self._cachesegment(realoffset, d)
1572 1594 if offset != realoffset or reallength != length:
1573 1595 startoffset = offset - realoffset
1574 1596 if len(d) - startoffset < length:
1575 1597 raise error.RevlogError(
1576 1598 _(
1577 1599 b'partial read of revlog %s; expected %d bytes from '
1578 1600 b'offset %d, got %d'
1579 1601 )
1580 1602 % (
1581 1603 self._indexfile if self._inline else self._datafile,
1582 1604 length,
1583 1605 offset,
1584 1606 len(d) - startoffset,
1585 1607 )
1586 1608 )
1587 1609
1588 1610 return util.buffer(d, startoffset, length)
1589 1611
1590 1612 if len(d) < length:
1591 1613 raise error.RevlogError(
1592 1614 _(
1593 1615 b'partial read of revlog %s; expected %d bytes from offset '
1594 1616 b'%d, got %d'
1595 1617 )
1596 1618 % (
1597 1619 self._indexfile if self._inline else self._datafile,
1598 1620 length,
1599 1621 offset,
1600 1622 len(d),
1601 1623 )
1602 1624 )
1603 1625
1604 1626 return d
1605 1627
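# Worked example, assuming the default 64 KiB chunk cache: a request for
# 100 bytes at offset 70000 is widened to the surrounding cache-aligned
# window before reading.
#
#   cachesize = 65536
#   realoffset = 70000 & ~(cachesize - 1)                        # 65536
#   reallength = ((70000 + 100 + cachesize) & ~(cachesize - 1)) - realoffset
#                                                                # 65536
#   # one full 64 KiB window starting at 65536 is read and cached, and
#   # the 100 requested bytes are then sliced out of it.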
1606 1628 def _getsegment(self, offset, length, df=None):
1607 1629 """Obtain a segment of raw data from the revlog.
1608 1630
1609 1631 Accepts an absolute offset, length of bytes to obtain, and an
1610 1632 optional file handle to the already-opened revlog. If the file
1611 1633 handle is used, its original seek position will not be preserved.
1612 1634
1613 1635 Requests for data may be returned from a cache.
1614 1636
1615 1637 Returns a str or a buffer instance of raw byte data.
1616 1638 """
1617 1639 o, d = self._chunkcache
1618 1640 l = len(d)
1619 1641
1620 1642 # is it in the cache?
1621 1643 cachestart = offset - o
1622 1644 cacheend = cachestart + length
1623 1645 if cachestart >= 0 and cacheend <= l:
1624 1646 if cachestart == 0 and cacheend == l:
1625 1647 return d # avoid a copy
1626 1648 return util.buffer(d, cachestart, cacheend - cachestart)
1627 1649
1628 1650 return self._readsegment(offset, length, df=df)
1629 1651
1630 1652 def _getsegmentforrevs(self, startrev, endrev, df=None):
1631 1653 """Obtain a segment of raw data corresponding to a range of revisions.
1632 1654
1633 1655 Accepts the start and end revisions and an optional already-open
1634 1656 file handle to be used for reading. If the file handle is read, its
1635 1657 seek position will not be preserved.
1636 1658
1637 1659 Requests for data may be satisfied by a cache.
1638 1660
1639 1661 Returns a 2-tuple of (offset, data) for the requested range of
1640 1662 revisions. Offset is the integer offset from the beginning of the
1641 1663 revlog and data is a str or buffer of the raw byte data.
1642 1664
1643 1665 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1644 1666 to determine where each revision's data begins and ends.
1645 1667 """
1646 1668 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1647 1669 # (functions are expensive).
1648 1670 index = self.index
1649 1671 istart = index[startrev]
1650 1672 start = int(istart[0] >> 16)
1651 1673 if startrev == endrev:
1652 1674 end = start + istart[1]
1653 1675 else:
1654 1676 iend = index[endrev]
1655 1677 end = int(iend[0] >> 16) + iend[1]
1656 1678
1657 1679 if self._inline:
1658 1680 start += (startrev + 1) * self.index.entry_size
1659 1681 end += (endrev + 1) * self.index.entry_size
1660 1682 length = end - start
1661 1683
1662 1684 return start, self._getsegment(start, length, df=df)
1663 1685
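# Worked example for the inline adjustment above, assuming the classic
# 64-byte v1 index entry: in an inline revlog the data chunks are
# interleaved with the index entries, so the offsets must skip the
# entries that precede them.
#
#   # rev 0, stored data offset 0, chunk length 30:
#   start = 0 + (0 + 1) * 64     # 64: the chunk sits right after entry 0
#   end = 30 + (0 + 1) * 64      # 94
#   length = end - start         # 30 bytes of (compressed) chunk data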
1664 1686 def _chunk(self, rev, df=None):
1665 1687 """Obtain a single decompressed chunk for a revision.
1666 1688
1667 1689 Accepts an integer revision and an optional already-open file handle
1668 1690 to be used for reading. If used, the seek position of the file will not
1669 1691 be preserved.
1670 1692
1671 1693 Returns a str holding uncompressed data for the requested revision.
1672 1694 """
1673 1695 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1674 1696
1675 1697 def _chunks(self, revs, df=None, targetsize=None):
1676 1698 """Obtain decompressed chunks for the specified revisions.
1677 1699
1678 1700 Accepts an iterable of numeric revisions that are assumed to be in
1679 1701 ascending order. Also accepts an optional already-open file handle
1680 1702 to be used for reading. If used, the seek position of the file will
1681 1703 not be preserved.
1682 1704
1683 1705 This function is similar to calling ``self._chunk()`` multiple times,
1684 1706 but is faster.
1685 1707
1686 1708 Returns a list with decompressed data for each requested revision.
1687 1709 """
1688 1710 if not revs:
1689 1711 return []
1690 1712 start = self.start
1691 1713 length = self.length
1692 1714 inline = self._inline
1693 1715 iosize = self.index.entry_size
1694 1716 buffer = util.buffer
1695 1717
1696 1718 l = []
1697 1719 ladd = l.append
1698 1720
1699 1721 if not self._withsparseread:
1700 1722 slicedchunks = (revs,)
1701 1723 else:
1702 1724 slicedchunks = deltautil.slicechunk(
1703 1725 self, revs, targetsize=targetsize
1704 1726 )
1705 1727
1706 1728 for revschunk in slicedchunks:
1707 1729 firstrev = revschunk[0]
1708 1730 # Skip trailing revisions with empty diff
1709 1731 for lastrev in revschunk[::-1]:
1710 1732 if length(lastrev) != 0:
1711 1733 break
1712 1734
1713 1735 try:
1714 1736 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1715 1737 except OverflowError:
1716 1738 # issue4215 - we can't cache a run of chunks greater than
1717 1739 # 2G on Windows
1718 1740 return [self._chunk(rev, df=df) for rev in revschunk]
1719 1741
1720 1742 decomp = self.decompress
1721 1743 for rev in revschunk:
1722 1744 chunkstart = start(rev)
1723 1745 if inline:
1724 1746 chunkstart += (rev + 1) * iosize
1725 1747 chunklength = length(rev)
1726 1748 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1727 1749
1728 1750 return l
1729 1751
1730 1752 def _chunkclear(self):
1731 1753 """Clear the raw chunk cache."""
1732 1754 self._chunkcache = (0, b'')
1733 1755
1734 1756 def deltaparent(self, rev):
1735 1757 """return deltaparent of the given revision"""
1736 1758 base = self.index[rev][3]
1737 1759 if base == rev:
1738 1760 return nullrev
1739 1761 elif self._generaldelta:
1740 1762 return base
1741 1763 else:
1742 1764 return rev - 1
1743 1765
1744 1766 def issnapshot(self, rev):
1745 1767 """tells whether rev is a snapshot"""
1746 1768 if not self._sparserevlog:
1747 1769 return self.deltaparent(rev) == nullrev
1748 1770 elif util.safehasattr(self.index, b'issnapshot'):
1749 1771 # directly assign the method to cache the testing and access
1750 1772 self.issnapshot = self.index.issnapshot
1751 1773 return self.issnapshot(rev)
1752 1774 if rev == nullrev:
1753 1775 return True
1754 1776 entry = self.index[rev]
1755 1777 base = entry[3]
1756 1778 if base == rev:
1757 1779 return True
1758 1780 if base == nullrev:
1759 1781 return True
1760 1782 p1 = entry[5]
1761 1783 p2 = entry[6]
1762 1784 if base == p1 or base == p2:
1763 1785 return False
1764 1786 return self.issnapshot(base)
1765 1787
1766 1788 def snapshotdepth(self, rev):
1767 1789 """number of snapshots in the chain before this one"""
1768 1790 if not self.issnapshot(rev):
1769 1791 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1770 1792 return len(self._deltachain(rev)[0]) - 1
1771 1793
1772 1794 def revdiff(self, rev1, rev2):
1773 1795 """return or calculate a delta between two revisions
1774 1796
1775 1797 The delta calculated is in binary form and is intended to be written to
1776 1798 revlog data directly. So this function needs raw revision data.
1777 1799 """
1778 1800 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1779 1801 return bytes(self._chunk(rev2))
1780 1802
1781 1803 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1782 1804
1783 1805 def _processflags(self, text, flags, operation, raw=False):
1784 1806 """deprecated entry point to access flag processors"""
1785 1807 msg = b'_processflag(...) use the specialized variant'
1786 1808 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1787 1809 if raw:
1788 1810 return text, flagutil.processflagsraw(self, text, flags)
1789 1811 elif operation == b'read':
1790 1812 return flagutil.processflagsread(self, text, flags)
1791 1813 else: # write operation
1792 1814 return flagutil.processflagswrite(self, text, flags)
1793 1815
1794 1816 def revision(self, nodeorrev, _df=None, raw=False):
1795 1817 """return an uncompressed revision of a given node or revision
1796 1818 number.
1797 1819
1798 1820 _df - an existing file handle to read from. (internal-only)
1799 1821 raw - an optional argument specifying if the revision data is to be
1800 1822 treated as raw data when applying flag transforms. 'raw' should be set
1801 1823 to True when generating changegroups or in debug commands.
1802 1824 """
1803 1825 if raw:
1804 1826 msg = (
1805 1827 b'revlog.revision(..., raw=True) is deprecated, '
1806 1828 b'use revlog.rawdata(...)'
1807 1829 )
1808 1830 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1809 1831 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1810 1832
1811 1833 def sidedata(self, nodeorrev, _df=None):
1812 1834 """a map of extra data related to the changeset but not part of the hash
1813 1835
1814 1836 This function currently returns a dictionary. However, a more advanced
1815 1837 mapping object will likely be used in the future for more
1816 1838 efficient/lazy code.
1817 1839 """
1818 1840 return self._revisiondata(nodeorrev, _df)[1]
1819 1841
1820 1842 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1821 1843 # deal with <nodeorrev> argument type
1822 1844 if isinstance(nodeorrev, int):
1823 1845 rev = nodeorrev
1824 1846 node = self.node(rev)
1825 1847 else:
1826 1848 node = nodeorrev
1827 1849 rev = None
1828 1850
1829 1851 # fast path the special `nullid` rev
1830 1852 if node == self.nullid:
1831 1853 return b"", {}
1832 1854
1833 1855 # ``rawtext`` is the text as stored inside the revlog. Might be the
1834 1856 # revision or might need to be processed to retrieve the revision.
1835 1857 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1836 1858
1837 1859 if self.hassidedata:
1838 1860 if rev is None:
1839 1861 rev = self.rev(node)
1840 1862 sidedata = self._sidedata(rev)
1841 1863 else:
1842 1864 sidedata = {}
1843 1865
1844 1866 if raw and validated:
1845 1867 # if we don't want to process the raw text and that raw
1846 1868 # text is cached, we can exit early.
1847 1869 return rawtext, sidedata
1848 1870 if rev is None:
1849 1871 rev = self.rev(node)
1850 1872 # the revlog's flag for this revision
1851 1873 # (usually alter its state or content)
1852 1874 flags = self.flags(rev)
1853 1875
1854 1876 if validated and flags == REVIDX_DEFAULT_FLAGS:
1855 1877 # no extra flags set, no flag processor runs, text = rawtext
1856 1878 return rawtext, sidedata
1857 1879
1858 1880 if raw:
1859 1881 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1860 1882 text = rawtext
1861 1883 else:
1862 1884 r = flagutil.processflagsread(self, rawtext, flags)
1863 1885 text, validatehash = r
1864 1886 if validatehash:
1865 1887 self.checkhash(text, node, rev=rev)
1866 1888 if not validated:
1867 1889 self._revisioncache = (node, rev, rawtext)
1868 1890
1869 1891 return text, sidedata
1870 1892
1871 1893 def _rawtext(self, node, rev, _df=None):
1872 1894 """return the possibly unvalidated rawtext for a revision
1873 1895
1874 1896 returns (rev, rawtext, validated)
1875 1897 """
1876 1898
1877 1899 # revision in the cache (could be useful to apply delta)
1878 1900 cachedrev = None
1879 1901 # An intermediate text to apply deltas to
1880 1902 basetext = None
1881 1903
1882 1904 # Check if we have the entry in cache
1883 1905 # The cache entry looks like (node, rev, rawtext)
1884 1906 if self._revisioncache:
1885 1907 if self._revisioncache[0] == node:
1886 1908 return (rev, self._revisioncache[2], True)
1887 1909 cachedrev = self._revisioncache[1]
1888 1910
1889 1911 if rev is None:
1890 1912 rev = self.rev(node)
1891 1913
1892 1914 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1893 1915 if stopped:
1894 1916 basetext = self._revisioncache[2]
1895 1917
1896 1918 # drop cache to save memory; the caller is expected to
1897 1919 # update self._revisioncache after validating the text
1898 1920 self._revisioncache = None
1899 1921
1900 1922 targetsize = None
1901 1923 rawsize = self.index[rev][2]
1902 1924 if 0 <= rawsize:
1903 1925 targetsize = 4 * rawsize
1904 1926
1905 1927 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1906 1928 if basetext is None:
1907 1929 basetext = bytes(bins[0])
1908 1930 bins = bins[1:]
1909 1931
1910 1932 rawtext = mdiff.patches(basetext, bins)
1911 1933 del basetext # let us have a chance to free memory early
1912 1934 return (rev, rawtext, False)
1913 1935
1914 1936 def _sidedata(self, rev):
1915 1937 """Return the sidedata for a given revision number."""
1916 1938 index_entry = self.index[rev]
1917 1939 sidedata_offset = index_entry[8]
1918 1940 sidedata_size = index_entry[9]
1919 1941
1920 1942 if self._inline:
1921 1943 sidedata_offset += self.index.entry_size * (1 + rev)
1922 1944 if sidedata_size == 0:
1923 1945 return {}
1924 1946
1925 1947 segment = self._getsegment(sidedata_offset, sidedata_size)
1926 1948 sidedata = sidedatautil.deserialize_sidedata(segment)
1927 1949 return sidedata
1928 1950
1929 1951 def rawdata(self, nodeorrev, _df=None):
1930 1952 """return an uncompressed raw data of a given node or revision number.
1931 1953
1932 1954 _df - an existing file handle to read from. (internal-only)
1933 1955 """
1934 1956 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1935 1957
1936 1958 def hash(self, text, p1, p2):
1937 1959 """Compute a node hash.
1938 1960
1939 1961 Available as a function so that subclasses can replace the hash
1940 1962 as needed.
1941 1963 """
1942 1964 return storageutil.hashrevisionsha1(text, p1, p2)
1943 1965
1944 1966 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1945 1967 """Check node hash integrity.
1946 1968
1947 1969 Available as a function so that subclasses can extend hash mismatch
1948 1970 behaviors as needed.
1949 1971 """
1950 1972 try:
1951 1973 if p1 is None and p2 is None:
1952 1974 p1, p2 = self.parents(node)
1953 1975 if node != self.hash(text, p1, p2):
1954 1976 # Clear the revision cache on hash failure. The revision cache
1955 1977 # only stores the raw revision and clearing the cache does have
1956 1978 # the side-effect that we won't have a cache hit when the raw
1957 1979 # revision data is accessed. But this case should be rare and
1958 1980 # it is extra work to teach the cache about the hash
1959 1981 # verification state.
1960 1982 if self._revisioncache and self._revisioncache[0] == node:
1961 1983 self._revisioncache = None
1962 1984
1963 1985 revornode = rev
1964 1986 if revornode is None:
1965 1987 revornode = templatefilters.short(hex(node))
1966 1988 raise error.RevlogError(
1967 1989 _(b"integrity check failed on %s:%s")
1968 1990 % (self.display_id, pycompat.bytestr(revornode))
1969 1991 )
1970 1992 except error.RevlogError:
1971 1993 if self._censorable and storageutil.iscensoredtext(text):
1972 1994 raise error.CensoredNodeError(self.display_id, node, text)
1973 1995 raise
1974 1996
1975 1997 def _enforceinlinesize(self, tr):
1976 1998 """Check if the revlog is too big for inline and convert if so.
1977 1999
1978 2000 This should be called after revisions are added to the revlog. If the
1979 2001 revlog has grown too large to be an inline revlog, it will convert it
1980 2002 to use multiple index and data files.
1981 2003 """
1982 2004 tiprev = len(self) - 1
1983 2005 total_size = self.start(tiprev) + self.length(tiprev)
1984 2006 if not self._inline or total_size < _maxinline:
1985 2007 return
1986 2008
1987 2009 troffset = tr.findoffset(self._indexfile)
1988 2010 if troffset is None:
1989 2011 raise error.RevlogError(
1990 2012 _(b"%s not found in the transaction") % self._indexfile
1991 2013 )
1992 2014 trindex = 0
1993 2015 tr.add(self._datafile, 0)
1994 2016
1995 2017 existing_handles = False
1996 2018 if self._writinghandles is not None:
1997 2019 existing_handles = True
1998 2020 fp = self._writinghandles[0]
1999 2021 fp.flush()
2000 2022 fp.close()
2001 2023 # We can't use the cached file handle after close(). So prevent
2002 2024 # its usage.
2003 2025 self._writinghandles = None
2004 2026
2005 2027 new_dfh = self._datafp(b'w+')
2006 2028 new_dfh.truncate(0) # drop any potentially existing data
2007 2029 try:
2008 2030 with self._indexfp() as read_ifh:
2009 2031 for r in self:
2010 2032 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2011 2033 if troffset <= self.start(r):
2012 2034 trindex = r
2013 2035 new_dfh.flush()
2014 2036
2015 2037 with self.__index_new_fp() as fp:
2016 2038 self._format_flags &= ~FLAG_INLINE_DATA
2017 2039 self._inline = False
2018 2040 for i in self:
2019 2041 e = self.index.entry_binary(i)
2020 2042 if i == 0 and self._docket is None:
2021 2043 header = self._format_flags | self._format_version
2022 2044 header = self.index.pack_header(header)
2023 2045 e = header + e
2024 2046 fp.write(e)
2047 if self._docket is not None:
2048 self._docket.index_end = fp.tell()
2025 2049 # the temp file replaces the real index when we exit the context
2026 2050 # manager
2027 2051
2028 2052 tr.replace(self._indexfile, trindex * self.index.entry_size)
2029 2053 nodemaputil.setup_persistent_nodemap(tr, self)
2030 2054 self._chunkclear()
2031 2055
2032 2056 if existing_handles:
2033 2057 # switched from inline to conventional; reopen the index
2034 2058 ifh = self.__index_write_fp()
2035 2059 self._writinghandles = (ifh, new_dfh)
2036 2060 new_dfh = None
2037 2061 finally:
2038 2062 if new_dfh is not None:
2039 2063 new_dfh.close()
2040 2064
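# The ``self._docket.index_end = fp.tell()`` assignment added above keeps
# the docket's recorded index size in sync when an inline revlog is split
# into separate index and data files; ``_loadindex`` later relies on that
# recorded size rather than on the index file's physical length.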
2041 2065 def _nodeduplicatecallback(self, transaction, node):
2042 2066 """called when trying to add a node already stored."""
2043 2067
2044 2068 @contextlib.contextmanager
2045 2069 def _writing(self, transaction):
2046 2070 if self._writinghandles is not None:
2047 2071 yield
2048 2072 else:
2049 2073 r = len(self)
2050 2074 dsize = 0
2051 2075 if r:
2052 2076 dsize = self.end(r - 1)
2053 2077 dfh = None
2054 2078 if not self._inline:
2055 2079 try:
2056 2080 dfh = self._datafp(b"r+")
2057 2081 dfh.seek(0, os.SEEK_END)
2058 2082 except IOError as inst:
2059 2083 if inst.errno != errno.ENOENT:
2060 2084 raise
2061 2085 dfh = self._datafp(b"w+")
2062 2086 transaction.add(self._datafile, dsize)
2063 2087 try:
2064 2088 isize = r * self.index.entry_size
2065 2089 ifh = self.__index_write_fp()
2066 2090 if self._inline:
2067 2091 transaction.add(self._indexfile, dsize + isize)
2068 2092 else:
2069 2093 transaction.add(self._indexfile, isize)
2070 2094 try:
2071 2095 self._writinghandles = (ifh, dfh)
2072 2096 try:
2073 2097 yield
2074 2098 if self._docket is not None:
2075 2099 self._docket.write(transaction)
2076 2100 finally:
2077 2101 self._writinghandles = None
2078 2102 finally:
2079 2103 ifh.close()
2080 2104 finally:
2081 2105 if dfh is not None:
2082 2106 dfh.close()
2083 2107
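# Sketch of the intended call pattern (``addrawrevision`` below does
# this): every write is wrapped in ``_writing`` so that the index and
# data handles are shared, and for docket-based revlogs the docket,
# including the index size it tracks, is written once on exit.
#
#   with self._writing(transaction):
#       self._addrevision(node, rawtext, transaction, link, p1, p2,
#                         flags, cachedelta)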
2084 2108 def addrevision(
2085 2109 self,
2086 2110 text,
2087 2111 transaction,
2088 2112 link,
2089 2113 p1,
2090 2114 p2,
2091 2115 cachedelta=None,
2092 2116 node=None,
2093 2117 flags=REVIDX_DEFAULT_FLAGS,
2094 2118 deltacomputer=None,
2095 2119 sidedata=None,
2096 2120 ):
2097 2121 """add a revision to the log
2098 2122
2099 2123 text - the revision data to add
2100 2124 transaction - the transaction object used for rollback
2101 2125 link - the linkrev data to add
2102 2126 p1, p2 - the parent nodeids of the revision
2103 2127 cachedelta - an optional precomputed delta
2104 2128 node - nodeid of revision; typically node is not specified, and it is
2105 2129 computed by default as hash(text, p1, p2); however, subclasses might
2106 2130 use a different hashing method (and override checkhash() in such a case)
2107 2131 flags - the known flags to set on the revision
2108 2132 deltacomputer - an optional deltacomputer instance shared between
2109 2133 multiple calls
2110 2134 """
2111 2135 if link == nullrev:
2112 2136 raise error.RevlogError(
2113 2137 _(b"attempted to add linkrev -1 to %s") % self.display_id
2114 2138 )
2115 2139
2116 2140 if sidedata is None:
2117 2141 sidedata = {}
2118 2142 elif sidedata and not self.hassidedata:
2119 2143 raise error.ProgrammingError(
2120 2144 _(b"trying to add sidedata to a revlog who don't support them")
2121 2145 )
2122 2146
2123 2147 if flags:
2124 2148 node = node or self.hash(text, p1, p2)
2125 2149
2126 2150 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2127 2151
2128 2152 # If the flag processor modifies the revision data, ignore any provided
2129 2153 # cachedelta.
2130 2154 if rawtext != text:
2131 2155 cachedelta = None
2132 2156
2133 2157 if len(rawtext) > _maxentrysize:
2134 2158 raise error.RevlogError(
2135 2159 _(
2136 2160 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2137 2161 )
2138 2162 % (self.display_id, len(rawtext))
2139 2163 )
2140 2164
2141 2165 node = node or self.hash(rawtext, p1, p2)
2142 2166 rev = self.index.get_rev(node)
2143 2167 if rev is not None:
2144 2168 return rev
2145 2169
2146 2170 if validatehash:
2147 2171 self.checkhash(rawtext, node, p1=p1, p2=p2)
2148 2172
2149 2173 return self.addrawrevision(
2150 2174 rawtext,
2151 2175 transaction,
2152 2176 link,
2153 2177 p1,
2154 2178 p2,
2155 2179 node,
2156 2180 flags,
2157 2181 cachedelta=cachedelta,
2158 2182 deltacomputer=deltacomputer,
2159 2183 sidedata=sidedata,
2160 2184 )
2161 2185
2162 2186 def addrawrevision(
2163 2187 self,
2164 2188 rawtext,
2165 2189 transaction,
2166 2190 link,
2167 2191 p1,
2168 2192 p2,
2169 2193 node,
2170 2194 flags,
2171 2195 cachedelta=None,
2172 2196 deltacomputer=None,
2173 2197 sidedata=None,
2174 2198 ):
2175 2199 """add a raw revision with known flags, node and parents
2176 2200 useful when reusing a revision not stored in this revlog (ex: received
2177 2201 over wire, or read from an external bundle).
2178 2202 """
2179 2203 with self._writing(transaction):
2180 2204 return self._addrevision(
2181 2205 node,
2182 2206 rawtext,
2183 2207 transaction,
2184 2208 link,
2185 2209 p1,
2186 2210 p2,
2187 2211 flags,
2188 2212 cachedelta,
2189 2213 deltacomputer=deltacomputer,
2190 2214 sidedata=sidedata,
2191 2215 )
2192 2216
2193 2217 def compress(self, data):
2194 2218 """Generate a possibly-compressed representation of data."""
2195 2219 if not data:
2196 2220 return b'', data
2197 2221
2198 2222 compressed = self._compressor.compress(data)
2199 2223
2200 2224 if compressed:
2201 2225 # The revlog compressor added the header in the returned data.
2202 2226 return b'', compressed
2203 2227
2204 2228 if data[0:1] == b'\0':
2205 2229 return b'', data
2206 2230 return b'u', data
2207 2231
2208 2232 def decompress(self, data):
2209 2233 """Decompress a revlog chunk.
2210 2234
2211 2235 The chunk is expected to begin with a header identifying the
2212 2236 format type so it can be routed to an appropriate decompressor.
2213 2237 """
2214 2238 if not data:
2215 2239 return data
2216 2240
2217 2241 # Revlogs are read much more frequently than they are written and many
2218 2242 # chunks only take microseconds to decompress, so performance is
2219 2243 # important here.
2220 2244 #
2221 2245 # We can make a few assumptions about revlogs:
2222 2246 #
2223 2247 # 1) the majority of chunks will be compressed (as opposed to inline
2224 2248 # raw data).
2225 2249 # 2) decompressing *any* data will likely be at least 10x slower than
2226 2250 # returning raw inline data.
2227 2251 # 3) we want to prioritize common and officially supported compression
2228 2252 # engines
2229 2253 #
2230 2254 # It follows that we want to optimize for "decompress compressed data
2231 2255 # when encoded with common and officially supported compression engines"
2232 2256 # case over "raw data" and "data encoded by less common or non-official
2233 2257 # compression engines." That is why we have the inline lookup first
2234 2258 # followed by the compengines lookup.
2235 2259 #
2236 2260 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2237 2261 # compressed chunks. And this matters for changelog and manifest reads.
2238 2262 t = data[0:1]
2239 2263
2240 2264 if t == b'x':
2241 2265 try:
2242 2266 return _zlibdecompress(data)
2243 2267 except zlib.error as e:
2244 2268 raise error.RevlogError(
2245 2269 _(b'revlog decompress error: %s')
2246 2270 % stringutil.forcebytestr(e)
2247 2271 )
2248 2272 # '\0' is more common than 'u' so it goes first.
2249 2273 elif t == b'\0':
2250 2274 return data
2251 2275 elif t == b'u':
2252 2276 return util.buffer(data, 1)
2253 2277
2254 2278 try:
2255 2279 compressor = self._decompressors[t]
2256 2280 except KeyError:
2257 2281 try:
2258 2282 engine = util.compengines.forrevlogheader(t)
2259 2283 compressor = engine.revlogcompressor(self._compengineopts)
2260 2284 self._decompressors[t] = compressor
2261 2285 except KeyError:
2262 2286 raise error.RevlogError(
2263 2287 _(b'unknown compression type %s') % binascii.hexlify(t)
2264 2288 )
2265 2289
2266 2290 return compressor.decompress(data)
2267 2291
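    # Illustrative round trip (sketch only; `rl` stands for any revlog
    # instance and `rawchunk` for arbitrary bytes):
    #
    #     header, packed = rl.compress(rawchunk)
    #     stored = header + packed          # bytes that land in the revlog
    #     assert bytes(rl.decompress(stored)) == rawchunk
    #
    # zlib output already starts with 'x' and raw data starting with '\0'
    # needs no marker, so only other uncompressed chunks pay the one-byte
    # 'u' prefix.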
2268 2292 def _addrevision(
2269 2293 self,
2270 2294 node,
2271 2295 rawtext,
2272 2296 transaction,
2273 2297 link,
2274 2298 p1,
2275 2299 p2,
2276 2300 flags,
2277 2301 cachedelta,
2278 2302 alwayscache=False,
2279 2303 deltacomputer=None,
2280 2304 sidedata=None,
2281 2305 ):
2282 2306 """internal function to add revisions to the log
2283 2307
2284 2308 see addrevision for argument descriptions.
2285 2309
2286 2310 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2287 2311
2288 2312 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2289 2313 be used.
2290 2314
2291 2315 invariants:
2292 2316 - rawtext is optional (can be None); if not set, cachedelta must be set.
2293 2317 if both are set, they must correspond to each other.
2294 2318 """
2295 2319 if node == self.nullid:
2296 2320 raise error.RevlogError(
2297 2321 _(b"%s: attempt to add null revision") % self.display_id
2298 2322 )
2299 2323 if (
2300 2324 node == self.nodeconstants.wdirid
2301 2325 or node in self.nodeconstants.wdirfilenodeids
2302 2326 ):
2303 2327 raise error.RevlogError(
2304 2328 _(b"%s: attempt to add wdir revision") % self.display_id
2305 2329 )
2306 2330 if self._writinghandles is None:
2307 2331 msg = b'adding revision outside `revlog._writing` context'
2308 2332 raise error.ProgrammingError(msg)
2309 2333
2310 2334 if self._inline:
2311 2335 fh = self._writinghandles[0]
2312 2336 else:
2313 2337 fh = self._writinghandles[1]
2314 2338
2315 2339 btext = [rawtext]
2316 2340
2317 2341 curr = len(self)
2318 2342 prev = curr - 1
2319 2343
2320 2344 offset = self._get_data_offset(prev)
2321 2345
2322 2346 if self._concurrencychecker:
2323 2347 ifh, dfh = self._writinghandles
2324 2348 if self._inline:
2325 2349 # offset is "as if" it were in the .d file, so we need to add on
2326 2350 # the size of the entry metadata.
2327 2351 self._concurrencychecker(
2328 2352 ifh, self._indexfile, offset + curr * self.index.entry_size
2329 2353 )
2330 2354 else:
2331 2355 # Entries in the .i are a consistent size.
2332 2356 self._concurrencychecker(
2333 2357 ifh, self._indexfile, curr * self.index.entry_size
2334 2358 )
2335 2359 self._concurrencychecker(dfh, self._datafile, offset)
2336 2360
2337 2361 p1r, p2r = self.rev(p1), self.rev(p2)
2338 2362
2339 2363 # full versions are inserted when the needed deltas
2340 2364 # become comparable to the uncompressed text
2341 2365 if rawtext is None:
2342 2366 # need rawtext size, before changed by flag processors, which is
2343 2367 # the non-raw size. use revlog explicitly to avoid filelog's extra
2344 2368 # logic that might remove metadata size.
2345 2369 textlen = mdiff.patchedsize(
2346 2370 revlog.size(self, cachedelta[0]), cachedelta[1]
2347 2371 )
2348 2372 else:
2349 2373 textlen = len(rawtext)
2350 2374
2351 2375 if deltacomputer is None:
2352 2376 deltacomputer = deltautil.deltacomputer(self)
2353 2377
2354 2378 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2355 2379
2356 2380 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2357 2381
2358 2382 if sidedata and self.hassidedata:
2359 2383 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2360 2384 sidedata_offset = offset + deltainfo.deltalen
2361 2385 else:
2362 2386 serialized_sidedata = b""
2363 2387 # Don't store the offset if the sidedata is empty, that way
2364 2388 # we can easily detect empty sidedata and they will be no different
2365 2389 # than ones we manually add.
2366 2390 sidedata_offset = 0
2367 2391
2368 2392 e = (
2369 2393 offset_type(offset, flags),
2370 2394 deltainfo.deltalen,
2371 2395 textlen,
2372 2396 deltainfo.base,
2373 2397 link,
2374 2398 p1r,
2375 2399 p2r,
2376 2400 node,
2377 2401 sidedata_offset,
2378 2402 len(serialized_sidedata),
2379 2403 )
2380 2404
2381 2405 self.index.append(e)
2382 2406 entry = self.index.entry_binary(curr)
2383 2407 if curr == 0 and self._docket is None:
2384 2408 header = self._format_flags | self._format_version
2385 2409 header = self.index.pack_header(header)
2386 2410 entry = header + entry
2387 2411 self._writeentry(
2388 2412 transaction,
2389 2413 entry,
2390 2414 deltainfo.data,
2391 2415 link,
2392 2416 offset,
2393 2417 serialized_sidedata,
2394 2418 )
2395 2419
2396 2420 rawtext = btext[0]
2397 2421
2398 2422 if alwayscache and rawtext is None:
2399 2423 rawtext = deltacomputer.buildtext(revinfo, fh)
2400 2424
2401 2425 if type(rawtext) == bytes: # only accept immutable objects
2402 2426 self._revisioncache = (node, curr, rawtext)
2403 2427 self._chainbasecache[curr] = deltainfo.chainbase
2404 2428 return curr
2405 2429
2406 2430 def _get_data_offset(self, prev):
2407 2431 """Returns the current offset in the (in-transaction) data file.
2408 2432 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
2409 2433 file to store that information: since sidedata can be rewritten to the
2410 2434 end of the data file within a transaction, you can have cases where, for
2411 2435 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2412 2436 to `n - 1`'s sidedata being written after `n`'s data.
2413 2437
2414 2438 TODO cache this in a docket file before getting out of experimental."""
2415 2439 if self._format_version != REVLOGV2:
2416 2440 return self.end(prev)
2417 2441
2418 2442 offset = 0
2419 2443 for rev, entry in enumerate(self.index):
2420 2444 sidedata_end = entry[8] + entry[9]
2421 2445 # Sidedata for a previous rev has potentially been written after
2422 2446 # this rev's end, so take the max.
2423 2447 offset = max(self.end(rev), offset, sidedata_end)
2424 2448 return offset
2425 2449
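    # Worked example for the loop above (illustrative numbers): with rev 0
    # stored at [0, 100) and rev 1 at [100, 150) in the data file, rewriting
    # rev 0's sidedata appends it at, say, [150, 180). The next revision must
    # then start at 180 = max(end(1), sidedata_end of rev 0), which is why the
    # offset is the maximum over every entry rather than just self.end(prev).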
2426 2450 def _writeentry(self, transaction, entry, data, link, offset, sidedata):
2427 2451 # Files opened in a+ mode have inconsistent behavior on various
2428 2452 # platforms. Windows requires that a file positioning call be made
2429 2453 # when the file handle transitions between reads and writes. See
2430 2454 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2431 2455 # platforms, Python or the platform itself can be buggy. Some versions
2432 2456 # of Solaris have been observed to not append at the end of the file
2433 2457 # if the file was seeked to before the end. See issue4943 for more.
2434 2458 #
2435 2459 # We work around this issue by inserting a seek() before writing.
2436 2460 # Note: This is likely not necessary on Python 3. However, because
2437 2461 # the file handle is reused for reads and may be seeked there, we need
2438 2462 # to be careful before changing this.
2439 2463 if self._writinghandles is None:
2440 2464 msg = b'adding revision outside `revlog._writing` context'
2441 2465 raise error.ProgrammingError(msg)
2442 2466 ifh, dfh = self._writinghandles
2467 if self._docket is None:
2443 2468 ifh.seek(0, os.SEEK_END)
2469 else:
2470 ifh.seek(self._docket.index_end, os.SEEK_SET)
2444 2471 if dfh:
2445 2472 dfh.seek(0, os.SEEK_END)
2446 2473
2447 2474 curr = len(self) - 1
2448 2475 if not self._inline:
2449 2476 transaction.add(self._datafile, offset)
2450 2477 transaction.add(self._indexfile, curr * len(entry))
2451 2478 if data[0]:
2452 2479 dfh.write(data[0])
2453 2480 dfh.write(data[1])
2454 2481 if sidedata:
2455 2482 dfh.write(sidedata)
2456 2483 ifh.write(entry)
2457 2484 else:
2458 2485 offset += curr * self.index.entry_size
2459 2486 transaction.add(self._indexfile, offset)
2460 2487 ifh.write(entry)
2461 2488 ifh.write(data[0])
2462 2489 ifh.write(data[1])
2463 2490 if sidedata:
2464 2491 ifh.write(sidedata)
2465 2492 self._enforceinlinesize(transaction)
2493 if self._docket is not None:
2494 self._docket.index_end = self._writinghandles[0].tell()
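        # Note: once a docket exists, index_end (not the physical file size)
        # marks the end of valid index data: the seek above starts the write
        # at the docket's recorded end, and the updated end captured here is
        # persisted by docket.write() when the `_writing` context exits.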
2495
2466 2496 nodemaputil.setup_persistent_nodemap(transaction, self)
2467 2497
2468 2498 def addgroup(
2469 2499 self,
2470 2500 deltas,
2471 2501 linkmapper,
2472 2502 transaction,
2473 2503 alwayscache=False,
2474 2504 addrevisioncb=None,
2475 2505 duplicaterevisioncb=None,
2476 2506 ):
2477 2507 """
2478 2508 add a delta group
2479 2509
2480 2510 given a set of deltas, add them to the revision log. the
2481 2511 first delta is against its parent, which should be in our
2482 2512 log, the rest are against the previous delta.
2483 2513
2484 2514 If ``addrevisioncb`` is defined, it will be called with arguments of
2485 2515 this revlog and the node that was added.
2486 2516 """
2487 2517
2488 2518 if self._adding_group:
2489 2519 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2490 2520
2491 2521 self._adding_group = True
2492 2522 empty = True
2493 2523 try:
2494 2524 with self._writing(transaction):
2495 2525 deltacomputer = deltautil.deltacomputer(self)
2496 2526 # loop through our set of deltas
2497 2527 for data in deltas:
2498 2528 (
2499 2529 node,
2500 2530 p1,
2501 2531 p2,
2502 2532 linknode,
2503 2533 deltabase,
2504 2534 delta,
2505 2535 flags,
2506 2536 sidedata,
2507 2537 ) = data
2508 2538 link = linkmapper(linknode)
2509 2539 flags = flags or REVIDX_DEFAULT_FLAGS
2510 2540
2511 2541 rev = self.index.get_rev(node)
2512 2542 if rev is not None:
2513 2543 # this can happen if two branches make the same change
2514 2544 self._nodeduplicatecallback(transaction, rev)
2515 2545 if duplicaterevisioncb:
2516 2546 duplicaterevisioncb(self, rev)
2517 2547 empty = False
2518 2548 continue
2519 2549
2520 2550 for p in (p1, p2):
2521 2551 if not self.index.has_node(p):
2522 2552 raise error.LookupError(
2523 2553 p, self.radix, _(b'unknown parent')
2524 2554 )
2525 2555
2526 2556 if not self.index.has_node(deltabase):
2527 2557 raise error.LookupError(
2528 2558 deltabase, self.display_id, _(b'unknown delta base')
2529 2559 )
2530 2560
2531 2561 baserev = self.rev(deltabase)
2532 2562
2533 2563 if baserev != nullrev and self.iscensored(baserev):
2534 2564 # if base is censored, delta must be full replacement in a
2535 2565 # single patch operation
2536 2566 hlen = struct.calcsize(b">lll")
2537 2567 oldlen = self.rawsize(baserev)
2538 2568 newlen = len(delta) - hlen
2539 2569 if delta[:hlen] != mdiff.replacediffheader(
2540 2570 oldlen, newlen
2541 2571 ):
2542 2572 raise error.CensoredBaseError(
2543 2573 self.display_id, self.node(baserev)
2544 2574 )
2545 2575
2546 2576 if not flags and self._peek_iscensored(baserev, delta):
2547 2577 flags |= REVIDX_ISCENSORED
2548 2578
2549 2579 # We assume consumers of addrevisioncb will want to retrieve
2550 2580 # the added revision, which will require a call to
2551 2581 # revision(). revision() will fast path if there is a cache
2552 2582 # hit. So, we tell _addrevision() to always cache in this case.
2553 2583 # We're only using addgroup() in the context of changegroup
2554 2584 # generation so the revision data can always be handled as raw
2555 2585 # by the flagprocessor.
2556 2586 rev = self._addrevision(
2557 2587 node,
2558 2588 None,
2559 2589 transaction,
2560 2590 link,
2561 2591 p1,
2562 2592 p2,
2563 2593 flags,
2564 2594 (baserev, delta),
2565 2595 alwayscache=alwayscache,
2566 2596 deltacomputer=deltacomputer,
2567 2597 sidedata=sidedata,
2568 2598 )
2569 2599
2570 2600 if addrevisioncb:
2571 2601 addrevisioncb(self, rev)
2572 2602 empty = False
2573 2603 finally:
2574 2604 self._adding_group = False
2575 2605 return not empty
2576 2606
2577 2607 def iscensored(self, rev):
2578 2608 """Check if a file revision is censored."""
2579 2609 if not self._censorable:
2580 2610 return False
2581 2611
2582 2612 return self.flags(rev) & REVIDX_ISCENSORED
2583 2613
2584 2614 def _peek_iscensored(self, baserev, delta):
2585 2615 """Quickly check if a delta produces a censored revision."""
2586 2616 if not self._censorable:
2587 2617 return False
2588 2618
2589 2619 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2590 2620
2591 2621 def getstrippoint(self, minlink):
2592 2622 """find the minimum rev that must be stripped to strip the linkrev
2593 2623
2594 2624 Returns a tuple containing the minimum rev and a set of all revs that
2595 2625 have linkrevs that will be broken by this strip.
2596 2626 """
2597 2627 return storageutil.resolvestripinfo(
2598 2628 minlink,
2599 2629 len(self) - 1,
2600 2630 self.headrevs(),
2601 2631 self.linkrev,
2602 2632 self.parentrevs,
2603 2633 )
2604 2634
2605 2635 def strip(self, minlink, transaction):
2606 2636 """truncate the revlog on the first revision with a linkrev >= minlink
2607 2637
2608 2638 This function is called when we're stripping revision minlink and
2609 2639 its descendants from the repository.
2610 2640
2611 2641 We have to remove all revisions with linkrev >= minlink, because
2612 2642 the equivalent changelog revisions will be renumbered after the
2613 2643 strip.
2614 2644
2615 2645 So we truncate the revlog on the first of these revisions, and
2616 2646 trust that the caller has saved the revisions that shouldn't be
2617 2647 removed and that it'll re-add them after this truncation.
2618 2648 """
2619 2649 if len(self) == 0:
2620 2650 return
2621 2651
2622 2652 rev, _ = self.getstrippoint(minlink)
2623 2653 if rev == len(self):
2624 2654 return
2625 2655
2626 2656 # first truncate the files on disk
2627 2657 end = self.start(rev)
2628 2658 if not self._inline:
2629 2659 transaction.add(self._datafile, end)
2630 2660 end = rev * self.index.entry_size
2631 2661 else:
2632 2662 end += rev * self.index.entry_size
2633 2663
2634 2664 transaction.add(self._indexfile, end)
2665 if self._docket is not None:
2666 # XXX we could leverage the docket while stripping. However it is
2667 # not powerful enough at the time of this comment
2668 self._docket.index_end = end
2669 self._docket.write(transaction, stripping=True)
2635 2670
2636 2671 # then reset internal state in memory to forget those revisions
2637 2672 self._revisioncache = None
2638 2673 self._chaininfocache = util.lrucachedict(500)
2639 2674 self._chunkclear()
2640 2675
2641 2676 del self.index[rev:-1]
2642 2677
2643 2678 def checksize(self):
2644 2679 """Check size of index and data files
2645 2680
2646 2681 return a (dd, di) tuple.
2647 2682 - dd: extra bytes for the "data" file
2648 2683 - di: extra bytes for the "index" file
2649 2684
2650 2685 A healthy revlog will return (0, 0).
2651 2686 """
2652 2687 expected = 0
2653 2688 if len(self):
2654 2689 expected = max(0, self.end(len(self) - 1))
2655 2690
2656 2691 try:
2657 2692 with self._datafp() as f:
2658 2693 f.seek(0, io.SEEK_END)
2659 2694 actual = f.tell()
2660 2695 dd = actual - expected
2661 2696 except IOError as inst:
2662 2697 if inst.errno != errno.ENOENT:
2663 2698 raise
2664 2699 dd = 0
2665 2700
2666 2701 try:
2667 2702 f = self.opener(self._indexfile)
2668 2703 f.seek(0, io.SEEK_END)
2669 2704 actual = f.tell()
2670 2705 f.close()
2671 2706 s = self.index.entry_size
2672 2707 i = max(0, actual // s)
2673 2708 di = actual - (i * s)
2674 2709 if self._inline:
2675 2710 databytes = 0
2676 2711 for r in self:
2677 2712 databytes += max(0, self.length(r))
2678 2713 dd = 0
2679 2714 di = actual - len(self) * s - databytes
2680 2715 except IOError as inst:
2681 2716 if inst.errno != errno.ENOENT:
2682 2717 raise
2683 2718 di = 0
2684 2719
2685 2720 return (dd, di)
2686 2721
2687 2722 def files(self):
2688 2723 res = [self._indexfile]
2689 2724 if not self._inline:
2690 2725 res.append(self._datafile)
2691 2726 return res
2692 2727
2693 2728 def emitrevisions(
2694 2729 self,
2695 2730 nodes,
2696 2731 nodesorder=None,
2697 2732 revisiondata=False,
2698 2733 assumehaveparentrevisions=False,
2699 2734 deltamode=repository.CG_DELTAMODE_STD,
2700 2735 sidedata_helpers=None,
2701 2736 ):
2702 2737 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2703 2738 raise error.ProgrammingError(
2704 2739 b'unhandled value for nodesorder: %s' % nodesorder
2705 2740 )
2706 2741
2707 2742 if nodesorder is None and not self._generaldelta:
2708 2743 nodesorder = b'storage'
2709 2744
2710 2745 if (
2711 2746 not self._storedeltachains
2712 2747 and deltamode != repository.CG_DELTAMODE_PREV
2713 2748 ):
2714 2749 deltamode = repository.CG_DELTAMODE_FULL
2715 2750
2716 2751 return storageutil.emitrevisions(
2717 2752 self,
2718 2753 nodes,
2719 2754 nodesorder,
2720 2755 revlogrevisiondelta,
2721 2756 deltaparentfn=self.deltaparent,
2722 2757 candeltafn=self.candelta,
2723 2758 rawsizefn=self.rawsize,
2724 2759 revdifffn=self.revdiff,
2725 2760 flagsfn=self.flags,
2726 2761 deltamode=deltamode,
2727 2762 revisiondata=revisiondata,
2728 2763 assumehaveparentrevisions=assumehaveparentrevisions,
2729 2764 sidedata_helpers=sidedata_helpers,
2730 2765 )
2731 2766
2732 2767 DELTAREUSEALWAYS = b'always'
2733 2768 DELTAREUSESAMEREVS = b'samerevs'
2734 2769 DELTAREUSENEVER = b'never'
2735 2770
2736 2771 DELTAREUSEFULLADD = b'fulladd'
2737 2772
2738 2773 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2739 2774
2740 2775 def clone(
2741 2776 self,
2742 2777 tr,
2743 2778 destrevlog,
2744 2779 addrevisioncb=None,
2745 2780 deltareuse=DELTAREUSESAMEREVS,
2746 2781 forcedeltabothparents=None,
2747 2782 sidedata_helpers=None,
2748 2783 ):
2749 2784 """Copy this revlog to another, possibly with format changes.
2750 2785
2751 2786 The destination revlog will contain the same revisions and nodes.
2752 2787 However, it may not be bit-for-bit identical due to e.g. delta encoding
2753 2788 differences.
2754 2789
2755 2790 The ``deltareuse`` argument controls how deltas from the existing revlog
2756 2791 are preserved in the destination revlog. The argument can have the
2757 2792 following values:
2758 2793
2759 2794 DELTAREUSEALWAYS
2760 2795 Deltas will always be reused (if possible), even if the destination
2761 2796 revlog would not select the same revisions for the delta. This is the
2762 2797 fastest mode of operation.
2763 2798 DELTAREUSESAMEREVS
2764 2799 Deltas will be reused if the destination revlog would pick the same
2765 2800 revisions for the delta. This mode strikes a balance between speed
2766 2801 and optimization.
2767 2802 DELTAREUSENEVER
2768 2803 Deltas will never be reused. This is the slowest mode of execution.
2769 2804 This mode can be used to recompute deltas (e.g. if the diff/delta
2770 2805 algorithm changes).
2771 2806 DELTAREUSEFULLADD
2772 2807 Revisions will be re-added as if they were new content. This is
2773 2808 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2774 2809 e.g. large file detection and handling.
2775 2810
2776 2811 Delta computation can be slow, so the choice of delta reuse policy can
2777 2812 significantly affect run time.
2778 2813
2779 2814 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2780 2815 two extremes. Deltas will be reused if they are appropriate. But if the
2781 2816 delta could choose a better revision, it will do so. This means if you
2782 2817 are converting a non-generaldelta revlog to a generaldelta revlog,
2783 2818 deltas will be recomputed if the delta's parent isn't a parent of the
2784 2819 revision.
2785 2820
2786 2821 In addition to the delta policy, the ``forcedeltabothparents``
2787 2822 argument controls whether to force computing deltas against both parents
2788 2823 for merges. When not set, the destination revlog's current setting is used.
2789 2824
2790 2825 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2791 2826 `sidedata_helpers`.
2792 2827 """
2793 2828 if deltareuse not in self.DELTAREUSEALL:
2794 2829 raise ValueError(
2795 2830 _(b'value for deltareuse invalid: %s') % deltareuse
2796 2831 )
2797 2832
2798 2833 if len(destrevlog):
2799 2834 raise ValueError(_(b'destination revlog is not empty'))
2800 2835
2801 2836 if getattr(self, 'filteredrevs', None):
2802 2837 raise ValueError(_(b'source revlog has filtered revisions'))
2803 2838 if getattr(destrevlog, 'filteredrevs', None):
2804 2839 raise ValueError(_(b'destination revlog has filtered revisions'))
2805 2840
2806 2841 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2807 2842 # if possible.
2808 2843 oldlazydelta = destrevlog._lazydelta
2809 2844 oldlazydeltabase = destrevlog._lazydeltabase
2810 2845 oldamd = destrevlog._deltabothparents
2811 2846
2812 2847 try:
2813 2848 if deltareuse == self.DELTAREUSEALWAYS:
2814 2849 destrevlog._lazydeltabase = True
2815 2850 destrevlog._lazydelta = True
2816 2851 elif deltareuse == self.DELTAREUSESAMEREVS:
2817 2852 destrevlog._lazydeltabase = False
2818 2853 destrevlog._lazydelta = True
2819 2854 elif deltareuse == self.DELTAREUSENEVER:
2820 2855 destrevlog._lazydeltabase = False
2821 2856 destrevlog._lazydelta = False
2822 2857
2823 2858 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2824 2859
2825 2860 self._clone(
2826 2861 tr,
2827 2862 destrevlog,
2828 2863 addrevisioncb,
2829 2864 deltareuse,
2830 2865 forcedeltabothparents,
2831 2866 sidedata_helpers,
2832 2867 )
2833 2868
2834 2869 finally:
2835 2870 destrevlog._lazydelta = oldlazydelta
2836 2871 destrevlog._lazydeltabase = oldlazydeltabase
2837 2872 destrevlog._deltabothparents = oldamd
2838 2873
2839 2874 def _clone(
2840 2875 self,
2841 2876 tr,
2842 2877 destrevlog,
2843 2878 addrevisioncb,
2844 2879 deltareuse,
2845 2880 forcedeltabothparents,
2846 2881 sidedata_helpers,
2847 2882 ):
2848 2883 """perform the core duty of `revlog.clone` after parameter processing"""
2849 2884 deltacomputer = deltautil.deltacomputer(destrevlog)
2850 2885 index = self.index
2851 2886 for rev in self:
2852 2887 entry = index[rev]
2853 2888
2854 2889 # Some classes override linkrev to take filtered revs into
2855 2890 # account. Use raw entry from index.
2856 2891 flags = entry[0] & 0xFFFF
2857 2892 linkrev = entry[4]
2858 2893 p1 = index[entry[5]][7]
2859 2894 p2 = index[entry[6]][7]
2860 2895 node = entry[7]
2861 2896
2862 2897 # (Possibly) reuse the delta from the revlog if allowed and
2863 2898 # the revlog chunk is a delta.
2864 2899 cachedelta = None
2865 2900 rawtext = None
2866 2901 if deltareuse == self.DELTAREUSEFULLADD:
2867 2902 text, sidedata = self._revisiondata(rev)
2868 2903
2869 2904 if sidedata_helpers is not None:
2870 2905 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
2871 2906 self, sidedata_helpers, sidedata, rev
2872 2907 )
2873 2908 flags = flags | new_flags[0] & ~new_flags[1]
2874 2909
2875 2910 destrevlog.addrevision(
2876 2911 text,
2877 2912 tr,
2878 2913 linkrev,
2879 2914 p1,
2880 2915 p2,
2881 2916 cachedelta=cachedelta,
2882 2917 node=node,
2883 2918 flags=flags,
2884 2919 deltacomputer=deltacomputer,
2885 2920 sidedata=sidedata,
2886 2921 )
2887 2922 else:
2888 2923 if destrevlog._lazydelta:
2889 2924 dp = self.deltaparent(rev)
2890 2925 if dp != nullrev:
2891 2926 cachedelta = (dp, bytes(self._chunk(rev)))
2892 2927
2893 2928 sidedata = None
2894 2929 if not cachedelta:
2895 2930 rawtext, sidedata = self._revisiondata(rev)
2896 2931 if sidedata is None:
2897 2932 sidedata = self.sidedata(rev)
2898 2933
2899 2934 if sidedata_helpers is not None:
2900 2935 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
2901 2936 self, sidedata_helpers, sidedata, rev
2902 2937 )
2903 2938 flags = flags | new_flags[0] & ~new_flags[1]
2904 2939
2905 2940 with destrevlog._writing(tr):
2906 2941 destrevlog._addrevision(
2907 2942 node,
2908 2943 rawtext,
2909 2944 tr,
2910 2945 linkrev,
2911 2946 p1,
2912 2947 p2,
2913 2948 flags,
2914 2949 cachedelta,
2915 2950 deltacomputer=deltacomputer,
2916 2951 sidedata=sidedata,
2917 2952 )
2918 2953
2919 2954 if addrevisioncb:
2920 2955 addrevisioncb(self, rev, node)
2921 2956
2922 2957 def censorrevision(self, tr, censornode, tombstone=b''):
2923 2958 if self._format_version == REVLOGV0:
2924 2959 raise error.RevlogError(
2925 2960 _(b'cannot censor with version %d revlogs')
2926 2961 % self._format_version
2927 2962 )
2928 2963
2929 2964 censorrev = self.rev(censornode)
2930 2965 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2931 2966
2932 2967 if len(tombstone) > self.rawsize(censorrev):
2933 2968 raise error.Abort(
2934 2969 _(b'censor tombstone must be no longer than censored data')
2935 2970 )
2936 2971
2937 2972 # Rewriting the revlog in place is hard. Our strategy for censoring is
2938 2973 # to create a new revlog, copy all revisions to it, then replace the
2939 2974 # revlogs on transaction close.
2940 2975 #
2941 2976 # This is a bit dangerous. We could easily have a mismatch of state.
2942 2977 newrl = revlog(
2943 2978 self.opener,
2944 2979 target=self.target,
2945 2980 radix=self.radix,
2946 2981 postfix=b'tmpcensored',
2947 2982 censorable=True,
2948 2983 )
2949 2984 newrl._format_version = self._format_version
2950 2985 newrl._format_flags = self._format_flags
2951 2986 newrl._generaldelta = self._generaldelta
2952 2987 newrl._parse_index = self._parse_index
2953 2988
2954 2989 for rev in self.revs():
2955 2990 node = self.node(rev)
2956 2991 p1, p2 = self.parents(node)
2957 2992
2958 2993 if rev == censorrev:
2959 2994 newrl.addrawrevision(
2960 2995 tombstone,
2961 2996 tr,
2962 2997 self.linkrev(censorrev),
2963 2998 p1,
2964 2999 p2,
2965 3000 censornode,
2966 3001 REVIDX_ISCENSORED,
2967 3002 )
2968 3003
2969 3004 if newrl.deltaparent(rev) != nullrev:
2970 3005 raise error.Abort(
2971 3006 _(
2972 3007 b'censored revision stored as delta; '
2973 3008 b'cannot censor'
2974 3009 ),
2975 3010 hint=_(
2976 3011 b'censoring of revlogs is not '
2977 3012 b'fully implemented; please report '
2978 3013 b'this bug'
2979 3014 ),
2980 3015 )
2981 3016 continue
2982 3017
2983 3018 if self.iscensored(rev):
2984 3019 if self.deltaparent(rev) != nullrev:
2985 3020 raise error.Abort(
2986 3021 _(
2987 3022 b'cannot censor due to censored '
2988 3023 b'revision having delta stored'
2989 3024 )
2990 3025 )
2991 3026 rawtext = self._chunk(rev)
2992 3027 else:
2993 3028 rawtext = self.rawdata(rev)
2994 3029
2995 3030 newrl.addrawrevision(
2996 3031 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
2997 3032 )
2998 3033
2999 3034 tr.addbackup(self._indexfile, location=b'store')
3000 3035 if not self._inline:
3001 3036 tr.addbackup(self._datafile, location=b'store')
3002 3037
3003 3038 self.opener.rename(newrl._indexfile, self._indexfile)
3004 3039 if not self._inline:
3005 3040 self.opener.rename(newrl._datafile, self._datafile)
3006 3041
3007 3042 self.clearcaches()
3008 3043 self._loadindex()
3009 3044
3010 3045 def verifyintegrity(self, state):
3011 3046 """Verifies the integrity of the revlog.
3012 3047
3013 3048 Yields ``revlogproblem`` instances describing problems that are
3014 3049 found.
3015 3050 """
3016 3051 dd, di = self.checksize()
3017 3052 if dd:
3018 3053 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3019 3054 if di:
3020 3055 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3021 3056
3022 3057 version = self._format_version
3023 3058
3024 3059 # The verifier tells us what version revlog we should be.
3025 3060 if version != state[b'expectedversion']:
3026 3061 yield revlogproblem(
3027 3062 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3028 3063 % (self.display_id, version, state[b'expectedversion'])
3029 3064 )
3030 3065
3031 3066 state[b'skipread'] = set()
3032 3067 state[b'safe_renamed'] = set()
3033 3068
3034 3069 for rev in self:
3035 3070 node = self.node(rev)
3036 3071
3037 3072 # Verify contents. 4 cases to care about:
3038 3073 #
3039 3074 # common: the most common case
3040 3075 # rename: with a rename
3041 3076 # meta: file content starts with b'\1\n', the metadata
3042 3077 # header defined in filelog.py, but without a rename
3043 3078 # ext: content stored externally
3044 3079 #
3045 3080 # More formally, their differences are shown below:
3046 3081 #
3047 3082 # | common | rename | meta | ext
3048 3083 # -------------------------------------------------------
3049 3084 # flags() | 0 | 0 | 0 | not 0
3050 3085 # renamed() | False | True | False | ?
3051 3086 # rawtext[0:2]=='\1\n'| False | True | True | ?
3052 3087 #
3053 3088 # "rawtext" means the raw text stored in revlog data, which
3054 3089 # could be retrieved by "rawdata(rev)". "text"
3055 3090 # mentioned below is "revision(rev)".
3056 3091 #
3057 3092 # There are 3 different lengths stored physically:
3058 3093 # 1. L1: rawsize, stored in revlog index
3059 3094 # 2. L2: len(rawtext), stored in revlog data
3060 3095 # 3. L3: len(text), stored in revlog data if flags==0, or
3061 3096 # possibly somewhere else if flags!=0
3062 3097 #
3063 3098 # L1 should be equal to L2. L3 could be different from them.
3064 3099 # "text" may or may not affect commit hash depending on flag
3065 3100 # processors (see flagutil.addflagprocessor).
3066 3101 #
3067 3102 # | common | rename | meta | ext
3068 3103 # -------------------------------------------------
3069 3104 # rawsize() | L1 | L1 | L1 | L1
3070 3105 # size() | L1 | L2-LM | L1(*) | L1 (?)
3071 3106 # len(rawtext) | L2 | L2 | L2 | L2
3072 3107 # len(text) | L2 | L2 | L2 | L3
3073 3108 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3074 3109 #
3075 3110 # LM: length of metadata, depending on rawtext
3076 3111 # (*): not ideal, see comment in filelog.size
3077 3112 # (?): could be "- len(meta)" if the resolved content has
3078 3113 # rename metadata
3079 3114 #
3080 3115 # Checks needed to be done:
3081 3116 # 1. length check: L1 == L2, in all cases.
3082 3117 # 2. hash check: depending on flag processor, we may need to
3083 3118 # use either "text" (external), or "rawtext" (in revlog).
3084 3119
3085 3120 try:
3086 3121 skipflags = state.get(b'skipflags', 0)
3087 3122 if skipflags:
3088 3123 skipflags &= self.flags(rev)
3089 3124
3090 3125 _verify_revision(self, skipflags, state, node)
3091 3126
3092 3127 l1 = self.rawsize(rev)
3093 3128 l2 = len(self.rawdata(node))
3094 3129
3095 3130 if l1 != l2:
3096 3131 yield revlogproblem(
3097 3132 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3098 3133 node=node,
3099 3134 )
3100 3135
3101 3136 except error.CensoredNodeError:
3102 3137 if state[b'erroroncensored']:
3103 3138 yield revlogproblem(
3104 3139 error=_(b'censored file data'), node=node
3105 3140 )
3106 3141 state[b'skipread'].add(node)
3107 3142 except Exception as e:
3108 3143 yield revlogproblem(
3109 3144 error=_(b'unpacking %s: %s')
3110 3145 % (short(node), stringutil.forcebytestr(e)),
3111 3146 node=node,
3112 3147 )
3113 3148 state[b'skipread'].add(node)
3114 3149
3115 3150 def storageinfo(
3116 3151 self,
3117 3152 exclusivefiles=False,
3118 3153 sharedfiles=False,
3119 3154 revisionscount=False,
3120 3155 trackedsize=False,
3121 3156 storedsize=False,
3122 3157 ):
3123 3158 d = {}
3124 3159
3125 3160 if exclusivefiles:
3126 3161 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3127 3162 if not self._inline:
3128 3163 d[b'exclusivefiles'].append((self.opener, self._datafile))
3129 3164
3130 3165 if sharedfiles:
3131 3166 d[b'sharedfiles'] = []
3132 3167
3133 3168 if revisionscount:
3134 3169 d[b'revisionscount'] = len(self)
3135 3170
3136 3171 if trackedsize:
3137 3172 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3138 3173
3139 3174 if storedsize:
3140 3175 d[b'storedsize'] = sum(
3141 3176 self.opener.stat(path).st_size for path in self.files()
3142 3177 )
3143 3178
3144 3179 return d
3145 3180
3146 3181 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3147 3182 if not self.hassidedata:
3148 3183 return
3149 3184 # revlog formats with sidedata support do not support inline
3150 3185 assert not self._inline
3151 3186 if not helpers[1] and not helpers[2]:
3152 3187 # Nothing to generate or remove
3153 3188 return
3154 3189
3155 3190 # changelog implements a "delayed" writing mechanism that assumes that
3156 3191 # all index data is written in append mode and is therefore incompatible
3157 3192 # with the seeked write done in this method. The use of such "delayed"
3158 3193 # writing will soon be removed for revlog versions that support side
3159 3194 # data, so for now, we only keep this simple assert to highlight the
3160 3195 # situation.
3161 3196 delayed = getattr(self, '_delayed', False)
3162 3197 diverted = getattr(self, '_divert', False)
3163 3198 if delayed and not diverted:
3164 3199 msg = "cannot rewrite_sidedata of a delayed revlog"
3165 3200 raise error.ProgrammingError(msg)
3166 3201
3167 3202 new_entries = []
3168 3203 # append the new sidedata
3169 3204 with self._writing(transaction):
3170 3205 ifh, dfh = self._writinghandles
3171 3206 dfh.seek(0, os.SEEK_END)
3172 3207 current_offset = dfh.tell()
3173 3208 for rev in range(startrev, endrev + 1):
3174 3209 entry = self.index[rev]
3175 3210 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3176 3211 store=self,
3177 3212 sidedata_helpers=helpers,
3178 3213 sidedata={},
3179 3214 rev=rev,
3180 3215 )
3181 3216
3182 3217 serialized_sidedata = sidedatautil.serialize_sidedata(
3183 3218 new_sidedata
3184 3219 )
3185 3220 if entry[8] != 0 or entry[9] != 0:
3186 3221 # rewriting entries that already have sidedata is not
3187 3222 # supported yet, because it introduces garbage data in the
3188 3223 # revlog.
3189 3224 msg = b"rewriting existing sidedata is not supported yet"
3190 3225 raise error.Abort(msg)
3191 3226
3192 3227 # Apply (potential) flags to add and to remove after running
3193 3228 # the sidedata helpers
3194 3229 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3195 3230 entry = (new_offset_flags,) + entry[1:8]
3196 3231 entry += (current_offset, len(serialized_sidedata))
3197 3232
3198 3233 # the sidedata computation might have moved the file cursors around
3199 3234 dfh.seek(current_offset, os.SEEK_SET)
3200 3235 dfh.write(serialized_sidedata)
3201 3236 new_entries.append(entry)
3202 3237 current_offset += len(serialized_sidedata)
3203 3238
3204 3239 # rewrite the new index entries
3205 3240 ifh.seek(startrev * self.index.entry_size)
3206 3241 for i, e in enumerate(new_entries):
3207 3242 rev = startrev + i
3208 3243 self.index.replace_sidedata_info(rev, e[8], e[9], e[0])
3209 3244 packed = self.index.entry_binary(rev)
3210 3245 if rev == 0 and self._docket is None:
3211 3246 header = self._format_flags | self._format_version
3212 3247 header = self.index.pack_header(header)
3213 3248 packed = header + packed
3214 3249 ifh.write(packed)
@@ -1,80 +1,100 b''
1 1 # docket - code related to revlog "docket"
2 2 #
3 3 # Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 ### Revlog docket file
9 9 #
10 10 # The revlog is stored on disk using multiple files:
11 11 #
12 12 # * a small docket file, containing metadata and a pointer,
13 13 #
14 14 # * an index file, containing fixed width information about revisions,
15 15 #
16 16 # * a data file, containing variable width data for these revisions,
17 17
18 18 from __future__ import absolute_import
19 19
20 20 import struct
21 21
22 22 from . import (
23 23 constants,
24 24 )
25 25
26 26 # Docket format
27 27 #
28 28 # * 4 bytes: revlog version
29 29 # | This is mandatory as docket must be compatible with the previous
30 30 # | revlog index header.
31 S_HEADER = struct.Struct(constants.INDEX_HEADER.format)
31 # * 8 bytes: size of index data
32 S_HEADER = struct.Struct(constants.INDEX_HEADER.format + 'L')
32 33
33 34
34 35 class RevlogDocket(object):
35 36 """metadata associated with revlog"""
36 37
37 def __init__(self, revlog, version_header=None):
38 def __init__(self, revlog, version_header=None, index_end=0):
38 39 self._version_header = version_header
39 40 self._dirty = False
40 41 self._radix = revlog.radix
41 42 self._path = revlog._docket_file
42 43 self._opener = revlog.opener
44 self._index_end = index_end
43 45
44 46 def index_filepath(self):
45 47 """file path to the current index file associated to this docket"""
46 48 # very simplistic version at first
47 49 return b"%s.idx" % self._radix
48 50
49 def write(self, transaction):
51 @property
52 def index_end(self):
53 return self._index_end
54
55 @index_end.setter
56 def index_end(self, new_size):
57 if new_size != self._index_end:
58 self._index_end = new_size
59 self._dirty = True
60
61 def write(self, transaction, stripping=False):
50 62 """write the modification of disk if any
51 63
52 64 This makes the new content visible to all processes
53 65 if self._dirty:
66 if not stripping:
67 # XXX we could leverage the docket while stripping. However it
68 # is not powerful enough at the time of this comment
54 69 transaction.addbackup(self._path, location=b'store')
55 70 with self._opener(self._path, mode=b'w', atomictemp=True) as f:
56 71 f.write(self._serialize())
57 72 self._dirty = False
58 73
59 74 def _serialize(self):
60 return S_HEADER.pack(self._version_header)
75 data = (
76 self._version_header,
77 self._index_end,
78 )
79 return S_HEADER.pack(*data)
61 80
62 81
63 82 def default_docket(revlog, version_header):
64 83 """given a revlog version a new docket object for the given revlog"""
65 84 if (version_header & 0xFFFF) != constants.REVLOGV2:
66 85 return None
67 86 docket = RevlogDocket(revlog, version_header=version_header)
68 87 docket._dirty = True
69 88 return docket
70 89
71 90
72 91 def parse_docket(revlog, data):
73 92 """given some docket data return a docket object for the given revlog"""
74 93 header = S_HEADER.unpack(data[: S_HEADER.size])
75 (version_header,) = header
94 version_header, index_size = header
76 95 docket = RevlogDocket(
77 96 revlog,
78 97 version_header=version_header,
98 index_end=index_size,
79 99 )
80 100 return docket
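A minimal, self-contained sketch of the header layout described above. It assumes constants.INDEX_HEADER.format is ">I" (the existing 4-byte revlog version header); the trailing "L" added by this change then carries the index size, and the concrete values are purely illustrative:

    import struct

    S_HEADER = struct.Struct(">I" + "L")  # version header, then index size

    def serialize(version_header, index_end):
        return S_HEADER.pack(version_header, index_end)

    def parse(data):
        version_header, index_size = S_HEADER.unpack(data[:S_HEADER.size])
        return version_header, index_size

    packed = serialize(0xDEAD, 4096)
    assert parse(packed) == (0xDEAD, 4096)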