deltas: add a `debug.revlog.debug-delta` config option to enable output...
marmoute -
r50122:2bcf5e14 default
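The option registered in this change lives in the `debug` section under the key `revlog.debug-delta` and defaults to off. A minimal hgrc snippet to turn it on (illustrative only, inferred from the registration added below):

    [debug]
    revlog.debug-delta = true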
@@ -1,2804 +1,2809 b''
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import functools
10 10 import re
11 11
12 12 from . import (
13 13 encoding,
14 14 error,
15 15 )
16 16
17 17
18 18 def loadconfigtable(ui, extname, configtable):
19 19 """update config item known to the ui with the extension ones"""
20 20 for section, items in sorted(configtable.items()):
21 21 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 22 knownkeys = set(knownitems)
23 23 newkeys = set(items)
24 24 for key in sorted(knownkeys & newkeys):
25 25 msg = b"extension '%s' overwrite config item '%s.%s'"
26 26 msg %= (extname, section, key)
27 27 ui.develwarn(msg, config=b'warn-config')
28 28
29 29 knownitems.update(items)
30 30
31 31
32 32 class configitem:
33 33 """represent a known config item
34 34
35 35 :section: the official config section where to find this item,
36 36 :name: the official name within the section,
37 37 :default: default value for this item,
38 38 :alias: optional list of tuples as alternatives,
39 39 :generic: this is a generic definition, match name using regular expression.
40 40 """
41 41
42 42 def __init__(
43 43 self,
44 44 section,
45 45 name,
46 46 default=None,
47 47 alias=(),
48 48 generic=False,
49 49 priority=0,
50 50 experimental=False,
51 51 ):
52 52 self.section = section
53 53 self.name = name
54 54 self.default = default
55 55 self.alias = list(alias)
56 56 self.generic = generic
57 57 self.priority = priority
58 58 self.experimental = experimental
59 59 self._re = None
60 60 if generic:
61 61 self._re = re.compile(self.name)
62 62
63 63
64 64 class itemregister(dict):
65 65 """A specialized dictionary that can handle wild-card selection"""
66 66
67 67 def __init__(self):
68 68 super(itemregister, self).__init__()
69 69 self._generics = set()
70 70
71 71 def update(self, other):
72 72 super(itemregister, self).update(other)
73 73 self._generics.update(other._generics)
74 74
75 75 def __setitem__(self, key, item):
76 76 super(itemregister, self).__setitem__(key, item)
77 77 if item.generic:
78 78 self._generics.add(item)
79 79
80 80 def get(self, key):
81 81 baseitem = super(itemregister, self).get(key)
82 82 if baseitem is not None and not baseitem.generic:
83 83 return baseitem
84 84
85 85 # search for a matching generic item
86 86 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
87 87 for item in generics:
88 88 # we use 'match' instead of 'search' to make the matching simpler
89 89 # for people unfamiliar with regular expressions. Having the match
90 90 # rooted to the start of the string produces less surprising
91 91 # results for users writing simple regexes for sub-attributes.
92 92 #
93 93 # For example, using "color\..*" with match produces an unsurprising
94 94 # result, while using search could suddenly match apparently
95 95 # unrelated configuration that happens to contain "color."
96 96 # anywhere. This is a tradeoff where we favor requiring ".*" on
97 97 # some matches to avoid the need to prefix most patterns with "^".
98 98 # The "^" seems more error prone.
99 99 if item._re.match(key):
100 100 return item
101 101
102 102 return None
103 103
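A quick aside (editorial illustration, not part of this changeset) on why the anchored `match` used above is less surprising than `search` for these generic keys:

    import re
    pat = re.compile(br'color\..*')
    pat.match(b'color.mode')      # matches: the key starts with "color."
    pat.match(b'ui.color.mode')   # None: match is anchored to the start of the key
    pat.search(b'ui.color.mode')  # would match, the surprise the comment above avoids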
104 104
105 105 coreitems = {}
106 106
107 107
108 108 def _register(configtable, *args, **kwargs):
109 109 item = configitem(*args, **kwargs)
110 110 section = configtable.setdefault(item.section, itemregister())
111 111 if item.name in section:
112 112 msg = b"duplicated config item registration for '%s.%s'"
113 113 raise error.ProgrammingError(msg % (item.section, item.name))
114 114 section[item.name] = item
115 115
116 116
117 117 # special value for case where the default is derived from other values
118 118 dynamicdefault = object()
119 119
120 120 # Registering actual config items
121 121
122 122
123 123 def getitemregister(configtable):
124 124 f = functools.partial(_register, configtable)
125 125 # export pseudo enum as configitem.*
126 126 f.dynamicdefault = dynamicdefault
127 127 return f
128 128
129 129
130 130 coreconfigitem = getitemregister(coreitems)
131 131
132 132
133 133 def _registerdiffopts(section, configprefix=b''):
134 134 coreconfigitem(
135 135 section,
136 136 configprefix + b'nodates',
137 137 default=False,
138 138 )
139 139 coreconfigitem(
140 140 section,
141 141 configprefix + b'showfunc',
142 142 default=False,
143 143 )
144 144 coreconfigitem(
145 145 section,
146 146 configprefix + b'unified',
147 147 default=None,
148 148 )
149 149 coreconfigitem(
150 150 section,
151 151 configprefix + b'git',
152 152 default=False,
153 153 )
154 154 coreconfigitem(
155 155 section,
156 156 configprefix + b'ignorews',
157 157 default=False,
158 158 )
159 159 coreconfigitem(
160 160 section,
161 161 configprefix + b'ignorewsamount',
162 162 default=False,
163 163 )
164 164 coreconfigitem(
165 165 section,
166 166 configprefix + b'ignoreblanklines',
167 167 default=False,
168 168 )
169 169 coreconfigitem(
170 170 section,
171 171 configprefix + b'ignorewseol',
172 172 default=False,
173 173 )
174 174 coreconfigitem(
175 175 section,
176 176 configprefix + b'nobinary',
177 177 default=False,
178 178 )
179 179 coreconfigitem(
180 180 section,
181 181 configprefix + b'noprefix',
182 182 default=False,
183 183 )
184 184 coreconfigitem(
185 185 section,
186 186 configprefix + b'word-diff',
187 187 default=False,
188 188 )
189 189
190 190
191 191 coreconfigitem(
192 192 b'alias',
193 193 b'.*',
194 194 default=dynamicdefault,
195 195 generic=True,
196 196 )
197 197 coreconfigitem(
198 198 b'auth',
199 199 b'cookiefile',
200 200 default=None,
201 201 )
202 202 _registerdiffopts(section=b'annotate')
203 203 # bookmarks.pushing: internal hack for discovery
204 204 coreconfigitem(
205 205 b'bookmarks',
206 206 b'pushing',
207 207 default=list,
208 208 )
209 209 # bundle.mainreporoot: internal hack for bundlerepo
210 210 coreconfigitem(
211 211 b'bundle',
212 212 b'mainreporoot',
213 213 default=b'',
214 214 )
215 215 coreconfigitem(
216 216 b'censor',
217 217 b'policy',
218 218 default=b'abort',
219 219 experimental=True,
220 220 )
221 221 coreconfigitem(
222 222 b'chgserver',
223 223 b'idletimeout',
224 224 default=3600,
225 225 )
226 226 coreconfigitem(
227 227 b'chgserver',
228 228 b'skiphash',
229 229 default=False,
230 230 )
231 231 coreconfigitem(
232 232 b'cmdserver',
233 233 b'log',
234 234 default=None,
235 235 )
236 236 coreconfigitem(
237 237 b'cmdserver',
238 238 b'max-log-files',
239 239 default=7,
240 240 )
241 241 coreconfigitem(
242 242 b'cmdserver',
243 243 b'max-log-size',
244 244 default=b'1 MB',
245 245 )
246 246 coreconfigitem(
247 247 b'cmdserver',
248 248 b'max-repo-cache',
249 249 default=0,
250 250 experimental=True,
251 251 )
252 252 coreconfigitem(
253 253 b'cmdserver',
254 254 b'message-encodings',
255 255 default=list,
256 256 )
257 257 coreconfigitem(
258 258 b'cmdserver',
259 259 b'track-log',
260 260 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
261 261 )
262 262 coreconfigitem(
263 263 b'cmdserver',
264 264 b'shutdown-on-interrupt',
265 265 default=True,
266 266 )
267 267 coreconfigitem(
268 268 b'color',
269 269 b'.*',
270 270 default=None,
271 271 generic=True,
272 272 )
273 273 coreconfigitem(
274 274 b'color',
275 275 b'mode',
276 276 default=b'auto',
277 277 )
278 278 coreconfigitem(
279 279 b'color',
280 280 b'pagermode',
281 281 default=dynamicdefault,
282 282 )
283 283 coreconfigitem(
284 284 b'command-templates',
285 285 b'graphnode',
286 286 default=None,
287 287 alias=[(b'ui', b'graphnodetemplate')],
288 288 )
289 289 coreconfigitem(
290 290 b'command-templates',
291 291 b'log',
292 292 default=None,
293 293 alias=[(b'ui', b'logtemplate')],
294 294 )
295 295 coreconfigitem(
296 296 b'command-templates',
297 297 b'mergemarker',
298 298 default=(
299 299 b'{node|short} '
300 300 b'{ifeq(tags, "tip", "", '
301 301 b'ifeq(tags, "", "", "{tags} "))}'
302 302 b'{if(bookmarks, "{bookmarks} ")}'
303 303 b'{ifeq(branch, "default", "", "{branch} ")}'
304 304 b'- {author|user}: {desc|firstline}'
305 305 ),
306 306 alias=[(b'ui', b'mergemarkertemplate')],
307 307 )
308 308 coreconfigitem(
309 309 b'command-templates',
310 310 b'pre-merge-tool-output',
311 311 default=None,
312 312 alias=[(b'ui', b'pre-merge-tool-output-template')],
313 313 )
314 314 coreconfigitem(
315 315 b'command-templates',
316 316 b'oneline-summary',
317 317 default=None,
318 318 )
319 319 coreconfigitem(
320 320 b'command-templates',
321 321 b'oneline-summary.*',
322 322 default=dynamicdefault,
323 323 generic=True,
324 324 )
325 325 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
326 326 coreconfigitem(
327 327 b'commands',
328 328 b'commit.post-status',
329 329 default=False,
330 330 )
331 331 coreconfigitem(
332 332 b'commands',
333 333 b'grep.all-files',
334 334 default=False,
335 335 experimental=True,
336 336 )
337 337 coreconfigitem(
338 338 b'commands',
339 339 b'merge.require-rev',
340 340 default=False,
341 341 )
342 342 coreconfigitem(
343 343 b'commands',
344 344 b'push.require-revs',
345 345 default=False,
346 346 )
347 347 coreconfigitem(
348 348 b'commands',
349 349 b'resolve.confirm',
350 350 default=False,
351 351 )
352 352 coreconfigitem(
353 353 b'commands',
354 354 b'resolve.explicit-re-merge',
355 355 default=False,
356 356 )
357 357 coreconfigitem(
358 358 b'commands',
359 359 b'resolve.mark-check',
360 360 default=b'none',
361 361 )
362 362 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
363 363 coreconfigitem(
364 364 b'commands',
365 365 b'show.aliasprefix',
366 366 default=list,
367 367 )
368 368 coreconfigitem(
369 369 b'commands',
370 370 b'status.relative',
371 371 default=False,
372 372 )
373 373 coreconfigitem(
374 374 b'commands',
375 375 b'status.skipstates',
376 376 default=[],
377 377 experimental=True,
378 378 )
379 379 coreconfigitem(
380 380 b'commands',
381 381 b'status.terse',
382 382 default=b'',
383 383 )
384 384 coreconfigitem(
385 385 b'commands',
386 386 b'status.verbose',
387 387 default=False,
388 388 )
389 389 coreconfigitem(
390 390 b'commands',
391 391 b'update.check',
392 392 default=None,
393 393 )
394 394 coreconfigitem(
395 395 b'commands',
396 396 b'update.requiredest',
397 397 default=False,
398 398 )
399 399 coreconfigitem(
400 400 b'committemplate',
401 401 b'.*',
402 402 default=None,
403 403 generic=True,
404 404 )
405 405 coreconfigitem(
406 406 b'convert',
407 407 b'bzr.saverev',
408 408 default=True,
409 409 )
410 410 coreconfigitem(
411 411 b'convert',
412 412 b'cvsps.cache',
413 413 default=True,
414 414 )
415 415 coreconfigitem(
416 416 b'convert',
417 417 b'cvsps.fuzz',
418 418 default=60,
419 419 )
420 420 coreconfigitem(
421 421 b'convert',
422 422 b'cvsps.logencoding',
423 423 default=None,
424 424 )
425 425 coreconfigitem(
426 426 b'convert',
427 427 b'cvsps.mergefrom',
428 428 default=None,
429 429 )
430 430 coreconfigitem(
431 431 b'convert',
432 432 b'cvsps.mergeto',
433 433 default=None,
434 434 )
435 435 coreconfigitem(
436 436 b'convert',
437 437 b'git.committeractions',
438 438 default=lambda: [b'messagedifferent'],
439 439 )
440 440 coreconfigitem(
441 441 b'convert',
442 442 b'git.extrakeys',
443 443 default=list,
444 444 )
445 445 coreconfigitem(
446 446 b'convert',
447 447 b'git.findcopiesharder',
448 448 default=False,
449 449 )
450 450 coreconfigitem(
451 451 b'convert',
452 452 b'git.remoteprefix',
453 453 default=b'remote',
454 454 )
455 455 coreconfigitem(
456 456 b'convert',
457 457 b'git.renamelimit',
458 458 default=400,
459 459 )
460 460 coreconfigitem(
461 461 b'convert',
462 462 b'git.saverev',
463 463 default=True,
464 464 )
465 465 coreconfigitem(
466 466 b'convert',
467 467 b'git.similarity',
468 468 default=50,
469 469 )
470 470 coreconfigitem(
471 471 b'convert',
472 472 b'git.skipsubmodules',
473 473 default=False,
474 474 )
475 475 coreconfigitem(
476 476 b'convert',
477 477 b'hg.clonebranches',
478 478 default=False,
479 479 )
480 480 coreconfigitem(
481 481 b'convert',
482 482 b'hg.ignoreerrors',
483 483 default=False,
484 484 )
485 485 coreconfigitem(
486 486 b'convert',
487 487 b'hg.preserve-hash',
488 488 default=False,
489 489 )
490 490 coreconfigitem(
491 491 b'convert',
492 492 b'hg.revs',
493 493 default=None,
494 494 )
495 495 coreconfigitem(
496 496 b'convert',
497 497 b'hg.saverev',
498 498 default=False,
499 499 )
500 500 coreconfigitem(
501 501 b'convert',
502 502 b'hg.sourcename',
503 503 default=None,
504 504 )
505 505 coreconfigitem(
506 506 b'convert',
507 507 b'hg.startrev',
508 508 default=None,
509 509 )
510 510 coreconfigitem(
511 511 b'convert',
512 512 b'hg.tagsbranch',
513 513 default=b'default',
514 514 )
515 515 coreconfigitem(
516 516 b'convert',
517 517 b'hg.usebranchnames',
518 518 default=True,
519 519 )
520 520 coreconfigitem(
521 521 b'convert',
522 522 b'ignoreancestorcheck',
523 523 default=False,
524 524 experimental=True,
525 525 )
526 526 coreconfigitem(
527 527 b'convert',
528 528 b'localtimezone',
529 529 default=False,
530 530 )
531 531 coreconfigitem(
532 532 b'convert',
533 533 b'p4.encoding',
534 534 default=dynamicdefault,
535 535 )
536 536 coreconfigitem(
537 537 b'convert',
538 538 b'p4.startrev',
539 539 default=0,
540 540 )
541 541 coreconfigitem(
542 542 b'convert',
543 543 b'skiptags',
544 544 default=False,
545 545 )
546 546 coreconfigitem(
547 547 b'convert',
548 548 b'svn.debugsvnlog',
549 549 default=True,
550 550 )
551 551 coreconfigitem(
552 552 b'convert',
553 553 b'svn.trunk',
554 554 default=None,
555 555 )
556 556 coreconfigitem(
557 557 b'convert',
558 558 b'svn.tags',
559 559 default=None,
560 560 )
561 561 coreconfigitem(
562 562 b'convert',
563 563 b'svn.branches',
564 564 default=None,
565 565 )
566 566 coreconfigitem(
567 567 b'convert',
568 568 b'svn.startrev',
569 569 default=0,
570 570 )
571 571 coreconfigitem(
572 572 b'convert',
573 573 b'svn.dangerous-set-commit-dates',
574 574 default=False,
575 575 )
576 576 coreconfigitem(
577 577 b'debug',
578 578 b'dirstate.delaywrite',
579 579 default=0,
580 580 )
581 581 coreconfigitem(
582 582 b'debug',
583 583 b'revlog.verifyposition.changelog',
584 584 default=b'',
585 585 )
586 586 coreconfigitem(
587 b'debug',
588 b'revlog.debug-delta',
589 default=False,
590 )
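For context (an editorial sketch, not part of this patch): code honoring the new flag would typically read it through the ui configuration API, along these lines:

    def _debug_delta_enabled(ui):
        # Sketch only: returns True when delta-computation debug output is requested.
        return ui.configbool(b'debug', b'revlog.debug-delta')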
591 coreconfigitem(
587 592 b'defaults',
588 593 b'.*',
589 594 default=None,
590 595 generic=True,
591 596 )
592 597 coreconfigitem(
593 598 b'devel',
594 599 b'all-warnings',
595 600 default=False,
596 601 )
597 602 coreconfigitem(
598 603 b'devel',
599 604 b'bundle2.debug',
600 605 default=False,
601 606 )
602 607 coreconfigitem(
603 608 b'devel',
604 609 b'bundle.delta',
605 610 default=b'',
606 611 )
607 612 coreconfigitem(
608 613 b'devel',
609 614 b'cache-vfs',
610 615 default=None,
611 616 )
612 617 coreconfigitem(
613 618 b'devel',
614 619 b'check-locks',
615 620 default=False,
616 621 )
617 622 coreconfigitem(
618 623 b'devel',
619 624 b'check-relroot',
620 625 default=False,
621 626 )
622 627 # Track copy information for all files, not just "added" ones (very slow)
623 628 coreconfigitem(
624 629 b'devel',
625 630 b'copy-tracing.trace-all-files',
626 631 default=False,
627 632 )
628 633 coreconfigitem(
629 634 b'devel',
630 635 b'default-date',
631 636 default=None,
632 637 )
633 638 coreconfigitem(
634 639 b'devel',
635 640 b'deprec-warn',
636 641 default=False,
637 642 )
638 643 coreconfigitem(
639 644 b'devel',
640 645 b'disableloaddefaultcerts',
641 646 default=False,
642 647 )
643 648 coreconfigitem(
644 649 b'devel',
645 650 b'warn-empty-changegroup',
646 651 default=False,
647 652 )
648 653 coreconfigitem(
649 654 b'devel',
650 655 b'legacy.exchange',
651 656 default=list,
652 657 )
653 658 # When True, revlogs use a special reference version of the nodemap that is not
654 659 # performant but is "known" to behave properly.
655 660 coreconfigitem(
656 661 b'devel',
657 662 b'persistent-nodemap',
658 663 default=False,
659 664 )
660 665 coreconfigitem(
661 666 b'devel',
662 667 b'servercafile',
663 668 default=b'',
664 669 )
665 670 coreconfigitem(
666 671 b'devel',
667 672 b'serverexactprotocol',
668 673 default=b'',
669 674 )
670 675 coreconfigitem(
671 676 b'devel',
672 677 b'serverrequirecert',
673 678 default=False,
674 679 )
675 680 coreconfigitem(
676 681 b'devel',
677 682 b'strip-obsmarkers',
678 683 default=True,
679 684 )
680 685 coreconfigitem(
681 686 b'devel',
682 687 b'warn-config',
683 688 default=None,
684 689 )
685 690 coreconfigitem(
686 691 b'devel',
687 692 b'warn-config-default',
688 693 default=None,
689 694 )
690 695 coreconfigitem(
691 696 b'devel',
692 697 b'user.obsmarker',
693 698 default=None,
694 699 )
695 700 coreconfigitem(
696 701 b'devel',
697 702 b'warn-config-unknown',
698 703 default=None,
699 704 )
700 705 coreconfigitem(
701 706 b'devel',
702 707 b'debug.copies',
703 708 default=False,
704 709 )
705 710 coreconfigitem(
706 711 b'devel',
707 712 b'copy-tracing.multi-thread',
708 713 default=True,
709 714 )
710 715 coreconfigitem(
711 716 b'devel',
712 717 b'debug.extensions',
713 718 default=False,
714 719 )
715 720 coreconfigitem(
716 721 b'devel',
717 722 b'debug.repo-filters',
718 723 default=False,
719 724 )
720 725 coreconfigitem(
721 726 b'devel',
722 727 b'debug.peer-request',
723 728 default=False,
724 729 )
725 730 # If discovery.exchange-heads is False, the discovery will not start with
726 731 # remote head fetching and local head querying.
727 732 coreconfigitem(
728 733 b'devel',
729 734 b'discovery.exchange-heads',
730 735 default=True,
731 736 )
732 737 # If discovery.grow-sample is False, the sample size used in set discovery will
733 738 # not be increased through the process
734 739 coreconfigitem(
735 740 b'devel',
736 741 b'discovery.grow-sample',
737 742 default=True,
738 743 )
739 744 # When discovery.grow-sample.dynamic is True, the default, the sample size is
740 745 # adapted to the shape of the undecided set (it is set to the max of:
741 746 # <target-size>, len(roots(undecided)), len(heads(undecided)))
742 747 coreconfigitem(
743 748 b'devel',
744 749 b'discovery.grow-sample.dynamic',
745 750 default=True,
746 751 )
747 752 # discovery.grow-sample.rate controls the rate at which the sample grows
748 753 coreconfigitem(
749 754 b'devel',
750 755 b'discovery.grow-sample.rate',
751 756 default=1.05,
752 757 )
753 758 # If discovery.randomize is False, random sampling during discovery is
754 759 # deterministic. It is meant for integration tests.
755 760 coreconfigitem(
756 761 b'devel',
757 762 b'discovery.randomize',
758 763 default=True,
759 764 )
760 765 # Control the initial size of the discovery sample
761 766 coreconfigitem(
762 767 b'devel',
763 768 b'discovery.sample-size',
764 769 default=200,
765 770 )
766 771 # Control the initial size of the discovery for initial change
767 772 coreconfigitem(
768 773 b'devel',
769 774 b'discovery.sample-size.initial',
770 775 default=100,
771 776 )
772 777 _registerdiffopts(section=b'diff')
773 778 coreconfigitem(
774 779 b'diff',
775 780 b'merge',
776 781 default=False,
777 782 experimental=True,
778 783 )
779 784 coreconfigitem(
780 785 b'email',
781 786 b'bcc',
782 787 default=None,
783 788 )
784 789 coreconfigitem(
785 790 b'email',
786 791 b'cc',
787 792 default=None,
788 793 )
789 794 coreconfigitem(
790 795 b'email',
791 796 b'charsets',
792 797 default=list,
793 798 )
794 799 coreconfigitem(
795 800 b'email',
796 801 b'from',
797 802 default=None,
798 803 )
799 804 coreconfigitem(
800 805 b'email',
801 806 b'method',
802 807 default=b'smtp',
803 808 )
804 809 coreconfigitem(
805 810 b'email',
806 811 b'reply-to',
807 812 default=None,
808 813 )
809 814 coreconfigitem(
810 815 b'email',
811 816 b'to',
812 817 default=None,
813 818 )
814 819 coreconfigitem(
815 820 b'experimental',
816 821 b'archivemetatemplate',
817 822 default=dynamicdefault,
818 823 )
819 824 coreconfigitem(
820 825 b'experimental',
821 826 b'auto-publish',
822 827 default=b'publish',
823 828 )
824 829 coreconfigitem(
825 830 b'experimental',
826 831 b'bundle-phases',
827 832 default=False,
828 833 )
829 834 coreconfigitem(
830 835 b'experimental',
831 836 b'bundle2-advertise',
832 837 default=True,
833 838 )
834 839 coreconfigitem(
835 840 b'experimental',
836 841 b'bundle2-output-capture',
837 842 default=False,
838 843 )
839 844 coreconfigitem(
840 845 b'experimental',
841 846 b'bundle2.pushback',
842 847 default=False,
843 848 )
844 849 coreconfigitem(
845 850 b'experimental',
846 851 b'bundle2lazylocking',
847 852 default=False,
848 853 )
849 854 coreconfigitem(
850 855 b'experimental',
851 856 b'bundlecomplevel',
852 857 default=None,
853 858 )
854 859 coreconfigitem(
855 860 b'experimental',
856 861 b'bundlecomplevel.bzip2',
857 862 default=None,
858 863 )
859 864 coreconfigitem(
860 865 b'experimental',
861 866 b'bundlecomplevel.gzip',
862 867 default=None,
863 868 )
864 869 coreconfigitem(
865 870 b'experimental',
866 871 b'bundlecomplevel.none',
867 872 default=None,
868 873 )
869 874 coreconfigitem(
870 875 b'experimental',
871 876 b'bundlecomplevel.zstd',
872 877 default=None,
873 878 )
874 879 coreconfigitem(
875 880 b'experimental',
876 881 b'bundlecompthreads',
877 882 default=None,
878 883 )
879 884 coreconfigitem(
880 885 b'experimental',
881 886 b'bundlecompthreads.bzip2',
882 887 default=None,
883 888 )
884 889 coreconfigitem(
885 890 b'experimental',
886 891 b'bundlecompthreads.gzip',
887 892 default=None,
888 893 )
889 894 coreconfigitem(
890 895 b'experimental',
891 896 b'bundlecompthreads.none',
892 897 default=None,
893 898 )
894 899 coreconfigitem(
895 900 b'experimental',
896 901 b'bundlecompthreads.zstd',
897 902 default=None,
898 903 )
899 904 coreconfigitem(
900 905 b'experimental',
901 906 b'changegroup3',
902 907 default=False,
903 908 )
904 909 coreconfigitem(
905 910 b'experimental',
906 911 b'changegroup4',
907 912 default=False,
908 913 )
909 914 coreconfigitem(
910 915 b'experimental',
911 916 b'cleanup-as-archived',
912 917 default=False,
913 918 )
914 919 coreconfigitem(
915 920 b'experimental',
916 921 b'clientcompressionengines',
917 922 default=list,
918 923 )
919 924 coreconfigitem(
920 925 b'experimental',
921 926 b'copytrace',
922 927 default=b'on',
923 928 )
924 929 coreconfigitem(
925 930 b'experimental',
926 931 b'copytrace.movecandidateslimit',
927 932 default=100,
928 933 )
929 934 coreconfigitem(
930 935 b'experimental',
931 936 b'copytrace.sourcecommitlimit',
932 937 default=100,
933 938 )
934 939 coreconfigitem(
935 940 b'experimental',
936 941 b'copies.read-from',
937 942 default=b"filelog-only",
938 943 )
939 944 coreconfigitem(
940 945 b'experimental',
941 946 b'copies.write-to',
942 947 default=b'filelog-only',
943 948 )
944 949 coreconfigitem(
945 950 b'experimental',
946 951 b'crecordtest',
947 952 default=None,
948 953 )
949 954 coreconfigitem(
950 955 b'experimental',
951 956 b'directaccess',
952 957 default=False,
953 958 )
954 959 coreconfigitem(
955 960 b'experimental',
956 961 b'directaccess.revnums',
957 962 default=False,
958 963 )
959 964 coreconfigitem(
960 965 b'experimental',
961 966 b'editortmpinhg',
962 967 default=False,
963 968 )
964 969 coreconfigitem(
965 970 b'experimental',
966 971 b'evolution',
967 972 default=list,
968 973 )
969 974 coreconfigitem(
970 975 b'experimental',
971 976 b'evolution.allowdivergence',
972 977 default=False,
973 978 alias=[(b'experimental', b'allowdivergence')],
974 979 )
975 980 coreconfigitem(
976 981 b'experimental',
977 982 b'evolution.allowunstable',
978 983 default=None,
979 984 )
980 985 coreconfigitem(
981 986 b'experimental',
982 987 b'evolution.createmarkers',
983 988 default=None,
984 989 )
985 990 coreconfigitem(
986 991 b'experimental',
987 992 b'evolution.effect-flags',
988 993 default=True,
989 994 alias=[(b'experimental', b'effect-flags')],
990 995 )
991 996 coreconfigitem(
992 997 b'experimental',
993 998 b'evolution.exchange',
994 999 default=None,
995 1000 )
996 1001 coreconfigitem(
997 1002 b'experimental',
998 1003 b'evolution.bundle-obsmarker',
999 1004 default=False,
1000 1005 )
1001 1006 coreconfigitem(
1002 1007 b'experimental',
1003 1008 b'evolution.bundle-obsmarker:mandatory',
1004 1009 default=True,
1005 1010 )
1006 1011 coreconfigitem(
1007 1012 b'experimental',
1008 1013 b'log.topo',
1009 1014 default=False,
1010 1015 )
1011 1016 coreconfigitem(
1012 1017 b'experimental',
1013 1018 b'evolution.report-instabilities',
1014 1019 default=True,
1015 1020 )
1016 1021 coreconfigitem(
1017 1022 b'experimental',
1018 1023 b'evolution.track-operation',
1019 1024 default=True,
1020 1025 )
1021 1026 # repo-level config to exclude a revset from visibility
1022 1027 #
1023 1028 # The target use case is to use `share` to expose different subset of the same
1024 1029 # repository, especially server side. See also `server.view`.
1025 1030 coreconfigitem(
1026 1031 b'experimental',
1027 1032 b'extra-filter-revs',
1028 1033 default=None,
1029 1034 )
1030 1035 coreconfigitem(
1031 1036 b'experimental',
1032 1037 b'maxdeltachainspan',
1033 1038 default=-1,
1034 1039 )
1035 1040 # tracks files which were undeleted (merge might delete them but we explicitly
1036 1041 # kept/undeleted them) and creates new filenodes for them
1037 1042 coreconfigitem(
1038 1043 b'experimental',
1039 1044 b'merge-track-salvaged',
1040 1045 default=False,
1041 1046 )
1042 1047 coreconfigitem(
1043 1048 b'experimental',
1044 1049 b'mmapindexthreshold',
1045 1050 default=None,
1046 1051 )
1047 1052 coreconfigitem(
1048 1053 b'experimental',
1049 1054 b'narrow',
1050 1055 default=False,
1051 1056 )
1052 1057 coreconfigitem(
1053 1058 b'experimental',
1054 1059 b'nonnormalparanoidcheck',
1055 1060 default=False,
1056 1061 )
1057 1062 coreconfigitem(
1058 1063 b'experimental',
1059 1064 b'exportableenviron',
1060 1065 default=list,
1061 1066 )
1062 1067 coreconfigitem(
1063 1068 b'experimental',
1064 1069 b'extendedheader.index',
1065 1070 default=None,
1066 1071 )
1067 1072 coreconfigitem(
1068 1073 b'experimental',
1069 1074 b'extendedheader.similarity',
1070 1075 default=False,
1071 1076 )
1072 1077 coreconfigitem(
1073 1078 b'experimental',
1074 1079 b'graphshorten',
1075 1080 default=False,
1076 1081 )
1077 1082 coreconfigitem(
1078 1083 b'experimental',
1079 1084 b'graphstyle.parent',
1080 1085 default=dynamicdefault,
1081 1086 )
1082 1087 coreconfigitem(
1083 1088 b'experimental',
1084 1089 b'graphstyle.missing',
1085 1090 default=dynamicdefault,
1086 1091 )
1087 1092 coreconfigitem(
1088 1093 b'experimental',
1089 1094 b'graphstyle.grandparent',
1090 1095 default=dynamicdefault,
1091 1096 )
1092 1097 coreconfigitem(
1093 1098 b'experimental',
1094 1099 b'hook-track-tags',
1095 1100 default=False,
1096 1101 )
1097 1102 coreconfigitem(
1098 1103 b'experimental',
1099 1104 b'httppostargs',
1100 1105 default=False,
1101 1106 )
1102 1107 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1103 1108 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1104 1109
1105 1110 coreconfigitem(
1106 1111 b'experimental',
1107 1112 b'obsmarkers-exchange-debug',
1108 1113 default=False,
1109 1114 )
1110 1115 coreconfigitem(
1111 1116 b'experimental',
1112 1117 b'remotenames',
1113 1118 default=False,
1114 1119 )
1115 1120 coreconfigitem(
1116 1121 b'experimental',
1117 1122 b'removeemptydirs',
1118 1123 default=True,
1119 1124 )
1120 1125 coreconfigitem(
1121 1126 b'experimental',
1122 1127 b'revert.interactive.select-to-keep',
1123 1128 default=False,
1124 1129 )
1125 1130 coreconfigitem(
1126 1131 b'experimental',
1127 1132 b'revisions.prefixhexnode',
1128 1133 default=False,
1129 1134 )
1130 1135 # "out of experimental" todo list.
1131 1136 #
1132 1137 # * include management of a persistent nodemap in the main docket
1133 1138 # * enforce a "no-truncate" policy for mmap safety
1134 1139 # - for censoring operation
1135 1140 # - for stripping operation
1136 1141 # - for rollback operation
1137 1142 # * proper streaming (race free) of the docket file
1138 1143 # * track garbage data to eventually allow rewriting -existing- sidedata.
1139 1144 # * Exchange-wise, we will also need to do something more efficient than
1140 1145 # keeping references to the affected revlogs, especially memory-wise when
1141 1146 # rewriting sidedata.
1142 1147 # * introduce a proper solution to reduce the number of filelog related files.
1143 1148 # * use caching for reading sidedata (similar to what we do for data).
1144 1149 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1145 1150 # * Improvement to consider
1146 1151 # - avoid compression header in chunk using the default compression?
1147 1152 # - forbid "inline" compression mode entirely?
1148 1153 # - split the data offset and flag field (the 2 bytes saved are mostly trouble)
1149 1154 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1150 1155 # - keep track of chain base or size (probably not that useful anymore)
1151 1156 coreconfigitem(
1152 1157 b'experimental',
1153 1158 b'revlogv2',
1154 1159 default=None,
1155 1160 )
1156 1161 coreconfigitem(
1157 1162 b'experimental',
1158 1163 b'revisions.disambiguatewithin',
1159 1164 default=None,
1160 1165 )
1161 1166 coreconfigitem(
1162 1167 b'experimental',
1163 1168 b'rust.index',
1164 1169 default=False,
1165 1170 )
1166 1171 coreconfigitem(
1167 1172 b'experimental',
1168 1173 b'server.filesdata.recommended-batch-size',
1169 1174 default=50000,
1170 1175 )
1171 1176 coreconfigitem(
1172 1177 b'experimental',
1173 1178 b'server.manifestdata.recommended-batch-size',
1174 1179 default=100000,
1175 1180 )
1176 1181 coreconfigitem(
1177 1182 b'experimental',
1178 1183 b'server.stream-narrow-clones',
1179 1184 default=False,
1180 1185 )
1181 1186 coreconfigitem(
1182 1187 b'experimental',
1183 1188 b'single-head-per-branch',
1184 1189 default=False,
1185 1190 )
1186 1191 coreconfigitem(
1187 1192 b'experimental',
1188 1193 b'single-head-per-branch:account-closed-heads',
1189 1194 default=False,
1190 1195 )
1191 1196 coreconfigitem(
1192 1197 b'experimental',
1193 1198 b'single-head-per-branch:public-changes-only',
1194 1199 default=False,
1195 1200 )
1196 1201 coreconfigitem(
1197 1202 b'experimental',
1198 1203 b'sparse-read',
1199 1204 default=False,
1200 1205 )
1201 1206 coreconfigitem(
1202 1207 b'experimental',
1203 1208 b'sparse-read.density-threshold',
1204 1209 default=0.50,
1205 1210 )
1206 1211 coreconfigitem(
1207 1212 b'experimental',
1208 1213 b'sparse-read.min-gap-size',
1209 1214 default=b'65K',
1210 1215 )
1211 1216 coreconfigitem(
1212 1217 b'experimental',
1213 1218 b'treemanifest',
1214 1219 default=False,
1215 1220 )
1216 1221 coreconfigitem(
1217 1222 b'experimental',
1218 1223 b'update.atomic-file',
1219 1224 default=False,
1220 1225 )
1221 1226 coreconfigitem(
1222 1227 b'experimental',
1223 1228 b'web.full-garbage-collection-rate',
1224 1229 default=1, # still forcing a full collection on each request
1225 1230 )
1226 1231 coreconfigitem(
1227 1232 b'experimental',
1228 1233 b'worker.wdir-get-thread-safe',
1229 1234 default=False,
1230 1235 )
1231 1236 coreconfigitem(
1232 1237 b'experimental',
1233 1238 b'worker.repository-upgrade',
1234 1239 default=False,
1235 1240 )
1236 1241 coreconfigitem(
1237 1242 b'experimental',
1238 1243 b'xdiff',
1239 1244 default=False,
1240 1245 )
1241 1246 coreconfigitem(
1242 1247 b'extensions',
1243 1248 b'[^:]*',
1244 1249 default=None,
1245 1250 generic=True,
1246 1251 )
1247 1252 coreconfigitem(
1248 1253 b'extensions',
1249 1254 b'[^:]*:required',
1250 1255 default=False,
1251 1256 generic=True,
1252 1257 )
1253 1258 coreconfigitem(
1254 1259 b'extdata',
1255 1260 b'.*',
1256 1261 default=None,
1257 1262 generic=True,
1258 1263 )
1259 1264 coreconfigitem(
1260 1265 b'format',
1261 1266 b'bookmarks-in-store',
1262 1267 default=False,
1263 1268 )
1264 1269 coreconfigitem(
1265 1270 b'format',
1266 1271 b'chunkcachesize',
1267 1272 default=None,
1268 1273 experimental=True,
1269 1274 )
1270 1275 coreconfigitem(
1271 1276 # Enable this dirstate format *when creating a new repository*.
1272 1277 # Which format to use for existing repos is controlled by .hg/requires
1273 1278 b'format',
1274 1279 b'use-dirstate-v2',
1275 1280 default=False,
1276 1281 experimental=True,
1277 1282 alias=[(b'format', b'exp-rc-dirstate-v2')],
1278 1283 )
1279 1284 coreconfigitem(
1280 1285 b'format',
1281 1286 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
1282 1287 default=False,
1283 1288 experimental=True,
1284 1289 )
1285 1290 coreconfigitem(
1286 1291 b'format',
1287 1292 b'use-dirstate-tracked-hint',
1288 1293 default=False,
1289 1294 experimental=True,
1290 1295 )
1291 1296 coreconfigitem(
1292 1297 b'format',
1293 1298 b'use-dirstate-tracked-hint.version',
1294 1299 default=1,
1295 1300 experimental=True,
1296 1301 )
1297 1302 coreconfigitem(
1298 1303 b'format',
1299 1304 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
1300 1305 default=False,
1301 1306 experimental=True,
1302 1307 )
1303 1308 coreconfigitem(
1304 1309 b'format',
1305 1310 b'dotencode',
1306 1311 default=True,
1307 1312 )
1308 1313 coreconfigitem(
1309 1314 b'format',
1310 1315 b'generaldelta',
1311 1316 default=False,
1312 1317 experimental=True,
1313 1318 )
1314 1319 coreconfigitem(
1315 1320 b'format',
1316 1321 b'manifestcachesize',
1317 1322 default=None,
1318 1323 experimental=True,
1319 1324 )
1320 1325 coreconfigitem(
1321 1326 b'format',
1322 1327 b'maxchainlen',
1323 1328 default=dynamicdefault,
1324 1329 experimental=True,
1325 1330 )
1326 1331 coreconfigitem(
1327 1332 b'format',
1328 1333 b'obsstore-version',
1329 1334 default=None,
1330 1335 )
1331 1336 coreconfigitem(
1332 1337 b'format',
1333 1338 b'sparse-revlog',
1334 1339 default=True,
1335 1340 )
1336 1341 coreconfigitem(
1337 1342 b'format',
1338 1343 b'revlog-compression',
1339 1344 default=lambda: [b'zstd', b'zlib'],
1340 1345 alias=[(b'experimental', b'format.compression')],
1341 1346 )
1342 1347 # Experimental TODOs:
1343 1348 #
1344 1349 # * Same as for revlogv2 (but for the reduction of the number of files)
1345 1350 # * Actually computing the rank of changesets
1346 1351 # * Improvement to investigate
1347 1352 # - storing .hgtags fnode
1348 1353 # - storing branch related identifier
1349 1354
1350 1355 coreconfigitem(
1351 1356 b'format',
1352 1357 b'exp-use-changelog-v2',
1353 1358 default=None,
1354 1359 experimental=True,
1355 1360 )
1356 1361 coreconfigitem(
1357 1362 b'format',
1358 1363 b'usefncache',
1359 1364 default=True,
1360 1365 )
1361 1366 coreconfigitem(
1362 1367 b'format',
1363 1368 b'usegeneraldelta',
1364 1369 default=True,
1365 1370 )
1366 1371 coreconfigitem(
1367 1372 b'format',
1368 1373 b'usestore',
1369 1374 default=True,
1370 1375 )
1371 1376
1372 1377
1373 1378 def _persistent_nodemap_default():
1374 1379 """compute `use-persistent-nodemap` default value
1375 1380
1376 1381 The feature is disabled unless a fast implementation is available.
1377 1382 """
1378 1383 from . import policy
1379 1384
1380 1385 return policy.importrust('revlog') is not None
1381 1386
1382 1387
1383 1388 coreconfigitem(
1384 1389 b'format',
1385 1390 b'use-persistent-nodemap',
1386 1391 default=_persistent_nodemap_default,
1387 1392 )
1388 1393 coreconfigitem(
1389 1394 b'format',
1390 1395 b'exp-use-copies-side-data-changeset',
1391 1396 default=False,
1392 1397 experimental=True,
1393 1398 )
1394 1399 coreconfigitem(
1395 1400 b'format',
1396 1401 b'use-share-safe',
1397 1402 default=True,
1398 1403 )
1399 1404 coreconfigitem(
1400 1405 b'format',
1401 1406 b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
1402 1407 default=False,
1403 1408 experimental=True,
1404 1409 )
1405 1410 coreconfigitem(
1406 1411 b'format',
1407 1412 b'internal-phase',
1408 1413 default=False,
1409 1414 experimental=True,
1410 1415 )
1411 1416 coreconfigitem(
1412 1417 b'fsmonitor',
1413 1418 b'warn_when_unused',
1414 1419 default=True,
1415 1420 )
1416 1421 coreconfigitem(
1417 1422 b'fsmonitor',
1418 1423 b'warn_update_file_count',
1419 1424 default=50000,
1420 1425 )
1421 1426 coreconfigitem(
1422 1427 b'fsmonitor',
1423 1428 b'warn_update_file_count_rust',
1424 1429 default=400000,
1425 1430 )
1426 1431 coreconfigitem(
1427 1432 b'help',
1428 1433 br'hidden-command\..*',
1429 1434 default=False,
1430 1435 generic=True,
1431 1436 )
1432 1437 coreconfigitem(
1433 1438 b'help',
1434 1439 br'hidden-topic\..*',
1435 1440 default=False,
1436 1441 generic=True,
1437 1442 )
1438 1443 coreconfigitem(
1439 1444 b'hooks',
1440 1445 b'[^:]*',
1441 1446 default=dynamicdefault,
1442 1447 generic=True,
1443 1448 )
1444 1449 coreconfigitem(
1445 1450 b'hooks',
1446 1451 b'.*:run-with-plain',
1447 1452 default=True,
1448 1453 generic=True,
1449 1454 )
1450 1455 coreconfigitem(
1451 1456 b'hgweb-paths',
1452 1457 b'.*',
1453 1458 default=list,
1454 1459 generic=True,
1455 1460 )
1456 1461 coreconfigitem(
1457 1462 b'hostfingerprints',
1458 1463 b'.*',
1459 1464 default=list,
1460 1465 generic=True,
1461 1466 )
1462 1467 coreconfigitem(
1463 1468 b'hostsecurity',
1464 1469 b'ciphers',
1465 1470 default=None,
1466 1471 )
1467 1472 coreconfigitem(
1468 1473 b'hostsecurity',
1469 1474 b'minimumprotocol',
1470 1475 default=dynamicdefault,
1471 1476 )
1472 1477 coreconfigitem(
1473 1478 b'hostsecurity',
1474 1479 b'.*:minimumprotocol$',
1475 1480 default=dynamicdefault,
1476 1481 generic=True,
1477 1482 )
1478 1483 coreconfigitem(
1479 1484 b'hostsecurity',
1480 1485 b'.*:ciphers$',
1481 1486 default=dynamicdefault,
1482 1487 generic=True,
1483 1488 )
1484 1489 coreconfigitem(
1485 1490 b'hostsecurity',
1486 1491 b'.*:fingerprints$',
1487 1492 default=list,
1488 1493 generic=True,
1489 1494 )
1490 1495 coreconfigitem(
1491 1496 b'hostsecurity',
1492 1497 b'.*:verifycertsfile$',
1493 1498 default=None,
1494 1499 generic=True,
1495 1500 )
1496 1501
1497 1502 coreconfigitem(
1498 1503 b'http_proxy',
1499 1504 b'always',
1500 1505 default=False,
1501 1506 )
1502 1507 coreconfigitem(
1503 1508 b'http_proxy',
1504 1509 b'host',
1505 1510 default=None,
1506 1511 )
1507 1512 coreconfigitem(
1508 1513 b'http_proxy',
1509 1514 b'no',
1510 1515 default=list,
1511 1516 )
1512 1517 coreconfigitem(
1513 1518 b'http_proxy',
1514 1519 b'passwd',
1515 1520 default=None,
1516 1521 )
1517 1522 coreconfigitem(
1518 1523 b'http_proxy',
1519 1524 b'user',
1520 1525 default=None,
1521 1526 )
1522 1527
1523 1528 coreconfigitem(
1524 1529 b'http',
1525 1530 b'timeout',
1526 1531 default=None,
1527 1532 )
1528 1533
1529 1534 coreconfigitem(
1530 1535 b'logtoprocess',
1531 1536 b'commandexception',
1532 1537 default=None,
1533 1538 )
1534 1539 coreconfigitem(
1535 1540 b'logtoprocess',
1536 1541 b'commandfinish',
1537 1542 default=None,
1538 1543 )
1539 1544 coreconfigitem(
1540 1545 b'logtoprocess',
1541 1546 b'command',
1542 1547 default=None,
1543 1548 )
1544 1549 coreconfigitem(
1545 1550 b'logtoprocess',
1546 1551 b'develwarn',
1547 1552 default=None,
1548 1553 )
1549 1554 coreconfigitem(
1550 1555 b'logtoprocess',
1551 1556 b'uiblocked',
1552 1557 default=None,
1553 1558 )
1554 1559 coreconfigitem(
1555 1560 b'merge',
1556 1561 b'checkunknown',
1557 1562 default=b'abort',
1558 1563 )
1559 1564 coreconfigitem(
1560 1565 b'merge',
1561 1566 b'checkignored',
1562 1567 default=b'abort',
1563 1568 )
1564 1569 coreconfigitem(
1565 1570 b'experimental',
1566 1571 b'merge.checkpathconflicts',
1567 1572 default=False,
1568 1573 )
1569 1574 coreconfigitem(
1570 1575 b'merge',
1571 1576 b'followcopies',
1572 1577 default=True,
1573 1578 )
1574 1579 coreconfigitem(
1575 1580 b'merge',
1576 1581 b'on-failure',
1577 1582 default=b'continue',
1578 1583 )
1579 1584 coreconfigitem(
1580 1585 b'merge',
1581 1586 b'preferancestor',
1582 1587 default=lambda: [b'*'],
1583 1588 experimental=True,
1584 1589 )
1585 1590 coreconfigitem(
1586 1591 b'merge',
1587 1592 b'strict-capability-check',
1588 1593 default=False,
1589 1594 )
1590 1595 coreconfigitem(
1591 1596 b'merge',
1592 1597 b'disable-partial-tools',
1593 1598 default=False,
1594 1599 experimental=True,
1595 1600 )
1596 1601 coreconfigitem(
1597 1602 b'partial-merge-tools',
1598 1603 b'.*',
1599 1604 default=None,
1600 1605 generic=True,
1601 1606 experimental=True,
1602 1607 )
1603 1608 coreconfigitem(
1604 1609 b'partial-merge-tools',
1605 1610 br'.*\.patterns',
1606 1611 default=dynamicdefault,
1607 1612 generic=True,
1608 1613 priority=-1,
1609 1614 experimental=True,
1610 1615 )
1611 1616 coreconfigitem(
1612 1617 b'partial-merge-tools',
1613 1618 br'.*\.executable$',
1614 1619 default=dynamicdefault,
1615 1620 generic=True,
1616 1621 priority=-1,
1617 1622 experimental=True,
1618 1623 )
1619 1624 coreconfigitem(
1620 1625 b'partial-merge-tools',
1621 1626 br'.*\.order',
1622 1627 default=0,
1623 1628 generic=True,
1624 1629 priority=-1,
1625 1630 experimental=True,
1626 1631 )
1627 1632 coreconfigitem(
1628 1633 b'partial-merge-tools',
1629 1634 br'.*\.args',
1630 1635 default=b"$local $base $other",
1631 1636 generic=True,
1632 1637 priority=-1,
1633 1638 experimental=True,
1634 1639 )
1635 1640 coreconfigitem(
1636 1641 b'partial-merge-tools',
1637 1642 br'.*\.disable',
1638 1643 default=False,
1639 1644 generic=True,
1640 1645 priority=-1,
1641 1646 experimental=True,
1642 1647 )
1643 1648 coreconfigitem(
1644 1649 b'merge-tools',
1645 1650 b'.*',
1646 1651 default=None,
1647 1652 generic=True,
1648 1653 )
1649 1654 coreconfigitem(
1650 1655 b'merge-tools',
1651 1656 br'.*\.args$',
1652 1657 default=b"$local $base $other",
1653 1658 generic=True,
1654 1659 priority=-1,
1655 1660 )
1656 1661 coreconfigitem(
1657 1662 b'merge-tools',
1658 1663 br'.*\.binary$',
1659 1664 default=False,
1660 1665 generic=True,
1661 1666 priority=-1,
1662 1667 )
1663 1668 coreconfigitem(
1664 1669 b'merge-tools',
1665 1670 br'.*\.check$',
1666 1671 default=list,
1667 1672 generic=True,
1668 1673 priority=-1,
1669 1674 )
1670 1675 coreconfigitem(
1671 1676 b'merge-tools',
1672 1677 br'.*\.checkchanged$',
1673 1678 default=False,
1674 1679 generic=True,
1675 1680 priority=-1,
1676 1681 )
1677 1682 coreconfigitem(
1678 1683 b'merge-tools',
1679 1684 br'.*\.executable$',
1680 1685 default=dynamicdefault,
1681 1686 generic=True,
1682 1687 priority=-1,
1683 1688 )
1684 1689 coreconfigitem(
1685 1690 b'merge-tools',
1686 1691 br'.*\.fixeol$',
1687 1692 default=False,
1688 1693 generic=True,
1689 1694 priority=-1,
1690 1695 )
1691 1696 coreconfigitem(
1692 1697 b'merge-tools',
1693 1698 br'.*\.gui$',
1694 1699 default=False,
1695 1700 generic=True,
1696 1701 priority=-1,
1697 1702 )
1698 1703 coreconfigitem(
1699 1704 b'merge-tools',
1700 1705 br'.*\.mergemarkers$',
1701 1706 default=b'basic',
1702 1707 generic=True,
1703 1708 priority=-1,
1704 1709 )
1705 1710 coreconfigitem(
1706 1711 b'merge-tools',
1707 1712 br'.*\.mergemarkertemplate$',
1708 1713 default=dynamicdefault, # take from command-templates.mergemarker
1709 1714 generic=True,
1710 1715 priority=-1,
1711 1716 )
1712 1717 coreconfigitem(
1713 1718 b'merge-tools',
1714 1719 br'.*\.priority$',
1715 1720 default=0,
1716 1721 generic=True,
1717 1722 priority=-1,
1718 1723 )
1719 1724 coreconfigitem(
1720 1725 b'merge-tools',
1721 1726 br'.*\.premerge$',
1722 1727 default=dynamicdefault,
1723 1728 generic=True,
1724 1729 priority=-1,
1725 1730 )
1726 1731 coreconfigitem(
1727 1732 b'merge-tools',
1728 1733 br'.*\.symlink$',
1729 1734 default=False,
1730 1735 generic=True,
1731 1736 priority=-1,
1732 1737 )
1733 1738 coreconfigitem(
1734 1739 b'pager',
1735 1740 b'attend-.*',
1736 1741 default=dynamicdefault,
1737 1742 generic=True,
1738 1743 )
1739 1744 coreconfigitem(
1740 1745 b'pager',
1741 1746 b'ignore',
1742 1747 default=list,
1743 1748 )
1744 1749 coreconfigitem(
1745 1750 b'pager',
1746 1751 b'pager',
1747 1752 default=dynamicdefault,
1748 1753 )
1749 1754 coreconfigitem(
1750 1755 b'patch',
1751 1756 b'eol',
1752 1757 default=b'strict',
1753 1758 )
1754 1759 coreconfigitem(
1755 1760 b'patch',
1756 1761 b'fuzz',
1757 1762 default=2,
1758 1763 )
1759 1764 coreconfigitem(
1760 1765 b'paths',
1761 1766 b'default',
1762 1767 default=None,
1763 1768 )
1764 1769 coreconfigitem(
1765 1770 b'paths',
1766 1771 b'default-push',
1767 1772 default=None,
1768 1773 )
1769 1774 coreconfigitem(
1770 1775 b'paths',
1771 1776 b'.*',
1772 1777 default=None,
1773 1778 generic=True,
1774 1779 )
1775 1780 coreconfigitem(
1776 1781 b'paths',
1777 1782 b'.*:bookmarks.mode',
1778 1783 default='default',
1779 1784 generic=True,
1780 1785 )
1781 1786 coreconfigitem(
1782 1787 b'paths',
1783 1788 b'.*:multi-urls',
1784 1789 default=False,
1785 1790 generic=True,
1786 1791 )
1787 1792 coreconfigitem(
1788 1793 b'paths',
1789 1794 b'.*:pushrev',
1790 1795 default=None,
1791 1796 generic=True,
1792 1797 )
1793 1798 coreconfigitem(
1794 1799 b'paths',
1795 1800 b'.*:pushurl',
1796 1801 default=None,
1797 1802 generic=True,
1798 1803 )
1799 1804 coreconfigitem(
1800 1805 b'phases',
1801 1806 b'checksubrepos',
1802 1807 default=b'follow',
1803 1808 )
1804 1809 coreconfigitem(
1805 1810 b'phases',
1806 1811 b'new-commit',
1807 1812 default=b'draft',
1808 1813 )
1809 1814 coreconfigitem(
1810 1815 b'phases',
1811 1816 b'publish',
1812 1817 default=True,
1813 1818 )
1814 1819 coreconfigitem(
1815 1820 b'profiling',
1816 1821 b'enabled',
1817 1822 default=False,
1818 1823 )
1819 1824 coreconfigitem(
1820 1825 b'profiling',
1821 1826 b'format',
1822 1827 default=b'text',
1823 1828 )
1824 1829 coreconfigitem(
1825 1830 b'profiling',
1826 1831 b'freq',
1827 1832 default=1000,
1828 1833 )
1829 1834 coreconfigitem(
1830 1835 b'profiling',
1831 1836 b'limit',
1832 1837 default=30,
1833 1838 )
1834 1839 coreconfigitem(
1835 1840 b'profiling',
1836 1841 b'nested',
1837 1842 default=0,
1838 1843 )
1839 1844 coreconfigitem(
1840 1845 b'profiling',
1841 1846 b'output',
1842 1847 default=None,
1843 1848 )
1844 1849 coreconfigitem(
1845 1850 b'profiling',
1846 1851 b'showmax',
1847 1852 default=0.999,
1848 1853 )
1849 1854 coreconfigitem(
1850 1855 b'profiling',
1851 1856 b'showmin',
1852 1857 default=dynamicdefault,
1853 1858 )
1854 1859 coreconfigitem(
1855 1860 b'profiling',
1856 1861 b'showtime',
1857 1862 default=True,
1858 1863 )
1859 1864 coreconfigitem(
1860 1865 b'profiling',
1861 1866 b'sort',
1862 1867 default=b'inlinetime',
1863 1868 )
1864 1869 coreconfigitem(
1865 1870 b'profiling',
1866 1871 b'statformat',
1867 1872 default=b'hotpath',
1868 1873 )
1869 1874 coreconfigitem(
1870 1875 b'profiling',
1871 1876 b'time-track',
1872 1877 default=dynamicdefault,
1873 1878 )
1874 1879 coreconfigitem(
1875 1880 b'profiling',
1876 1881 b'type',
1877 1882 default=b'stat',
1878 1883 )
1879 1884 coreconfigitem(
1880 1885 b'progress',
1881 1886 b'assume-tty',
1882 1887 default=False,
1883 1888 )
1884 1889 coreconfigitem(
1885 1890 b'progress',
1886 1891 b'changedelay',
1887 1892 default=1,
1888 1893 )
1889 1894 coreconfigitem(
1890 1895 b'progress',
1891 1896 b'clear-complete',
1892 1897 default=True,
1893 1898 )
1894 1899 coreconfigitem(
1895 1900 b'progress',
1896 1901 b'debug',
1897 1902 default=False,
1898 1903 )
1899 1904 coreconfigitem(
1900 1905 b'progress',
1901 1906 b'delay',
1902 1907 default=3,
1903 1908 )
1904 1909 coreconfigitem(
1905 1910 b'progress',
1906 1911 b'disable',
1907 1912 default=False,
1908 1913 )
1909 1914 coreconfigitem(
1910 1915 b'progress',
1911 1916 b'estimateinterval',
1912 1917 default=60.0,
1913 1918 )
1914 1919 coreconfigitem(
1915 1920 b'progress',
1916 1921 b'format',
1917 1922 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1918 1923 )
1919 1924 coreconfigitem(
1920 1925 b'progress',
1921 1926 b'refresh',
1922 1927 default=0.1,
1923 1928 )
1924 1929 coreconfigitem(
1925 1930 b'progress',
1926 1931 b'width',
1927 1932 default=dynamicdefault,
1928 1933 )
1929 1934 coreconfigitem(
1930 1935 b'pull',
1931 1936 b'confirm',
1932 1937 default=False,
1933 1938 )
1934 1939 coreconfigitem(
1935 1940 b'push',
1936 1941 b'pushvars.server',
1937 1942 default=False,
1938 1943 )
1939 1944 coreconfigitem(
1940 1945 b'rewrite',
1941 1946 b'backup-bundle',
1942 1947 default=True,
1943 1948 alias=[(b'ui', b'history-editing-backup')],
1944 1949 )
1945 1950 coreconfigitem(
1946 1951 b'rewrite',
1947 1952 b'update-timestamp',
1948 1953 default=False,
1949 1954 )
1950 1955 coreconfigitem(
1951 1956 b'rewrite',
1952 1957 b'empty-successor',
1953 1958 default=b'skip',
1954 1959 experimental=True,
1955 1960 )
1956 1961 # experimental as long as format.use-dirstate-v2 is.
1957 1962 coreconfigitem(
1958 1963 b'storage',
1959 1964 b'dirstate-v2.slow-path',
1960 1965 default=b"abort",
1961 1966 experimental=True,
1962 1967 )
1963 1968 coreconfigitem(
1964 1969 b'storage',
1965 1970 b'new-repo-backend',
1966 1971 default=b'revlogv1',
1967 1972 experimental=True,
1968 1973 )
1969 1974 coreconfigitem(
1970 1975 b'storage',
1971 1976 b'revlog.optimize-delta-parent-choice',
1972 1977 default=True,
1973 1978 alias=[(b'format', b'aggressivemergedeltas')],
1974 1979 )
1975 1980 coreconfigitem(
1976 1981 b'storage',
1977 1982 b'revlog.issue6528.fix-incoming',
1978 1983 default=True,
1979 1984 )
1980 1985 # experimental as long as rust is experimental (or a C version is implemented)
1981 1986 coreconfigitem(
1982 1987 b'storage',
1983 1988 b'revlog.persistent-nodemap.mmap',
1984 1989 default=True,
1985 1990 )
1986 1991 # experimental as long as format.use-persistent-nodemap is.
1987 1992 coreconfigitem(
1988 1993 b'storage',
1989 1994 b'revlog.persistent-nodemap.slow-path',
1990 1995 default=b"abort",
1991 1996 )
1992 1997
1993 1998 coreconfigitem(
1994 1999 b'storage',
1995 2000 b'revlog.reuse-external-delta',
1996 2001 default=True,
1997 2002 )
1998 2003 coreconfigitem(
1999 2004 b'storage',
2000 2005 b'revlog.reuse-external-delta-parent',
2001 2006 default=None,
2002 2007 )
2003 2008 coreconfigitem(
2004 2009 b'storage',
2005 2010 b'revlog.zlib.level',
2006 2011 default=None,
2007 2012 )
2008 2013 coreconfigitem(
2009 2014 b'storage',
2010 2015 b'revlog.zstd.level',
2011 2016 default=None,
2012 2017 )
2013 2018 coreconfigitem(
2014 2019 b'server',
2015 2020 b'bookmarks-pushkey-compat',
2016 2021 default=True,
2017 2022 )
2018 2023 coreconfigitem(
2019 2024 b'server',
2020 2025 b'bundle1',
2021 2026 default=True,
2022 2027 )
2023 2028 coreconfigitem(
2024 2029 b'server',
2025 2030 b'bundle1gd',
2026 2031 default=None,
2027 2032 )
2028 2033 coreconfigitem(
2029 2034 b'server',
2030 2035 b'bundle1.pull',
2031 2036 default=None,
2032 2037 )
2033 2038 coreconfigitem(
2034 2039 b'server',
2035 2040 b'bundle1gd.pull',
2036 2041 default=None,
2037 2042 )
2038 2043 coreconfigitem(
2039 2044 b'server',
2040 2045 b'bundle1.push',
2041 2046 default=None,
2042 2047 )
2043 2048 coreconfigitem(
2044 2049 b'server',
2045 2050 b'bundle1gd.push',
2046 2051 default=None,
2047 2052 )
2048 2053 coreconfigitem(
2049 2054 b'server',
2050 2055 b'bundle2.stream',
2051 2056 default=True,
2052 2057 alias=[(b'experimental', b'bundle2.stream')],
2053 2058 )
2054 2059 coreconfigitem(
2055 2060 b'server',
2056 2061 b'compressionengines',
2057 2062 default=list,
2058 2063 )
2059 2064 coreconfigitem(
2060 2065 b'server',
2061 2066 b'concurrent-push-mode',
2062 2067 default=b'check-related',
2063 2068 )
2064 2069 coreconfigitem(
2065 2070 b'server',
2066 2071 b'disablefullbundle',
2067 2072 default=False,
2068 2073 )
2069 2074 coreconfigitem(
2070 2075 b'server',
2071 2076 b'maxhttpheaderlen',
2072 2077 default=1024,
2073 2078 )
2074 2079 coreconfigitem(
2075 2080 b'server',
2076 2081 b'pullbundle',
2077 2082 default=False,
2078 2083 )
2079 2084 coreconfigitem(
2080 2085 b'server',
2081 2086 b'preferuncompressed',
2082 2087 default=False,
2083 2088 )
2084 2089 coreconfigitem(
2085 2090 b'server',
2086 2091 b'streamunbundle',
2087 2092 default=False,
2088 2093 )
2089 2094 coreconfigitem(
2090 2095 b'server',
2091 2096 b'uncompressed',
2092 2097 default=True,
2093 2098 )
2094 2099 coreconfigitem(
2095 2100 b'server',
2096 2101 b'uncompressedallowsecret',
2097 2102 default=False,
2098 2103 )
2099 2104 coreconfigitem(
2100 2105 b'server',
2101 2106 b'view',
2102 2107 default=b'served',
2103 2108 )
2104 2109 coreconfigitem(
2105 2110 b'server',
2106 2111 b'validate',
2107 2112 default=False,
2108 2113 )
2109 2114 coreconfigitem(
2110 2115 b'server',
2111 2116 b'zliblevel',
2112 2117 default=-1,
2113 2118 )
2114 2119 coreconfigitem(
2115 2120 b'server',
2116 2121 b'zstdlevel',
2117 2122 default=3,
2118 2123 )
2119 2124 coreconfigitem(
2120 2125 b'share',
2121 2126 b'pool',
2122 2127 default=None,
2123 2128 )
2124 2129 coreconfigitem(
2125 2130 b'share',
2126 2131 b'poolnaming',
2127 2132 default=b'identity',
2128 2133 )
2129 2134 coreconfigitem(
2130 2135 b'share',
2131 2136 b'safe-mismatch.source-not-safe',
2132 2137 default=b'abort',
2133 2138 )
2134 2139 coreconfigitem(
2135 2140 b'share',
2136 2141 b'safe-mismatch.source-safe',
2137 2142 default=b'abort',
2138 2143 )
2139 2144 coreconfigitem(
2140 2145 b'share',
2141 2146 b'safe-mismatch.source-not-safe.warn',
2142 2147 default=True,
2143 2148 )
2144 2149 coreconfigitem(
2145 2150 b'share',
2146 2151 b'safe-mismatch.source-safe.warn',
2147 2152 default=True,
2148 2153 )
2149 2154 coreconfigitem(
2150 2155 b'shelve',
2151 2156 b'maxbackups',
2152 2157 default=10,
2153 2158 )
2154 2159 coreconfigitem(
2155 2160 b'smtp',
2156 2161 b'host',
2157 2162 default=None,
2158 2163 )
2159 2164 coreconfigitem(
2160 2165 b'smtp',
2161 2166 b'local_hostname',
2162 2167 default=None,
2163 2168 )
2164 2169 coreconfigitem(
2165 2170 b'smtp',
2166 2171 b'password',
2167 2172 default=None,
2168 2173 )
2169 2174 coreconfigitem(
2170 2175 b'smtp',
2171 2176 b'port',
2172 2177 default=dynamicdefault,
2173 2178 )
2174 2179 coreconfigitem(
2175 2180 b'smtp',
2176 2181 b'tls',
2177 2182 default=b'none',
2178 2183 )
2179 2184 coreconfigitem(
2180 2185 b'smtp',
2181 2186 b'username',
2182 2187 default=None,
2183 2188 )
2184 2189 coreconfigitem(
2185 2190 b'sparse',
2186 2191 b'missingwarning',
2187 2192 default=True,
2188 2193 experimental=True,
2189 2194 )
2190 2195 coreconfigitem(
2191 2196 b'subrepos',
2192 2197 b'allowed',
2193 2198 default=dynamicdefault, # to make backporting simpler
2194 2199 )
2195 2200 coreconfigitem(
2196 2201 b'subrepos',
2197 2202 b'hg:allowed',
2198 2203 default=dynamicdefault,
2199 2204 )
2200 2205 coreconfigitem(
2201 2206 b'subrepos',
2202 2207 b'git:allowed',
2203 2208 default=dynamicdefault,
2204 2209 )
2205 2210 coreconfigitem(
2206 2211 b'subrepos',
2207 2212 b'svn:allowed',
2208 2213 default=dynamicdefault,
2209 2214 )
2210 2215 coreconfigitem(
2211 2216 b'templates',
2212 2217 b'.*',
2213 2218 default=None,
2214 2219 generic=True,
2215 2220 )
2216 2221 coreconfigitem(
2217 2222 b'templateconfig',
2218 2223 b'.*',
2219 2224 default=dynamicdefault,
2220 2225 generic=True,
2221 2226 )
2222 2227 coreconfigitem(
2223 2228 b'trusted',
2224 2229 b'groups',
2225 2230 default=list,
2226 2231 )
2227 2232 coreconfigitem(
2228 2233 b'trusted',
2229 2234 b'users',
2230 2235 default=list,
2231 2236 )
2232 2237 coreconfigitem(
2233 2238 b'ui',
2234 2239 b'_usedassubrepo',
2235 2240 default=False,
2236 2241 )
2237 2242 coreconfigitem(
2238 2243 b'ui',
2239 2244 b'allowemptycommit',
2240 2245 default=False,
2241 2246 )
2242 2247 coreconfigitem(
2243 2248 b'ui',
2244 2249 b'archivemeta',
2245 2250 default=True,
2246 2251 )
2247 2252 coreconfigitem(
2248 2253 b'ui',
2249 2254 b'askusername',
2250 2255 default=False,
2251 2256 )
2252 2257 coreconfigitem(
2253 2258 b'ui',
2254 2259 b'available-memory',
2255 2260 default=None,
2256 2261 )
2257 2262
2258 2263 coreconfigitem(
2259 2264 b'ui',
2260 2265 b'clonebundlefallback',
2261 2266 default=False,
2262 2267 )
2263 2268 coreconfigitem(
2264 2269 b'ui',
2265 2270 b'clonebundleprefers',
2266 2271 default=list,
2267 2272 )
2268 2273 coreconfigitem(
2269 2274 b'ui',
2270 2275 b'clonebundles',
2271 2276 default=True,
2272 2277 )
2273 2278 coreconfigitem(
2274 2279 b'ui',
2275 2280 b'color',
2276 2281 default=b'auto',
2277 2282 )
2278 2283 coreconfigitem(
2279 2284 b'ui',
2280 2285 b'commitsubrepos',
2281 2286 default=False,
2282 2287 )
2283 2288 coreconfigitem(
2284 2289 b'ui',
2285 2290 b'debug',
2286 2291 default=False,
2287 2292 )
2288 2293 coreconfigitem(
2289 2294 b'ui',
2290 2295 b'debugger',
2291 2296 default=None,
2292 2297 )
2293 2298 coreconfigitem(
2294 2299 b'ui',
2295 2300 b'editor',
2296 2301 default=dynamicdefault,
2297 2302 )
2298 2303 coreconfigitem(
2299 2304 b'ui',
2300 2305 b'detailed-exit-code',
2301 2306 default=False,
2302 2307 experimental=True,
2303 2308 )
2304 2309 coreconfigitem(
2305 2310 b'ui',
2306 2311 b'fallbackencoding',
2307 2312 default=None,
2308 2313 )
2309 2314 coreconfigitem(
2310 2315 b'ui',
2311 2316 b'forcecwd',
2312 2317 default=None,
2313 2318 )
2314 2319 coreconfigitem(
2315 2320 b'ui',
2316 2321 b'forcemerge',
2317 2322 default=None,
2318 2323 )
2319 2324 coreconfigitem(
2320 2325 b'ui',
2321 2326 b'formatdebug',
2322 2327 default=False,
2323 2328 )
2324 2329 coreconfigitem(
2325 2330 b'ui',
2326 2331 b'formatjson',
2327 2332 default=False,
2328 2333 )
2329 2334 coreconfigitem(
2330 2335 b'ui',
2331 2336 b'formatted',
2332 2337 default=None,
2333 2338 )
2334 2339 coreconfigitem(
2335 2340 b'ui',
2336 2341 b'interactive',
2337 2342 default=None,
2338 2343 )
2339 2344 coreconfigitem(
2340 2345 b'ui',
2341 2346 b'interface',
2342 2347 default=None,
2343 2348 )
2344 2349 coreconfigitem(
2345 2350 b'ui',
2346 2351 b'interface.chunkselector',
2347 2352 default=None,
2348 2353 )
2349 2354 coreconfigitem(
2350 2355 b'ui',
2351 2356 b'large-file-limit',
2352 2357 default=10 * (2 ** 20),
2353 2358 )
2354 2359 coreconfigitem(
2355 2360 b'ui',
2356 2361 b'logblockedtimes',
2357 2362 default=False,
2358 2363 )
2359 2364 coreconfigitem(
2360 2365 b'ui',
2361 2366 b'merge',
2362 2367 default=None,
2363 2368 )
2364 2369 coreconfigitem(
2365 2370 b'ui',
2366 2371 b'mergemarkers',
2367 2372 default=b'basic',
2368 2373 )
2369 2374 coreconfigitem(
2370 2375 b'ui',
2371 2376 b'message-output',
2372 2377 default=b'stdio',
2373 2378 )
2374 2379 coreconfigitem(
2375 2380 b'ui',
2376 2381 b'nontty',
2377 2382 default=False,
2378 2383 )
2379 2384 coreconfigitem(
2380 2385 b'ui',
2381 2386 b'origbackuppath',
2382 2387 default=None,
2383 2388 )
2384 2389 coreconfigitem(
2385 2390 b'ui',
2386 2391 b'paginate',
2387 2392 default=True,
2388 2393 )
2389 2394 coreconfigitem(
2390 2395 b'ui',
2391 2396 b'patch',
2392 2397 default=None,
2393 2398 )
2394 2399 coreconfigitem(
2395 2400 b'ui',
2396 2401 b'portablefilenames',
2397 2402 default=b'warn',
2398 2403 )
2399 2404 coreconfigitem(
2400 2405 b'ui',
2401 2406 b'promptecho',
2402 2407 default=False,
2403 2408 )
2404 2409 coreconfigitem(
2405 2410 b'ui',
2406 2411 b'quiet',
2407 2412 default=False,
2408 2413 )
2409 2414 coreconfigitem(
2410 2415 b'ui',
2411 2416 b'quietbookmarkmove',
2412 2417 default=False,
2413 2418 )
2414 2419 coreconfigitem(
2415 2420 b'ui',
2416 2421 b'relative-paths',
2417 2422 default=b'legacy',
2418 2423 )
2419 2424 coreconfigitem(
2420 2425 b'ui',
2421 2426 b'remotecmd',
2422 2427 default=b'hg',
2423 2428 )
2424 2429 coreconfigitem(
2425 2430 b'ui',
2426 2431 b'report_untrusted',
2427 2432 default=True,
2428 2433 )
2429 2434 coreconfigitem(
2430 2435 b'ui',
2431 2436 b'rollback',
2432 2437 default=True,
2433 2438 )
2434 2439 coreconfigitem(
2435 2440 b'ui',
2436 2441 b'signal-safe-lock',
2437 2442 default=True,
2438 2443 )
2439 2444 coreconfigitem(
2440 2445 b'ui',
2441 2446 b'slash',
2442 2447 default=False,
2443 2448 )
2444 2449 coreconfigitem(
2445 2450 b'ui',
2446 2451 b'ssh',
2447 2452 default=b'ssh',
2448 2453 )
2449 2454 coreconfigitem(
2450 2455 b'ui',
2451 2456 b'ssherrorhint',
2452 2457 default=None,
2453 2458 )
2454 2459 coreconfigitem(
2455 2460 b'ui',
2456 2461 b'statuscopies',
2457 2462 default=False,
2458 2463 )
2459 2464 coreconfigitem(
2460 2465 b'ui',
2461 2466 b'strict',
2462 2467 default=False,
2463 2468 )
2464 2469 coreconfigitem(
2465 2470 b'ui',
2466 2471 b'style',
2467 2472 default=b'',
2468 2473 )
2469 2474 coreconfigitem(
2470 2475 b'ui',
2471 2476 b'supportcontact',
2472 2477 default=None,
2473 2478 )
2474 2479 coreconfigitem(
2475 2480 b'ui',
2476 2481 b'textwidth',
2477 2482 default=78,
2478 2483 )
2479 2484 coreconfigitem(
2480 2485 b'ui',
2481 2486 b'timeout',
2482 2487 default=b'600',
2483 2488 )
2484 2489 coreconfigitem(
2485 2490 b'ui',
2486 2491 b'timeout.warn',
2487 2492 default=0,
2488 2493 )
2489 2494 coreconfigitem(
2490 2495 b'ui',
2491 2496 b'timestamp-output',
2492 2497 default=False,
2493 2498 )
2494 2499 coreconfigitem(
2495 2500 b'ui',
2496 2501 b'traceback',
2497 2502 default=False,
2498 2503 )
2499 2504 coreconfigitem(
2500 2505 b'ui',
2501 2506 b'tweakdefaults',
2502 2507 default=False,
2503 2508 )
2504 2509 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2505 2510 coreconfigitem(
2506 2511 b'ui',
2507 2512 b'verbose',
2508 2513 default=False,
2509 2514 )
2510 2515 coreconfigitem(
2511 2516 b'verify',
2512 2517 b'skipflags',
2513 2518 default=None,
2514 2519 )
2515 2520 coreconfigitem(
2516 2521 b'web',
2517 2522 b'allowbz2',
2518 2523 default=False,
2519 2524 )
2520 2525 coreconfigitem(
2521 2526 b'web',
2522 2527 b'allowgz',
2523 2528 default=False,
2524 2529 )
2525 2530 coreconfigitem(
2526 2531 b'web',
2527 2532 b'allow-pull',
2528 2533 alias=[(b'web', b'allowpull')],
2529 2534 default=True,
2530 2535 )
2531 2536 coreconfigitem(
2532 2537 b'web',
2533 2538 b'allow-push',
2534 2539 alias=[(b'web', b'allow_push')],
2535 2540 default=list,
2536 2541 )
2537 2542 coreconfigitem(
2538 2543 b'web',
2539 2544 b'allowzip',
2540 2545 default=False,
2541 2546 )
2542 2547 coreconfigitem(
2543 2548 b'web',
2544 2549 b'archivesubrepos',
2545 2550 default=False,
2546 2551 )
2547 2552 coreconfigitem(
2548 2553 b'web',
2549 2554 b'cache',
2550 2555 default=True,
2551 2556 )
2552 2557 coreconfigitem(
2553 2558 b'web',
2554 2559 b'comparisoncontext',
2555 2560 default=5,
2556 2561 )
2557 2562 coreconfigitem(
2558 2563 b'web',
2559 2564 b'contact',
2560 2565 default=None,
2561 2566 )
2562 2567 coreconfigitem(
2563 2568 b'web',
2564 2569 b'deny_push',
2565 2570 default=list,
2566 2571 )
2567 2572 coreconfigitem(
2568 2573 b'web',
2569 2574 b'guessmime',
2570 2575 default=False,
2571 2576 )
2572 2577 coreconfigitem(
2573 2578 b'web',
2574 2579 b'hidden',
2575 2580 default=False,
2576 2581 )
2577 2582 coreconfigitem(
2578 2583 b'web',
2579 2584 b'labels',
2580 2585 default=list,
2581 2586 )
2582 2587 coreconfigitem(
2583 2588 b'web',
2584 2589 b'logoimg',
2585 2590 default=b'hglogo.png',
2586 2591 )
2587 2592 coreconfigitem(
2588 2593 b'web',
2589 2594 b'logourl',
2590 2595 default=b'https://mercurial-scm.org/',
2591 2596 )
2592 2597 coreconfigitem(
2593 2598 b'web',
2594 2599 b'accesslog',
2595 2600 default=b'-',
2596 2601 )
2597 2602 coreconfigitem(
2598 2603 b'web',
2599 2604 b'address',
2600 2605 default=b'',
2601 2606 )
2602 2607 coreconfigitem(
2603 2608 b'web',
2604 2609 b'allow-archive',
2605 2610 alias=[(b'web', b'allow_archive')],
2606 2611 default=list,
2607 2612 )
2608 2613 coreconfigitem(
2609 2614 b'web',
2610 2615 b'allow_read',
2611 2616 default=list,
2612 2617 )
2613 2618 coreconfigitem(
2614 2619 b'web',
2615 2620 b'baseurl',
2616 2621 default=None,
2617 2622 )
2618 2623 coreconfigitem(
2619 2624 b'web',
2620 2625 b'cacerts',
2621 2626 default=None,
2622 2627 )
2623 2628 coreconfigitem(
2624 2629 b'web',
2625 2630 b'certificate',
2626 2631 default=None,
2627 2632 )
2628 2633 coreconfigitem(
2629 2634 b'web',
2630 2635 b'collapse',
2631 2636 default=False,
2632 2637 )
2633 2638 coreconfigitem(
2634 2639 b'web',
2635 2640 b'csp',
2636 2641 default=None,
2637 2642 )
2638 2643 coreconfigitem(
2639 2644 b'web',
2640 2645 b'deny_read',
2641 2646 default=list,
2642 2647 )
2643 2648 coreconfigitem(
2644 2649 b'web',
2645 2650 b'descend',
2646 2651 default=True,
2647 2652 )
2648 2653 coreconfigitem(
2649 2654 b'web',
2650 2655 b'description',
2651 2656 default=b"",
2652 2657 )
2653 2658 coreconfigitem(
2654 2659 b'web',
2655 2660 b'encoding',
2656 2661 default=lambda: encoding.encoding,
2657 2662 )
2658 2663 coreconfigitem(
2659 2664 b'web',
2660 2665 b'errorlog',
2661 2666 default=b'-',
2662 2667 )
2663 2668 coreconfigitem(
2664 2669 b'web',
2665 2670 b'ipv6',
2666 2671 default=False,
2667 2672 )
2668 2673 coreconfigitem(
2669 2674 b'web',
2670 2675 b'maxchanges',
2671 2676 default=10,
2672 2677 )
2673 2678 coreconfigitem(
2674 2679 b'web',
2675 2680 b'maxfiles',
2676 2681 default=10,
2677 2682 )
2678 2683 coreconfigitem(
2679 2684 b'web',
2680 2685 b'maxshortchanges',
2681 2686 default=60,
2682 2687 )
2683 2688 coreconfigitem(
2684 2689 b'web',
2685 2690 b'motd',
2686 2691 default=b'',
2687 2692 )
2688 2693 coreconfigitem(
2689 2694 b'web',
2690 2695 b'name',
2691 2696 default=dynamicdefault,
2692 2697 )
2693 2698 coreconfigitem(
2694 2699 b'web',
2695 2700 b'port',
2696 2701 default=8000,
2697 2702 )
2698 2703 coreconfigitem(
2699 2704 b'web',
2700 2705 b'prefix',
2701 2706 default=b'',
2702 2707 )
2703 2708 coreconfigitem(
2704 2709 b'web',
2705 2710 b'push_ssl',
2706 2711 default=True,
2707 2712 )
2708 2713 coreconfigitem(
2709 2714 b'web',
2710 2715 b'refreshinterval',
2711 2716 default=20,
2712 2717 )
2713 2718 coreconfigitem(
2714 2719 b'web',
2715 2720 b'server-header',
2716 2721 default=None,
2717 2722 )
2718 2723 coreconfigitem(
2719 2724 b'web',
2720 2725 b'static',
2721 2726 default=None,
2722 2727 )
2723 2728 coreconfigitem(
2724 2729 b'web',
2725 2730 b'staticurl',
2726 2731 default=None,
2727 2732 )
2728 2733 coreconfigitem(
2729 2734 b'web',
2730 2735 b'stripes',
2731 2736 default=1,
2732 2737 )
2733 2738 coreconfigitem(
2734 2739 b'web',
2735 2740 b'style',
2736 2741 default=b'paper',
2737 2742 )
2738 2743 coreconfigitem(
2739 2744 b'web',
2740 2745 b'templates',
2741 2746 default=None,
2742 2747 )
2743 2748 coreconfigitem(
2744 2749 b'web',
2745 2750 b'view',
2746 2751 default=b'served',
2747 2752 experimental=True,
2748 2753 )
2749 2754 coreconfigitem(
2750 2755 b'worker',
2751 2756 b'backgroundclose',
2752 2757 default=dynamicdefault,
2753 2758 )
2754 2759 # Windows defaults to a limit of 512 open files. A buffer of 128
2755 2760 # should give us enough headway.
2756 2761 coreconfigitem(
2757 2762 b'worker',
2758 2763 b'backgroundclosemaxqueue',
2759 2764 default=384,
2760 2765 )
2761 2766 coreconfigitem(
2762 2767 b'worker',
2763 2768 b'backgroundcloseminfilecount',
2764 2769 default=2048,
2765 2770 )
2766 2771 coreconfigitem(
2767 2772 b'worker',
2768 2773 b'backgroundclosethreadcount',
2769 2774 default=4,
2770 2775 )
2771 2776 coreconfigitem(
2772 2777 b'worker',
2773 2778 b'enabled',
2774 2779 default=True,
2775 2780 )
2776 2781 coreconfigitem(
2777 2782 b'worker',
2778 2783 b'numcpus',
2779 2784 default=None,
2780 2785 )
2781 2786
2782 2787 # Rebase related configuration moved to core because other extensions are doing
2783 2788 # strange things. For example, shelve imports the extension to reuse some bits
2784 2789 # without formally loading it.
2785 2790 coreconfigitem(
2786 2791 b'commands',
2787 2792 b'rebase.requiredest',
2788 2793 default=False,
2789 2794 )
2790 2795 coreconfigitem(
2791 2796 b'experimental',
2792 2797 b'rebaseskipobsolete',
2793 2798 default=True,
2794 2799 )
2795 2800 coreconfigitem(
2796 2801 b'rebase',
2797 2802 b'singletransaction',
2798 2803 default=False,
2799 2804 )
2800 2805 coreconfigitem(
2801 2806 b'rebase',
2802 2807 b'experimental.inmemory',
2803 2808 default=False,
2804 2809 )
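
The hunk shown above ends before the line this changeset adds to configitems.py; the registration backing the new `debug.revlog.debug-delta` option named in the commit summary would look roughly like the sketch below (its exact placement and default are assumptions, since they are not visible in this excerpt):

    coreconfigitem(
        b'debug',
        b'revlog.debug-delta',
        default=False,
    )

With such a declaration in place, the `ui.configbool(b'debug', b'revlog.debug-delta')` call in the localrepo.py hunk below resolves to a known default instead of tripping the unknown-config warning.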
@@ -1,3939 +1,3940 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from concurrent import futures
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullrev,
24 24 sha1nodeconstants,
25 25 short,
26 26 )
27 27 from .pycompat import (
28 28 delattr,
29 29 getattr,
30 30 )
31 31 from . import (
32 32 bookmarks,
33 33 branchmap,
34 34 bundle2,
35 35 bundlecaches,
36 36 changegroup,
37 37 color,
38 38 commit,
39 39 context,
40 40 dirstate,
41 41 dirstateguard,
42 42 discovery,
43 43 encoding,
44 44 error,
45 45 exchange,
46 46 extensions,
47 47 filelog,
48 48 hook,
49 49 lock as lockmod,
50 50 match as matchmod,
51 51 mergestate as mergestatemod,
52 52 mergeutil,
53 53 namespaces,
54 54 narrowspec,
55 55 obsolete,
56 56 pathutil,
57 57 phases,
58 58 pushkey,
59 59 pycompat,
60 60 rcutil,
61 61 repoview,
62 62 requirements as requirementsmod,
63 63 revlog,
64 64 revset,
65 65 revsetlang,
66 66 scmutil,
67 67 sparse,
68 68 store as storemod,
69 69 subrepoutil,
70 70 tags as tagsmod,
71 71 transaction,
72 72 txnutil,
73 73 util,
74 74 vfs as vfsmod,
75 75 wireprototypes,
76 76 )
77 77
78 78 from .interfaces import (
79 79 repository,
80 80 util as interfaceutil,
81 81 )
82 82
83 83 from .utils import (
84 84 hashutil,
85 85 procutil,
86 86 stringutil,
87 87 urlutil,
88 88 )
89 89
90 90 from .revlogutils import (
91 91 concurrency_checker as revlogchecker,
92 92 constants as revlogconst,
93 93 sidedata as sidedatamod,
94 94 )
95 95
96 96 release = lockmod.release
97 97 urlerr = util.urlerr
98 98 urlreq = util.urlreq
99 99
100 100 # set of (path, vfs-location) tuples. vfs-location is:
101 101 # - 'plain' for vfs relative paths
102 102 # - '' for svfs relative paths
103 103 _cachedfiles = set()
104 104
105 105
106 106 class _basefilecache(scmutil.filecache):
107 107 """All filecache usage on repo are done for logic that should be unfiltered"""
108 108
109 109 def __get__(self, repo, type=None):
110 110 if repo is None:
111 111 return self
112 112 # proxy to unfiltered __dict__ since filtered repo has no entry
113 113 unfi = repo.unfiltered()
114 114 try:
115 115 return unfi.__dict__[self.sname]
116 116 except KeyError:
117 117 pass
118 118 return super(_basefilecache, self).__get__(unfi, type)
119 119
120 120 def set(self, repo, value):
121 121 return super(_basefilecache, self).set(repo.unfiltered(), value)
122 122
123 123
124 124 class repofilecache(_basefilecache):
125 125 """filecache for files in .hg but outside of .hg/store"""
126 126
127 127 def __init__(self, *paths):
128 128 super(repofilecache, self).__init__(*paths)
129 129 for path in paths:
130 130 _cachedfiles.add((path, b'plain'))
131 131
132 132 def join(self, obj, fname):
133 133 return obj.vfs.join(fname)
134 134
135 135
136 136 class storecache(_basefilecache):
137 137 """filecache for files in the store"""
138 138
139 139 def __init__(self, *paths):
140 140 super(storecache, self).__init__(*paths)
141 141 for path in paths:
142 142 _cachedfiles.add((path, b''))
143 143
144 144 def join(self, obj, fname):
145 145 return obj.sjoin(fname)
146 146
147 147
148 148 class changelogcache(storecache):
149 149 """filecache for the changelog"""
150 150
151 151 def __init__(self):
152 152 super(changelogcache, self).__init__()
153 153 _cachedfiles.add((b'00changelog.i', b''))
154 154 _cachedfiles.add((b'00changelog.n', b''))
155 155
156 156 def tracked_paths(self, obj):
157 157 paths = [self.join(obj, b'00changelog.i')]
158 158 if obj.store.opener.options.get(b'persistent-nodemap', False):
159 159 paths.append(self.join(obj, b'00changelog.n'))
160 160 return paths
161 161
162 162
163 163 class manifestlogcache(storecache):
164 164 """filecache for the manifestlog"""
165 165
166 166 def __init__(self):
167 167 super(manifestlogcache, self).__init__()
168 168 _cachedfiles.add((b'00manifest.i', b''))
169 169 _cachedfiles.add((b'00manifest.n', b''))
170 170
171 171 def tracked_paths(self, obj):
172 172 paths = [self.join(obj, b'00manifest.i')]
173 173 if obj.store.opener.options.get(b'persistent-nodemap', False):
174 174 paths.append(self.join(obj, b'00manifest.n'))
175 175 return paths
176 176
177 177
178 178 class mixedrepostorecache(_basefilecache):
179 179 """filecache for a mix files in .hg/store and outside"""
180 180
181 181 def __init__(self, *pathsandlocations):
182 182 # scmutil.filecache only uses the path for passing back into our
183 183 # join(), so we can safely pass a list of paths and locations
184 184 super(mixedrepostorecache, self).__init__(*pathsandlocations)
185 185 _cachedfiles.update(pathsandlocations)
186 186
187 187 def join(self, obj, fnameandlocation):
188 188 fname, location = fnameandlocation
189 189 if location == b'plain':
190 190 return obj.vfs.join(fname)
191 191 else:
192 192 if location != b'':
193 193 raise error.ProgrammingError(
194 194 b'unexpected location: %s' % location
195 195 )
196 196 return obj.sjoin(fname)
197 197
198 198
199 199 def isfilecached(repo, name):
200 200 """check if a repo has already cached "name" filecache-ed property
201 201
202 202 This returns (cachedobj-or-None, iscached) tuple.
203 203 """
204 204 cacheentry = repo.unfiltered()._filecache.get(name, None)
205 205 if not cacheentry:
206 206 return None, False
207 207 return cacheentry.obj, True
208 208
209 209
210 210 class unfilteredpropertycache(util.propertycache):
211 211 """propertycache that apply to unfiltered repo only"""
212 212
213 213 def __get__(self, repo, type=None):
214 214 unfi = repo.unfiltered()
215 215 if unfi is repo:
216 216 return super(unfilteredpropertycache, self).__get__(unfi)
217 217 return getattr(unfi, self.name)
218 218
219 219
220 220 class filteredpropertycache(util.propertycache):
221 221 """propertycache that must take filtering in account"""
222 222
223 223 def cachevalue(self, obj, value):
224 224 object.__setattr__(obj, self.name, value)
225 225
226 226
227 227 def hasunfilteredcache(repo, name):
228 228 """check if a repo has an unfilteredpropertycache value for <name>"""
229 229 return name in vars(repo.unfiltered())
230 230
231 231
232 232 def unfilteredmethod(orig):
233 233 """decorate method that always need to be run on unfiltered version"""
234 234
235 235 @functools.wraps(orig)
236 236 def wrapper(repo, *args, **kwargs):
237 237 return orig(repo.unfiltered(), *args, **kwargs)
238 238
239 239 return wrapper
240 240
241 241
242 242 moderncaps = {
243 243 b'lookup',
244 244 b'branchmap',
245 245 b'pushkey',
246 246 b'known',
247 247 b'getbundle',
248 248 b'unbundle',
249 249 }
250 250 legacycaps = moderncaps.union({b'changegroupsubset'})
251 251
252 252
253 253 @interfaceutil.implementer(repository.ipeercommandexecutor)
254 254 class localcommandexecutor:
255 255 def __init__(self, peer):
256 256 self._peer = peer
257 257 self._sent = False
258 258 self._closed = False
259 259
260 260 def __enter__(self):
261 261 return self
262 262
263 263 def __exit__(self, exctype, excvalue, exctb):
264 264 self.close()
265 265
266 266 def callcommand(self, command, args):
267 267 if self._sent:
268 268 raise error.ProgrammingError(
269 269 b'callcommand() cannot be used after sendcommands()'
270 270 )
271 271
272 272 if self._closed:
273 273 raise error.ProgrammingError(
274 274 b'callcommand() cannot be used after close()'
275 275 )
276 276
277 277 # We don't need to support anything fancy. Just call the named
278 278 # method on the peer and return a resolved future.
279 279 fn = getattr(self._peer, pycompat.sysstr(command))
280 280
281 281 f = futures.Future()
282 282
283 283 try:
284 284 result = fn(**pycompat.strkwargs(args))
285 285 except Exception:
286 286 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
287 287 else:
288 288 f.set_result(result)
289 289
290 290 return f
291 291
292 292 def sendcommands(self):
293 293 self._sent = True
294 294
295 295 def close(self):
296 296 self._closed = True
297 297
298 298
299 299 @interfaceutil.implementer(repository.ipeercommands)
300 300 class localpeer(repository.peer):
301 301 '''peer for a local repo; reflects only the most recent API'''
302 302
303 303 def __init__(self, repo, caps=None):
304 304 super(localpeer, self).__init__()
305 305
306 306 if caps is None:
307 307 caps = moderncaps.copy()
308 308 self._repo = repo.filtered(b'served')
309 309 self.ui = repo.ui
310 310
311 311 if repo._wanted_sidedata:
312 312 formatted = bundle2.format_remote_wanted_sidedata(repo)
313 313 caps.add(b'exp-wanted-sidedata=' + formatted)
314 314
315 315 self._caps = repo._restrictcapabilities(caps)
316 316
317 317 # Begin of _basepeer interface.
318 318
319 319 def url(self):
320 320 return self._repo.url()
321 321
322 322 def local(self):
323 323 return self._repo
324 324
325 325 def peer(self):
326 326 return self
327 327
328 328 def canpush(self):
329 329 return True
330 330
331 331 def close(self):
332 332 self._repo.close()
333 333
334 334 # End of _basepeer interface.
335 335
336 336 # Begin of _basewirecommands interface.
337 337
338 338 def branchmap(self):
339 339 return self._repo.branchmap()
340 340
341 341 def capabilities(self):
342 342 return self._caps
343 343
344 344 def clonebundles(self):
345 345 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
346 346
347 347 def debugwireargs(self, one, two, three=None, four=None, five=None):
348 348 """Used to test argument passing over the wire"""
349 349 return b"%s %s %s %s %s" % (
350 350 one,
351 351 two,
352 352 pycompat.bytestr(three),
353 353 pycompat.bytestr(four),
354 354 pycompat.bytestr(five),
355 355 )
356 356
357 357 def getbundle(
358 358 self,
359 359 source,
360 360 heads=None,
361 361 common=None,
362 362 bundlecaps=None,
363 363 remote_sidedata=None,
364 364 **kwargs
365 365 ):
366 366 chunks = exchange.getbundlechunks(
367 367 self._repo,
368 368 source,
369 369 heads=heads,
370 370 common=common,
371 371 bundlecaps=bundlecaps,
372 372 remote_sidedata=remote_sidedata,
373 373 **kwargs
374 374 )[1]
375 375 cb = util.chunkbuffer(chunks)
376 376
377 377 if exchange.bundle2requested(bundlecaps):
378 378 # When requesting a bundle2, getbundle returns a stream to make the
379 379 # wire level function happier. We need to build a proper object
380 380 # from it in the local peer.
381 381 return bundle2.getunbundler(self.ui, cb)
382 382 else:
383 383 return changegroup.getunbundler(b'01', cb, None)
384 384
385 385 def heads(self):
386 386 return self._repo.heads()
387 387
388 388 def known(self, nodes):
389 389 return self._repo.known(nodes)
390 390
391 391 def listkeys(self, namespace):
392 392 return self._repo.listkeys(namespace)
393 393
394 394 def lookup(self, key):
395 395 return self._repo.lookup(key)
396 396
397 397 def pushkey(self, namespace, key, old, new):
398 398 return self._repo.pushkey(namespace, key, old, new)
399 399
400 400 def stream_out(self):
401 401 raise error.Abort(_(b'cannot perform stream clone against local peer'))
402 402
403 403 def unbundle(self, bundle, heads, url):
404 404 """apply a bundle on a repo
405 405
406 406 This function handles the repo locking itself."""
407 407 try:
408 408 try:
409 409 bundle = exchange.readbundle(self.ui, bundle, None)
410 410 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
411 411 if util.safehasattr(ret, b'getchunks'):
412 412 # This is a bundle20 object, turn it into an unbundler.
413 413 # This little dance should be dropped eventually when the
414 414 # API is finally improved.
415 415 stream = util.chunkbuffer(ret.getchunks())
416 416 ret = bundle2.getunbundler(self.ui, stream)
417 417 return ret
418 418 except Exception as exc:
419 419 # If the exception contains output salvaged from a bundle2
420 420 # reply, we need to make sure it is printed before continuing
421 421 # to fail. So we build a bundle2 with such output and consume
422 422 # it directly.
423 423 #
424 424 # This is not very elegant but allows a "simple" solution for
425 425 # issue4594
426 426 output = getattr(exc, '_bundle2salvagedoutput', ())
427 427 if output:
428 428 bundler = bundle2.bundle20(self._repo.ui)
429 429 for out in output:
430 430 bundler.addpart(out)
431 431 stream = util.chunkbuffer(bundler.getchunks())
432 432 b = bundle2.getunbundler(self.ui, stream)
433 433 bundle2.processbundle(self._repo, b)
434 434 raise
435 435 except error.PushRaced as exc:
436 436 raise error.ResponseError(
437 437 _(b'push failed:'), stringutil.forcebytestr(exc)
438 438 )
439 439
440 440 # End of _basewirecommands interface.
441 441
442 442 # Begin of peer interface.
443 443
444 444 def commandexecutor(self):
445 445 return localcommandexecutor(self)
446 446
447 447 # End of peer interface.
448 448
449 449
450 450 @interfaceutil.implementer(repository.ipeerlegacycommands)
451 451 class locallegacypeer(localpeer):
452 452 """peer extension which implements legacy methods too; used for tests with
453 453 restricted capabilities"""
454 454
455 455 def __init__(self, repo):
456 456 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
457 457
458 458 # Begin of baselegacywirecommands interface.
459 459
460 460 def between(self, pairs):
461 461 return self._repo.between(pairs)
462 462
463 463 def branches(self, nodes):
464 464 return self._repo.branches(nodes)
465 465
466 466 def changegroup(self, nodes, source):
467 467 outgoing = discovery.outgoing(
468 468 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
469 469 )
470 470 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
471 471
472 472 def changegroupsubset(self, bases, heads, source):
473 473 outgoing = discovery.outgoing(
474 474 self._repo, missingroots=bases, ancestorsof=heads
475 475 )
476 476 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
477 477
478 478 # End of baselegacywirecommands interface.
479 479
480 480
481 481 # Functions receiving (ui, features) that extensions can register to impact
482 482 # the ability to load repositories with custom requirements. Only
483 483 # functions defined in loaded extensions are called.
484 484 #
485 485 # The function receives a set of requirement strings that the repository
486 486 # is capable of opening. Functions will typically add elements to the
487 487 # set to reflect that the extension knows how to handle those requirements.
488 488 featuresetupfuncs = set()
489 489
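
A hedged illustration of how an extension would use `featuresetupfuncs` as described in the comment above; the requirement string and module layout are made up:

    # in a hypothetical extension module
    from mercurial import localrepo

    def featuresetup(ui, supported):
        # advertise that repos carrying this (made-up) requirement can be opened
        supported.add(b'exp-myfeature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)

Since only functions defined in loaded extensions are called, registering the hook from `uisetup` keeps the behaviour scoped to setups where the extension is actually enabled.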
490 490
491 491 def _getsharedvfs(hgvfs, requirements):
492 492 """returns the vfs object pointing to root of shared source
493 493 repo for a shared repository
494 494
495 495 hgvfs is vfs pointing at .hg/ of current repo (shared one)
496 496 requirements is a set of requirements of current repo (shared one)
497 497 """
498 498 # The ``shared`` or ``relshared`` requirements indicate the
499 499 # store lives in the path contained in the ``.hg/sharedpath`` file.
500 500 # This is an absolute path for ``shared`` and relative to
501 501 # ``.hg/`` for ``relshared``.
502 502 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
503 503 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
504 504 sharedpath = util.normpath(hgvfs.join(sharedpath))
505 505
506 506 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
507 507
508 508 if not sharedvfs.exists():
509 509 raise error.RepoError(
510 510 _(b'.hg/sharedpath points to nonexistent directory %s')
511 511 % sharedvfs.base
512 512 )
513 513 return sharedvfs
514 514
515 515
516 516 def _readrequires(vfs, allowmissing):
517 517 """reads the require file present at root of this vfs
518 518 and return a set of requirements
519 519
520 520 If allowmissing is True, we suppress ENOENT if raised"""
521 521 # requires file contains a newline-delimited list of
522 522 # features/capabilities the opener (us) must have in order to use
523 523 # the repository. This file was introduced in Mercurial 0.9.2,
524 524 # which means very old repositories may not have one. We assume
525 525 # a missing file translates to no requirements.
526 526 try:
527 527 requirements = set(vfs.read(b'requires').splitlines())
528 528 except IOError as e:
529 529 if not (allowmissing and e.errno == errno.ENOENT):
530 530 raise
531 531 requirements = set()
532 532 return requirements
533 533
534 534
535 535 def makelocalrepository(baseui, path, intents=None):
536 536 """Create a local repository object.
537 537
538 538 Given arguments needed to construct a local repository, this function
539 539 performs various early repository loading functionality (such as
540 540 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
541 541 the repository can be opened, derives a type suitable for representing
542 542 that repository, and returns an instance of it.
543 543
544 544 The returned object conforms to the ``repository.completelocalrepository``
545 545 interface.
546 546
547 547 The repository type is derived by calling a series of factory functions
548 548 for each aspect/interface of the final repository. These are defined by
549 549 ``REPO_INTERFACES``.
550 550
551 551 Each factory function is called to produce a type implementing a specific
552 552 interface. The cumulative list of returned types will be combined into a
553 553 new type and that type will be instantiated to represent the local
554 554 repository.
555 555
556 556 The factory functions each receive various state that may be consulted
557 557 as part of deriving a type.
558 558
559 559 Extensions should wrap these factory functions to customize repository type
560 560 creation. Note that an extension's wrapped function may be called even if
561 561 that extension is not loaded for the repo being constructed. Extensions
562 562 should check if their ``__name__`` appears in the
563 563 ``extensionmodulenames`` set passed to the factory function and no-op if
564 564 not.
565 565 """
566 566 ui = baseui.copy()
567 567 # Prevent copying repo configuration.
568 568 ui.copy = baseui.copy
569 569
570 570 # Working directory VFS rooted at repository root.
571 571 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
572 572
573 573 # Main VFS for .hg/ directory.
574 574 hgpath = wdirvfs.join(b'.hg')
575 575 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
576 576 # Whether this repository is a shared one or not
577 577 shared = False
578 578 # If this repository is shared, the vfs pointing to the shared repo
579 579 sharedvfs = None
580 580
581 581 # The .hg/ path should exist and should be a directory. All other
582 582 # cases are errors.
583 583 if not hgvfs.isdir():
584 584 try:
585 585 hgvfs.stat()
586 586 except OSError as e:
587 587 if e.errno != errno.ENOENT:
588 588 raise
589 589 except ValueError as e:
590 590 # Can be raised on Python 3.8 when path is invalid.
591 591 raise error.Abort(
592 592 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
593 593 )
594 594
595 595 raise error.RepoError(_(b'repository %s not found') % path)
596 596
597 597 requirements = _readrequires(hgvfs, True)
598 598 shared = (
599 599 requirementsmod.SHARED_REQUIREMENT in requirements
600 600 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
601 601 )
602 602 storevfs = None
603 603 if shared:
604 604 # This is a shared repo
605 605 sharedvfs = _getsharedvfs(hgvfs, requirements)
606 606 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
607 607 else:
608 608 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
609 609
610 610 # if .hg/requires contains the sharesafe requirement, it means
611 611 # there exists a `.hg/store/requires` too and we should read it
612 612 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
613 613 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
614 614 # is not present; refer to checkrequirementscompat() for that
615 615 #
616 616 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
617 617 # repository was shared the old way. We check the share source .hg/requires
618 618 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
619 619 # to be reshared
620 620 hint = _(b"see `hg help config.format.use-share-safe` for more information")
621 621 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
622 622
623 623 if (
624 624 shared
625 625 and requirementsmod.SHARESAFE_REQUIREMENT
626 626 not in _readrequires(sharedvfs, True)
627 627 ):
628 628 mismatch_warn = ui.configbool(
629 629 b'share', b'safe-mismatch.source-not-safe.warn'
630 630 )
631 631 mismatch_config = ui.config(
632 632 b'share', b'safe-mismatch.source-not-safe'
633 633 )
634 634 if mismatch_config in (
635 635 b'downgrade-allow',
636 636 b'allow',
637 637 b'downgrade-abort',
638 638 ):
639 639 # prevent cyclic import localrepo -> upgrade -> localrepo
640 640 from . import upgrade
641 641
642 642 upgrade.downgrade_share_to_non_safe(
643 643 ui,
644 644 hgvfs,
645 645 sharedvfs,
646 646 requirements,
647 647 mismatch_config,
648 648 mismatch_warn,
649 649 )
650 650 elif mismatch_config == b'abort':
651 651 raise error.Abort(
652 652 _(b"share source does not support share-safe requirement"),
653 653 hint=hint,
654 654 )
655 655 else:
656 656 raise error.Abort(
657 657 _(
658 658 b"share-safe mismatch with source.\nUnrecognized"
659 659 b" value '%s' of `share.safe-mismatch.source-not-safe`"
660 660 b" set."
661 661 )
662 662 % mismatch_config,
663 663 hint=hint,
664 664 )
665 665 else:
666 666 requirements |= _readrequires(storevfs, False)
667 667 elif shared:
668 668 sourcerequires = _readrequires(sharedvfs, False)
669 669 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
670 670 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
671 671 mismatch_warn = ui.configbool(
672 672 b'share', b'safe-mismatch.source-safe.warn'
673 673 )
674 674 if mismatch_config in (
675 675 b'upgrade-allow',
676 676 b'allow',
677 677 b'upgrade-abort',
678 678 ):
679 679 # prevent cyclic import localrepo -> upgrade -> localrepo
680 680 from . import upgrade
681 681
682 682 upgrade.upgrade_share_to_safe(
683 683 ui,
684 684 hgvfs,
685 685 storevfs,
686 686 requirements,
687 687 mismatch_config,
688 688 mismatch_warn,
689 689 )
690 690 elif mismatch_config == b'abort':
691 691 raise error.Abort(
692 692 _(
693 693 b'version mismatch: source uses share-safe'
694 694 b' functionality while the current share does not'
695 695 ),
696 696 hint=hint,
697 697 )
698 698 else:
699 699 raise error.Abort(
700 700 _(
701 701 b"share-safe mismatch with source.\nUnrecognized"
702 702 b" value '%s' of `share.safe-mismatch.source-safe` set."
703 703 )
704 704 % mismatch_config,
705 705 hint=hint,
706 706 )
707 707
708 708 # The .hg/hgrc file may load extensions or contain config options
709 709 # that influence repository construction. Attempt to load it and
710 710 # process any new extensions that it may have pulled in.
711 711 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
712 712 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
713 713 extensions.loadall(ui)
714 714 extensions.populateui(ui)
715 715
716 716 # Set of module names of extensions loaded for this repository.
717 717 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
718 718
719 719 supportedrequirements = gathersupportedrequirements(ui)
720 720
721 721 # We first validate the requirements are known.
722 722 ensurerequirementsrecognized(requirements, supportedrequirements)
723 723
724 724 # Then we validate that the known set is reasonable to use together.
725 725 ensurerequirementscompatible(ui, requirements)
726 726
727 727 # TODO there are unhandled edge cases related to opening repositories with
728 728 # shared storage. If storage is shared, we should also test for requirements
729 729 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
730 730 # that repo, as that repo may load extensions needed to open it. This is a
731 731 # bit complicated because we don't want the other hgrc to overwrite settings
732 732 # in this hgrc.
733 733 #
734 734 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
735 735 # file when sharing repos. But if a requirement is added after the share is
736 736 # performed, thereby introducing a new requirement for the opener, we may
737 737 # not see that and could encounter a run-time error interacting with
738 738 # that shared store since it has an unknown-to-us requirement.
739 739
740 740 # At this point, we know we should be capable of opening the repository.
741 741 # Now get on with doing that.
742 742
743 743 features = set()
744 744
745 745 # The "store" part of the repository holds versioned data. How it is
746 746 # accessed is determined by various requirements. If `shared` or
747 747 # `relshared` requirements are present, this indicates the current repository
748 748 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
749 749 if shared:
750 750 storebasepath = sharedvfs.base
751 751 cachepath = sharedvfs.join(b'cache')
752 752 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
753 753 else:
754 754 storebasepath = hgvfs.base
755 755 cachepath = hgvfs.join(b'cache')
756 756 wcachepath = hgvfs.join(b'wcache')
757 757
758 758 # The store has changed over time and the exact layout is dictated by
759 759 # requirements. The store interface abstracts differences across all
760 760 # of them.
761 761 store = makestore(
762 762 requirements,
763 763 storebasepath,
764 764 lambda base: vfsmod.vfs(base, cacheaudited=True),
765 765 )
766 766 hgvfs.createmode = store.createmode
767 767
768 768 storevfs = store.vfs
769 769 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
770 770
771 771 if (
772 772 requirementsmod.REVLOGV2_REQUIREMENT in requirements
773 773 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
774 774 ):
775 775 features.add(repository.REPO_FEATURE_SIDE_DATA)
776 776 # the revlogv2 docket introduced a race condition that we need to fix
777 777 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
778 778
779 779 # The cache vfs is used to manage cache files.
780 780 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
781 781 cachevfs.createmode = store.createmode
782 782 # The cache vfs is used to manage cache files related to the working copy
783 783 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
784 784 wcachevfs.createmode = store.createmode
785 785
786 786 # Now resolve the type for the repository object. We do this by repeatedly
787 787 # calling a factory function to produce types for specific aspects of the
788 788 # repo's operation. The aggregate returned types are used as base classes
789 789 # for a dynamically-derived type, which will represent our new repository.
790 790
791 791 bases = []
792 792 extrastate = {}
793 793
794 794 for iface, fn in REPO_INTERFACES:
795 795 # We pass all potentially useful state to give extensions tons of
796 796 # flexibility.
797 797 typ = fn()(
798 798 ui=ui,
799 799 intents=intents,
800 800 requirements=requirements,
801 801 features=features,
802 802 wdirvfs=wdirvfs,
803 803 hgvfs=hgvfs,
804 804 store=store,
805 805 storevfs=storevfs,
806 806 storeoptions=storevfs.options,
807 807 cachevfs=cachevfs,
808 808 wcachevfs=wcachevfs,
809 809 extensionmodulenames=extensionmodulenames,
810 810 extrastate=extrastate,
811 811 baseclasses=bases,
812 812 )
813 813
814 814 if not isinstance(typ, type):
815 815 raise error.ProgrammingError(
816 816 b'unable to construct type for %s' % iface
817 817 )
818 818
819 819 bases.append(typ)
820 820
821 821 # type() allows you to use characters in type names that wouldn't be
822 822 # recognized as Python symbols in source code. We abuse that to add
823 823 # rich information about our constructed repo.
824 824 name = pycompat.sysstr(
825 825 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
826 826 )
827 827
828 828 cls = type(name, tuple(bases), {})
829 829
830 830 return cls(
831 831 baseui=baseui,
832 832 ui=ui,
833 833 origroot=path,
834 834 wdirvfs=wdirvfs,
835 835 hgvfs=hgvfs,
836 836 requirements=requirements,
837 837 supportedrequirements=supportedrequirements,
838 838 sharedpath=storebasepath,
839 839 store=store,
840 840 cachevfs=cachevfs,
841 841 wcachevfs=wcachevfs,
842 842 features=features,
843 843 intents=intents,
844 844 )
845 845
846 846
847 847 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
848 848 """Load hgrc files/content into a ui instance.
849 849
850 850 This is called during repository opening to load any additional
851 851 config files or settings relevant to the current repository.
852 852
853 853 Returns a bool indicating whether any additional configs were loaded.
854 854
855 855 Extensions should monkeypatch this function to modify how per-repo
856 856 configs are loaded. For example, an extension may wish to pull in
857 857 configs from alternate files or sources.
858 858
859 859 sharedvfs is a vfs object pointing to the source repo if the current one is a
860 860 shared one
861 861 """
862 862 if not rcutil.use_repo_hgrc():
863 863 return False
864 864
865 865 ret = False
866 866 # first load config from the shared source if we have to
867 867 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
868 868 try:
869 869 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
870 870 ret = True
871 871 except IOError:
872 872 pass
873 873
874 874 try:
875 875 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
876 876 ret = True
877 877 except IOError:
878 878 pass
879 879
880 880 try:
881 881 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
882 882 ret = True
883 883 except IOError:
884 884 pass
885 885
886 886 return ret
887 887
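
As a sketch of the monkeypatching suggested in the `loadhgrc` docstring above (the extra file name `myext.rc` is purely illustrative, not an existing convention):

    from mercurial import extensions, localrepo

    def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
        ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
        try:
            # pull in one extra, extension-specific config file from .hg/
            ui.readconfig(hgvfs.join(b'myext.rc'), root=wdirvfs.base)
            ret = True
        except IOError:
            pass
        return ret

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)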
888 888
889 889 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
890 890 """Perform additional actions after .hg/hgrc is loaded.
891 891
892 892 This function is called during repository loading immediately after
893 893 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
894 894
895 895 The function can be used to validate configs, automatically add
896 896 options (including extensions) based on requirements, etc.
897 897 """
898 898
899 899 # Map of requirements to list of extensions to load automatically when
900 900 # requirement is present.
901 901 autoextensions = {
902 902 b'git': [b'git'],
903 903 b'largefiles': [b'largefiles'],
904 904 b'lfs': [b'lfs'],
905 905 }
906 906
907 907 for requirement, names in sorted(autoextensions.items()):
908 908 if requirement not in requirements:
909 909 continue
910 910
911 911 for name in names:
912 912 if not ui.hasconfig(b'extensions', name):
913 913 ui.setconfig(b'extensions', name, b'', source=b'autoload')
914 914
915 915
916 916 def gathersupportedrequirements(ui):
917 917 """Determine the complete set of recognized requirements."""
918 918 # Start with all requirements supported by this file.
919 919 supported = set(localrepository._basesupported)
920 920
921 921 # Execute ``featuresetupfuncs`` entries if they belong to an extension
922 922 # relevant to this ui instance.
923 923 modules = {m.__name__ for n, m in extensions.extensions(ui)}
924 924
925 925 for fn in featuresetupfuncs:
926 926 if fn.__module__ in modules:
927 927 fn(ui, supported)
928 928
929 929 # Add derived requirements from registered compression engines.
930 930 for name in util.compengines:
931 931 engine = util.compengines[name]
932 932 if engine.available() and engine.revlogheader():
933 933 supported.add(b'exp-compression-%s' % name)
934 934 if engine.name() == b'zstd':
935 935 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
936 936
937 937 return supported
938 938
939 939
940 940 def ensurerequirementsrecognized(requirements, supported):
941 941 """Validate that a set of local requirements is recognized.
942 942
943 943 Receives a set of requirements. Raises an ``error.RepoError`` if there
944 944 exists any requirement in that set that currently loaded code doesn't
945 945 recognize.
946 946
947 947 Returns a set of supported requirements.
948 948 """
949 949 missing = set()
950 950
951 951 for requirement in requirements:
952 952 if requirement in supported:
953 953 continue
954 954
955 955 if not requirement or not requirement[0:1].isalnum():
956 956 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
957 957
958 958 missing.add(requirement)
959 959
960 960 if missing:
961 961 raise error.RequirementError(
962 962 _(b'repository requires features unknown to this Mercurial: %s')
963 963 % b' '.join(sorted(missing)),
964 964 hint=_(
965 965 b'see https://mercurial-scm.org/wiki/MissingRequirement '
966 966 b'for more information'
967 967 ),
968 968 )
969 969
970 970
971 971 def ensurerequirementscompatible(ui, requirements):
972 972 """Validates that a set of recognized requirements is mutually compatible.
973 973
974 974 Some requirements may not be compatible with others or require
975 975 config options that aren't enabled. This function is called during
976 976 repository opening to ensure that the set of requirements needed
977 977 to open a repository is sane and compatible with config options.
978 978
979 979 Extensions can monkeypatch this function to perform additional
980 980 checking.
981 981
982 982 ``error.RepoError`` should be raised on failure.
983 983 """
984 984 if (
985 985 requirementsmod.SPARSE_REQUIREMENT in requirements
986 986 and not sparse.enabled
987 987 ):
988 988 raise error.RepoError(
989 989 _(
990 990 b'repository is using sparse feature but '
991 991 b'sparse is not enabled; enable the '
992 992 b'"sparse" extensions to access'
993 993 )
994 994 )
995 995
996 996
997 997 def makestore(requirements, path, vfstype):
998 998 """Construct a storage object for a repository."""
999 999 if requirementsmod.STORE_REQUIREMENT in requirements:
1000 1000 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1001 1001 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1002 1002 return storemod.fncachestore(path, vfstype, dotencode)
1003 1003
1004 1004 return storemod.encodedstore(path, vfstype)
1005 1005
1006 1006 return storemod.basicstore(path, vfstype)
1007 1007
1008 1008
1009 1009 def resolvestorevfsoptions(ui, requirements, features):
1010 1010 """Resolve the options to pass to the store vfs opener.
1011 1011
1012 1012 The returned dict is used to influence behavior of the storage layer.
1013 1013 """
1014 1014 options = {}
1015 1015
1016 1016 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1017 1017 options[b'treemanifest'] = True
1018 1018
1019 1019 # experimental config: format.manifestcachesize
1020 1020 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1021 1021 if manifestcachesize is not None:
1022 1022 options[b'manifestcachesize'] = manifestcachesize
1023 1023
1024 1024 # In the absence of another requirement superseding a revlog-related
1025 1025 # requirement, we have to assume the repo is using revlog version 0.
1026 1026 # This revlog format is super old and we don't bother trying to parse
1027 1027 # opener options for it because those options wouldn't do anything
1028 1028 # meaningful on such old repos.
1029 1029 if (
1030 1030 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1031 1031 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1032 1032 ):
1033 1033 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1034 1034 else: # explicitly mark repo as using revlogv0
1035 1035 options[b'revlogv0'] = True
1036 1036
1037 1037 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1038 1038 options[b'copies-storage'] = b'changeset-sidedata'
1039 1039 else:
1040 1040 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1041 1041 copiesextramode = (b'changeset-only', b'compatibility')
1042 1042 if writecopiesto in copiesextramode:
1043 1043 options[b'copies-storage'] = b'extra'
1044 1044
1045 1045 return options
1046 1046
1047 1047
1048 1048 def resolverevlogstorevfsoptions(ui, requirements, features):
1049 1049 """Resolve opener options specific to revlogs."""
1050 1050
1051 1051 options = {}
1052 1052 options[b'flagprocessors'] = {}
1053 1053
1054 1054 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1055 1055 options[b'revlogv1'] = True
1056 1056 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1057 1057 options[b'revlogv2'] = True
1058 1058 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1059 1059 options[b'changelogv2'] = True
1060 1060
1061 1061 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1062 1062 options[b'generaldelta'] = True
1063 1063
1064 1064 # experimental config: format.chunkcachesize
1065 1065 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1066 1066 if chunkcachesize is not None:
1067 1067 options[b'chunkcachesize'] = chunkcachesize
1068 1068
1069 1069 deltabothparents = ui.configbool(
1070 1070 b'storage', b'revlog.optimize-delta-parent-choice'
1071 1071 )
1072 1072 options[b'deltabothparents'] = deltabothparents
1073 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1073 1074
1074 1075 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1075 1076 options[b'issue6528.fix-incoming'] = issue6528
1076 1077
1077 1078 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1078 1079 lazydeltabase = False
1079 1080 if lazydelta:
1080 1081 lazydeltabase = ui.configbool(
1081 1082 b'storage', b'revlog.reuse-external-delta-parent'
1082 1083 )
1083 1084 if lazydeltabase is None:
1084 1085 lazydeltabase = not scmutil.gddeltaconfig(ui)
1085 1086 options[b'lazydelta'] = lazydelta
1086 1087 options[b'lazydeltabase'] = lazydeltabase
1087 1088
1088 1089 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1089 1090 if 0 <= chainspan:
1090 1091 options[b'maxdeltachainspan'] = chainspan
1091 1092
1092 1093 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1093 1094 if mmapindexthreshold is not None:
1094 1095 options[b'mmapindexthreshold'] = mmapindexthreshold
1095 1096
1096 1097 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1097 1098 srdensitythres = float(
1098 1099 ui.config(b'experimental', b'sparse-read.density-threshold')
1099 1100 )
1100 1101 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1101 1102 options[b'with-sparse-read'] = withsparseread
1102 1103 options[b'sparse-read-density-threshold'] = srdensitythres
1103 1104 options[b'sparse-read-min-gap-size'] = srmingapsize
1104 1105
1105 1106 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1106 1107 options[b'sparse-revlog'] = sparserevlog
1107 1108 if sparserevlog:
1108 1109 options[b'generaldelta'] = True
1109 1110
1110 1111 maxchainlen = None
1111 1112 if sparserevlog:
1112 1113 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1113 1114 # experimental config: format.maxchainlen
1114 1115 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1115 1116 if maxchainlen is not None:
1116 1117 options[b'maxchainlen'] = maxchainlen
1117 1118
1118 1119 for r in requirements:
1119 1120 # we allow multiple compression engine requirements to co-exist because,
1120 1121 # strictly speaking, revlog seems to support mixed compression styles.
1121 1122 #
1122 1123 # The compression used for new entries will be "the last one"
1123 1124 prefix = r.startswith
1124 1125 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1125 1126 options[b'compengine'] = r.split(b'-', 2)[2]
1126 1127
1127 1128 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1128 1129 if options[b'zlib.level'] is not None:
1129 1130 if not (0 <= options[b'zlib.level'] <= 9):
1130 1131 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1131 1132 raise error.Abort(msg % options[b'zlib.level'])
1132 1133 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1133 1134 if options[b'zstd.level'] is not None:
1134 1135 if not (0 <= options[b'zstd.level'] <= 22):
1135 1136 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1136 1137 raise error.Abort(msg % options[b'zstd.level'])
1137 1138
1138 1139 if requirementsmod.NARROW_REQUIREMENT in requirements:
1139 1140 options[b'enableellipsis'] = True
1140 1141
1141 1142 if ui.configbool(b'experimental', b'rust.index'):
1142 1143 options[b'rust.index'] = True
1143 1144 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1144 1145 slow_path = ui.config(
1145 1146 b'storage', b'revlog.persistent-nodemap.slow-path'
1146 1147 )
1147 1148 if slow_path not in (b'allow', b'warn', b'abort'):
1148 1149 default = ui.config_default(
1149 1150 b'storage', b'revlog.persistent-nodemap.slow-path'
1150 1151 )
1151 1152 msg = _(
1152 1153 b'unknown value for config '
1153 1154 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1154 1155 )
1155 1156 ui.warn(msg % slow_path)
1156 1157 if not ui.quiet:
1157 1158 ui.warn(_(b'falling back to default value: %s\n') % default)
1158 1159 slow_path = default
1159 1160
1160 1161 msg = _(
1161 1162 b"accessing `persistent-nodemap` repository without associated "
1162 1163 b"fast implementation."
1163 1164 )
1164 1165 hint = _(
1165 1166 b"check `hg help config.format.use-persistent-nodemap` "
1166 1167 b"for details"
1167 1168 )
1168 1169 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1169 1170 if slow_path == b'warn':
1170 1171 msg = b"warning: " + msg + b'\n'
1171 1172 ui.warn(msg)
1172 1173 if not ui.quiet:
1173 1174 hint = b'(' + hint + b')\n'
1174 1175 ui.warn(hint)
1175 1176 if slow_path == b'abort':
1176 1177 raise error.Abort(msg, hint=hint)
1177 1178 options[b'persistent-nodemap'] = True
1178 1179 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1179 1180 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1180 1181 if slow_path not in (b'allow', b'warn', b'abort'):
1181 1182 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1182 1183 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1183 1184 ui.warn(msg % slow_path)
1184 1185 if not ui.quiet:
1185 1186 ui.warn(_(b'falling back to default value: %s\n') % default)
1186 1187 slow_path = default
1187 1188
1188 1189 msg = _(
1189 1190 b"accessing `dirstate-v2` repository without associated "
1190 1191 b"fast implementation."
1191 1192 )
1192 1193 hint = _(
1193 1194 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1194 1195 )
1195 1196 if not dirstate.HAS_FAST_DIRSTATE_V2:
1196 1197 if slow_path == b'warn':
1197 1198 msg = b"warning: " + msg + b'\n'
1198 1199 ui.warn(msg)
1199 1200 if not ui.quiet:
1200 1201 hint = b'(' + hint + b')\n'
1201 1202 ui.warn(hint)
1202 1203 if slow_path == b'abort':
1203 1204 raise error.Abort(msg, hint=hint)
1204 1205 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1205 1206 options[b'persistent-nodemap.mmap'] = True
1206 1207 if ui.configbool(b'devel', b'persistent-nodemap'):
1207 1208 options[b'devel-force-nodemap'] = True
1208 1209
1209 1210 return options
1210 1211
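
The new `debug-delta` entry added to the options above is driven by the `debug.revlog.debug-delta` knob from this changeset's summary. A minimal sketch of turning it on programmatically, assuming the usual hgrc spelling `[debug] revlog.debug-delta = yes`; the actual output the revlog code emits when the option is set is outside this hunk:

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    ui.setconfig(b'debug', b'revlog.debug-delta', b'yes')
    repo = hg.repository(ui, b'.')  # storevfs.options now carries b'debug-delta'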
1211 1212
1212 1213 def makemain(**kwargs):
1213 1214 """Produce a type conforming to ``ilocalrepositorymain``."""
1214 1215 return localrepository
1215 1216
1216 1217
1217 1218 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1218 1219 class revlogfilestorage:
1219 1220 """File storage when using revlogs."""
1220 1221
1221 1222 def file(self, path):
1222 1223 if path.startswith(b'/'):
1223 1224 path = path[1:]
1224 1225
1225 1226 return filelog.filelog(self.svfs, path)
1226 1227
1227 1228
1228 1229 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1229 1230 class revlognarrowfilestorage:
1230 1231 """File storage when using revlogs and narrow files."""
1231 1232
1232 1233 def file(self, path):
1233 1234 if path.startswith(b'/'):
1234 1235 path = path[1:]
1235 1236
1236 1237 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1237 1238
1238 1239
1239 1240 def makefilestorage(requirements, features, **kwargs):
1240 1241 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1241 1242 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1242 1243 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1243 1244
1244 1245 if requirementsmod.NARROW_REQUIREMENT in requirements:
1245 1246 return revlognarrowfilestorage
1246 1247 else:
1247 1248 return revlogfilestorage
1248 1249
1249 1250
1250 1251 # List of repository interfaces and factory functions for them. Each
1251 1252 # will be called in order during ``makelocalrepository()`` to iteratively
1252 1253 # derive the final type for a local repository instance. We capture the
1253 1254 # function as a lambda so we don't hold a reference and the module-level
1254 1255 # functions can be wrapped.
1255 1256 REPO_INTERFACES = [
1256 1257 (repository.ilocalrepositorymain, lambda: makemain),
1257 1258 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1258 1259 ]
1259 1260
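
A hedged sketch of the wrapping pattern that the comment above (and the `makelocalrepository` docstring) describes; the mixin and derived class names are hypothetical:

    from mercurial import extensions, localrepo

    class _myextrepo:
        """hypothetical mixin adding extension-specific repo behaviour"""

    def wrapmakemain(orig, **kwargs):
        cls = orig(**kwargs)
        # only act when this extension was loaded for the repo being built
        if __name__ in kwargs.get('extensionmodulenames', set()):
            return type('myextrepo', (_myextrepo, cls), {})
        return cls

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'makemain', wrapmakemain)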
1260 1261
1261 1262 @interfaceutil.implementer(repository.ilocalrepositorymain)
1262 1263 class localrepository:
1263 1264 """Main class for representing local repositories.
1264 1265
1265 1266 All local repositories are instances of this class.
1266 1267
1267 1268 Constructed on its own, instances of this class are not usable as
1268 1269 repository objects. To obtain a usable repository object, call
1269 1270 ``hg.repository()``, ``localrepo.instance()``, or
1270 1271 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1271 1272 ``instance()`` adds support for creating new repositories.
1272 1273 ``hg.repository()`` adds more extension integration, including calling
1273 1274 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1274 1275 used.
1275 1276 """
1276 1277
1277 1278 _basesupported = {
1278 1279 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1279 1280 requirementsmod.CHANGELOGV2_REQUIREMENT,
1280 1281 requirementsmod.COPIESSDC_REQUIREMENT,
1281 1282 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1282 1283 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1283 1284 requirementsmod.DOTENCODE_REQUIREMENT,
1284 1285 requirementsmod.FNCACHE_REQUIREMENT,
1285 1286 requirementsmod.GENERALDELTA_REQUIREMENT,
1286 1287 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1287 1288 requirementsmod.NODEMAP_REQUIREMENT,
1288 1289 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1289 1290 requirementsmod.REVLOGV1_REQUIREMENT,
1290 1291 requirementsmod.REVLOGV2_REQUIREMENT,
1291 1292 requirementsmod.SHARED_REQUIREMENT,
1292 1293 requirementsmod.SHARESAFE_REQUIREMENT,
1293 1294 requirementsmod.SPARSE_REQUIREMENT,
1294 1295 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1295 1296 requirementsmod.STORE_REQUIREMENT,
1296 1297 requirementsmod.TREEMANIFEST_REQUIREMENT,
1297 1298 }
1298 1299
1299 1300 # list of prefixes for files which can be written without 'wlock'
1300 1301 # Extensions should extend this list when needed
1301 1302 _wlockfreeprefix = {
1302 1303 # We might consider requiring 'wlock' for the next
1303 1304 # two, but pretty much all the existing code assumes
1304 1305 # wlock is not needed so we keep them excluded for
1305 1306 # now.
1306 1307 b'hgrc',
1307 1308 b'requires',
1308 1309 # XXX cache is a complicated business; someone
1309 1310 # should investigate this in depth at some point
1310 1311 b'cache/',
1311 1312 # XXX shouldn't dirstate be covered by the wlock?
1312 1313 b'dirstate',
1313 1314 # XXX bisect was still a bit too messy at the time
1314 1315 # this changeset was introduced. Someone should fix
1315 1316 # the remaining bit and drop this line
1316 1317 b'bisect.state',
1317 1318 }
1318 1319
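# Illustrative sketch (not part of the upstream file): an extension that
# keeps its own state file under .hg/ and writes it without taking 'wlock'
# could extend this set at setup time; the file name below is hypothetical.
#
#     from mercurial import localrepo
#     localrepo.localrepository._wlockfreeprefix.add(b'myext-state')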
1319 1320 def __init__(
1320 1321 self,
1321 1322 baseui,
1322 1323 ui,
1323 1324 origroot,
1324 1325 wdirvfs,
1325 1326 hgvfs,
1326 1327 requirements,
1327 1328 supportedrequirements,
1328 1329 sharedpath,
1329 1330 store,
1330 1331 cachevfs,
1331 1332 wcachevfs,
1332 1333 features,
1333 1334 intents=None,
1334 1335 ):
1335 1336 """Create a new local repository instance.
1336 1337
1337 1338 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1338 1339 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1339 1340 object.
1340 1341
1341 1342 Arguments:
1342 1343
1343 1344 baseui
1344 1345 ``ui.ui`` instance that ``ui`` argument was based off of.
1345 1346
1346 1347 ui
1347 1348 ``ui.ui`` instance for use by the repository.
1348 1349
1349 1350 origroot
1350 1351 ``bytes`` path to working directory root of this repository.
1351 1352
1352 1353 wdirvfs
1353 1354 ``vfs.vfs`` rooted at the working directory.
1354 1355
1355 1356 hgvfs
1356 1357 ``vfs.vfs`` rooted at .hg/
1357 1358
1358 1359 requirements
1359 1360 ``set`` of bytestrings representing repository opening requirements.
1360 1361
1361 1362 supportedrequirements
1362 1363 ``set`` of bytestrings representing repository requirements that we
1363 1364 know how to open. May be a superset of ``requirements``.
1364 1365
1365 1366 sharedpath
1366 1367 ``bytes`` defining the path to the storage base directory. Points to a
1367 1368 ``.hg/`` directory somewhere.
1368 1369
1369 1370 store
1370 1371 ``store.basicstore`` (or derived) instance providing access to
1371 1372 versioned storage.
1372 1373
1373 1374 cachevfs
1374 1375 ``vfs.vfs`` used for cache files.
1375 1376
1376 1377 wcachevfs
1377 1378 ``vfs.vfs`` used for cache files related to the working copy.
1378 1379
1379 1380 features
1380 1381 ``set`` of bytestrings defining features/capabilities of this
1381 1382 instance.
1382 1383
1383 1384 intents
1384 1385 ``set`` of system strings indicating what this repo will be used
1385 1386 for.
1386 1387 """
1387 1388 self.baseui = baseui
1388 1389 self.ui = ui
1389 1390 self.origroot = origroot
1390 1391 # vfs rooted at working directory.
1391 1392 self.wvfs = wdirvfs
1392 1393 self.root = wdirvfs.base
1393 1394 # vfs rooted at .hg/. Used to access most non-store paths.
1394 1395 self.vfs = hgvfs
1395 1396 self.path = hgvfs.base
1396 1397 self.requirements = requirements
1397 1398 self.nodeconstants = sha1nodeconstants
1398 1399 self.nullid = self.nodeconstants.nullid
1399 1400 self.supported = supportedrequirements
1400 1401 self.sharedpath = sharedpath
1401 1402 self.store = store
1402 1403 self.cachevfs = cachevfs
1403 1404 self.wcachevfs = wcachevfs
1404 1405 self.features = features
1405 1406
1406 1407 self.filtername = None
1407 1408
1408 1409 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1409 1410 b'devel', b'check-locks'
1410 1411 ):
1411 1412 self.vfs.audit = self._getvfsward(self.vfs.audit)
1412 1413 # A list of callbacks to shape the phases if no data were found.
1413 1414 # Callbacks are in the form: func(repo, roots) --> processed root.
1414 1415 # This list is to be filled by extensions during repo setup
1415 1416 self._phasedefaults = []
1416 1417
1417 1418 color.setup(self.ui)
1418 1419
1419 1420 self.spath = self.store.path
1420 1421 self.svfs = self.store.vfs
1421 1422 self.sjoin = self.store.join
1422 1423 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1423 1424 b'devel', b'check-locks'
1424 1425 ):
1425 1426 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1426 1427 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1427 1428 else: # standard vfs
1428 1429 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1429 1430
1430 1431 self._dirstatevalidatewarned = False
1431 1432
1432 1433 self._branchcaches = branchmap.BranchMapCache()
1433 1434 self._revbranchcache = None
1434 1435 self._filterpats = {}
1435 1436 self._datafilters = {}
1436 1437 self._transref = self._lockref = self._wlockref = None
1437 1438
1438 1439 # A cache for various files under .hg/ that tracks file changes,
1439 1440 # (used by the filecache decorator)
1440 1441 #
1441 1442 # Maps a property name to its util.filecacheentry
1442 1443 self._filecache = {}
1443 1444
1444 1445 # hold sets of revision to be filtered
1445 1446 # should be cleared when something might have changed the filter value:
1446 1447 # - new changesets,
1447 1448 # - phase change,
1448 1449 # - new obsolescence marker,
1449 1450 # - working directory parent change,
1450 1451 # - bookmark changes
1451 1452 self.filteredrevcache = {}
1452 1453
1453 1454 # post-dirstate-status hooks
1454 1455 self._postdsstatus = []
1455 1456
1456 1457 # generic mapping between names and nodes
1457 1458 self.names = namespaces.namespaces()
1458 1459
1459 1460 # Key to signature value.
1460 1461 self._sparsesignaturecache = {}
1461 1462 # Signature to cached matcher instance.
1462 1463 self._sparsematchercache = {}
1463 1464
1464 1465 self._extrafilterid = repoview.extrafilter(ui)
1465 1466
1466 1467 self.filecopiesmode = None
1467 1468 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1468 1469 self.filecopiesmode = b'changeset-sidedata'
1469 1470
1470 1471 self._wanted_sidedata = set()
1471 1472 self._sidedata_computers = {}
1472 1473 sidedatamod.set_sidedata_spec_for_repo(self)
1473 1474
1474 1475 def _getvfsward(self, origfunc):
1475 1476 """build a ward for self.vfs"""
1476 1477 rref = weakref.ref(self)
1477 1478
1478 1479 def checkvfs(path, mode=None):
1479 1480 ret = origfunc(path, mode=mode)
1480 1481 repo = rref()
1481 1482 if (
1482 1483 repo is None
1483 1484 or not util.safehasattr(repo, b'_wlockref')
1484 1485 or not util.safehasattr(repo, b'_lockref')
1485 1486 ):
1486 1487 return
1487 1488 if mode in (None, b'r', b'rb'):
1488 1489 return
1489 1490 if path.startswith(repo.path):
1490 1491 # truncate name relative to the repository (.hg)
1491 1492 path = path[len(repo.path) + 1 :]
1492 1493 if path.startswith(b'cache/'):
1493 1494 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1494 1495 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1495 1496 # path prefixes covered by 'lock'
1496 1497 vfs_path_prefixes = (
1497 1498 b'journal.',
1498 1499 b'undo.',
1499 1500 b'strip-backup/',
1500 1501 b'cache/',
1501 1502 )
1502 1503 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1503 1504 if repo._currentlock(repo._lockref) is None:
1504 1505 repo.ui.develwarn(
1505 1506 b'write with no lock: "%s"' % path,
1506 1507 stacklevel=3,
1507 1508 config=b'check-locks',
1508 1509 )
1509 1510 elif repo._currentlock(repo._wlockref) is None:
1510 1511 # rest of vfs files are covered by 'wlock'
1511 1512 #
1512 1513 # exclude special files
1513 1514 for prefix in self._wlockfreeprefix:
1514 1515 if path.startswith(prefix):
1515 1516 return
1516 1517 repo.ui.develwarn(
1517 1518 b'write with no wlock: "%s"' % path,
1518 1519 stacklevel=3,
1519 1520 config=b'check-locks',
1520 1521 )
1521 1522 return ret
1522 1523
1523 1524 return checkvfs
1524 1525
1525 1526 def _getsvfsward(self, origfunc):
1526 1527 """build a ward for self.svfs"""
1527 1528 rref = weakref.ref(self)
1528 1529
1529 1530 def checksvfs(path, mode=None):
1530 1531 ret = origfunc(path, mode=mode)
1531 1532 repo = rref()
1532 1533 if repo is None or not util.safehasattr(repo, b'_lockref'):
1533 1534 return
1534 1535 if mode in (None, b'r', b'rb'):
1535 1536 return
1536 1537 if path.startswith(repo.sharedpath):
1537 1538 # truncate name relative to the repository (.hg)
1538 1539 path = path[len(repo.sharedpath) + 1 :]
1539 1540 if repo._currentlock(repo._lockref) is None:
1540 1541 repo.ui.develwarn(
1541 1542 b'write with no lock: "%s"' % path, stacklevel=4
1542 1543 )
1543 1544 return ret
1544 1545
1545 1546 return checksvfs
1546 1547
1547 1548 def close(self):
1548 1549 self._writecaches()
1549 1550
1550 1551 def _writecaches(self):
1551 1552 if self._revbranchcache:
1552 1553 self._revbranchcache.write()
1553 1554
1554 1555 def _restrictcapabilities(self, caps):
1555 1556 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1556 1557 caps = set(caps)
1557 1558 capsblob = bundle2.encodecaps(
1558 1559 bundle2.getrepocaps(self, role=b'client')
1559 1560 )
1560 1561 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1561 1562 if self.ui.configbool(b'experimental', b'narrow'):
1562 1563 caps.add(wireprototypes.NARROWCAP)
1563 1564 return caps
1564 1565
1565 1566 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1566 1567 # self -> auditor -> self._checknested -> self
1567 1568
1568 1569 @property
1569 1570 def auditor(self):
1570 1571 # This is only used by context.workingctx.match in order to
1571 1572 # detect files in subrepos.
1572 1573 return pathutil.pathauditor(self.root, callback=self._checknested)
1573 1574
1574 1575 @property
1575 1576 def nofsauditor(self):
1576 1577 # This is only used by context.basectx.match in order to detect
1577 1578 # files in subrepos.
1578 1579 return pathutil.pathauditor(
1579 1580 self.root, callback=self._checknested, realfs=False, cached=True
1580 1581 )
1581 1582
1582 1583 def _checknested(self, path):
1583 1584 """Determine if path is a legal nested repository."""
1584 1585 if not path.startswith(self.root):
1585 1586 return False
1586 1587 subpath = path[len(self.root) + 1 :]
1587 1588 normsubpath = util.pconvert(subpath)
1588 1589
1589 1590 # XXX: Checking against the current working copy is wrong in
1590 1591 # the sense that it can reject things like
1591 1592 #
1592 1593 # $ hg cat -r 10 sub/x.txt
1593 1594 #
1594 1595 # if sub/ is no longer a subrepository in the working copy
1595 1596 # parent revision.
1596 1597 #
1597 1598 # However, it can of course also allow things that would have
1598 1599 # been rejected before, such as the above cat command if sub/
1599 1600 # is a subrepository now, but was a normal directory before.
1600 1601 # The old path auditor would have rejected by mistake since it
1601 1602 # panics when it sees sub/.hg/.
1602 1603 #
1603 1604 # All in all, checking against the working copy seems sensible
1604 1605 # since we want to prevent access to nested repositories on
1605 1606 # the filesystem *now*.
1606 1607 ctx = self[None]
1607 1608 parts = util.splitpath(subpath)
1608 1609 while parts:
1609 1610 prefix = b'/'.join(parts)
1610 1611 if prefix in ctx.substate:
1611 1612 if prefix == normsubpath:
1612 1613 return True
1613 1614 else:
1614 1615 sub = ctx.sub(prefix)
1615 1616 return sub.checknested(subpath[len(prefix) + 1 :])
1616 1617 else:
1617 1618 parts.pop()
1618 1619 return False
1619 1620
1620 1621 def peer(self):
1621 1622 return localpeer(self) # not cached to avoid reference cycle
1622 1623
1623 1624 def unfiltered(self):
1624 1625 """Return unfiltered version of the repository
1625 1626
1626 1627 Intended to be overwritten by filtered repo."""
1627 1628 return self
1628 1629
1629 1630 def filtered(self, name, visibilityexceptions=None):
1630 1631 """Return a filtered version of a repository
1631 1632
1632 1633 The `name` parameter is the identifier of the requested view. This
1633 1634 will return a repoview object set "exactly" to the specified view.
1634 1635
1635 1636 This function does not apply recursive filtering to a repository. For
1636 1637 example calling `repo.filtered("served")` will return a repoview using
1637 1638 the "served" view, regardless of the initial view used by `repo`.
1638 1639
1639 1640 In other words, there is always only one level of `repoview` "filtering".
1640 1641 """
1641 1642 if self._extrafilterid is not None and b'%' not in name:
1642 1643 name = name + b'%' + self._extrafilterid
1643 1644
1644 1645 cls = repoview.newtype(self.unfiltered().__class__)
1645 1646 return cls(self, name, visibilityexceptions)
1646 1647
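# Illustrative sketch (not part of the upstream file): obtaining a view of
# the repository restricted to a named filter, regardless of the view the
# current object already uses.
#
#     served = repo.filtered(b'served')
#     visible = repo.filtered(b'visible')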
1647 1648 @mixedrepostorecache(
1648 1649 (b'bookmarks', b'plain'),
1649 1650 (b'bookmarks.current', b'plain'),
1650 1651 (b'bookmarks', b''),
1651 1652 (b'00changelog.i', b''),
1652 1653 )
1653 1654 def _bookmarks(self):
1654 1655 # Since the multiple files involved in the transaction cannot be
1655 1656 # written atomically (with current repository format), there is a race
1656 1657 # condition here.
1657 1658 #
1658 1659 # 1) changelog content A is read
1659 1660 # 2) outside transaction update changelog to content B
1660 1661 # 3) outside transaction update bookmark file referring to content B
1661 1662 # 4) bookmarks file content is read and filtered against changelog-A
1662 1663 #
1663 1664 # When this happens, bookmarks against nodes missing from A are dropped.
1664 1665 #
1665 1666 # Having this happen during a read is not great, but it becomes worse
1666 1667 # when this happens during a write, because the bookmarks to the "unknown"
1667 1668 # nodes will be dropped for good. However, writes happen within locks.
1668 1669 # This locking makes it possible to have a race-free consistent read.
1669 1670 # For this purpose, data read from disk before locking are
1670 1671 # "invalidated" right after the locks are taken. These invalidations are
1671 1672 # "light": the `filecache` mechanism keeps the data in memory and will
1672 1673 # reuse it if the underlying files did not change. Not parsing the
1673 1674 # same data multiple times helps performance.
1674 1675 #
1675 1676 # Unfortunately, in the case described above, the files tracked by the
1676 1677 # bookmarks file cache might not have changed, but the in-memory
1677 1678 # content is still "wrong" because we used an older changelog content
1678 1679 # to process the on-disk data. So after locking, the changelog would be
1679 1680 # refreshed but `_bookmarks` would be preserved.
1680 1681 # Adding `00changelog.i` to the list of tracked files is not
1681 1682 # enough, because at the time we build the content for `_bookmarks` in
1682 1683 # (4), the changelog file has already diverged from the content used
1683 1684 # for loading `changelog` in (1)
1684 1685 #
1685 1686 # To prevent the issue, we force the changelog to be explicitly
1686 1687 # reloaded while computing `_bookmarks`. The data race can still happen
1687 1688 # without the lock (with a narrower window), but it would no longer go
1688 1689 # undetected during the lock time refresh.
1689 1690 #
1690 1691 # The new schedule is as follows:
1691 1692 #
1692 1693 # 1) filecache logic detects that `_bookmarks` needs to be computed
1693 1694 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1694 1695 # 3) We force `changelog` filecache to be tested
1695 1696 # 4) cachestat for `changelog` are captured (for changelog)
1696 1697 # 5) `_bookmarks` is computed and cached
1697 1698 #
1698 1699 # The step in (3) ensures we have a changelog at least as recent as the
1699 1700 # cache stat computed in (1). As a result, at locking time:
1700 1701 # * if the changelog did not change since (1) -> we can reuse the data
1701 1702 # * otherwise -> the bookmarks get refreshed.
1702 1703 self._refreshchangelog()
1703 1704 return bookmarks.bmstore(self)
1704 1705
1705 1706 def _refreshchangelog(self):
1706 1707 """make sure the in memory changelog match the on-disk one"""
1707 1708 if 'changelog' in vars(self) and self.currenttransaction() is None:
1708 1709 del self.changelog
1709 1710
1710 1711 @property
1711 1712 def _activebookmark(self):
1712 1713 return self._bookmarks.active
1713 1714
1714 1715 # _phasesets depend on changelog. what we need is to call
1715 1716 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1716 1717 # can't be easily expressed in filecache mechanism.
1717 1718 @storecache(b'phaseroots', b'00changelog.i')
1718 1719 def _phasecache(self):
1719 1720 return phases.phasecache(self, self._phasedefaults)
1720 1721
1721 1722 @storecache(b'obsstore')
1722 1723 def obsstore(self):
1723 1724 return obsolete.makestore(self.ui, self)
1724 1725
1725 1726 @changelogcache()
1726 1727 def changelog(repo):
1727 1728 # load dirstate before changelog to avoid race see issue6303
1728 1729 repo.dirstate.prefetch_parents()
1729 1730 return repo.store.changelog(
1730 1731 txnutil.mayhavepending(repo.root),
1731 1732 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1732 1733 )
1733 1734
1734 1735 @manifestlogcache()
1735 1736 def manifestlog(self):
1736 1737 return self.store.manifestlog(self, self._storenarrowmatch)
1737 1738
1738 1739 @repofilecache(b'dirstate')
1739 1740 def dirstate(self):
1740 1741 return self._makedirstate()
1741 1742
1742 1743 def _makedirstate(self):
1743 1744 """Extension point for wrapping the dirstate per-repo."""
1744 1745 sparsematchfn = lambda: sparse.matcher(self)
1745 1746 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1746 1747 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1747 1748 use_dirstate_v2 = v2_req in self.requirements
1748 1749 use_tracked_hint = th in self.requirements
1749 1750
1750 1751 return dirstate.dirstate(
1751 1752 self.vfs,
1752 1753 self.ui,
1753 1754 self.root,
1754 1755 self._dirstatevalidate,
1755 1756 sparsematchfn,
1756 1757 self.nodeconstants,
1757 1758 use_dirstate_v2,
1758 1759 use_tracked_hint=use_tracked_hint,
1759 1760 )
1760 1761
1761 1762 def _dirstatevalidate(self, node):
1762 1763 try:
1763 1764 self.changelog.rev(node)
1764 1765 return node
1765 1766 except error.LookupError:
1766 1767 if not self._dirstatevalidatewarned:
1767 1768 self._dirstatevalidatewarned = True
1768 1769 self.ui.warn(
1769 1770 _(b"warning: ignoring unknown working parent %s!\n")
1770 1771 % short(node)
1771 1772 )
1772 1773 return self.nullid
1773 1774
1774 1775 @storecache(narrowspec.FILENAME)
1775 1776 def narrowpats(self):
1776 1777 """matcher patterns for this repository's narrowspec
1777 1778
1778 1779 A tuple of (includes, excludes).
1779 1780 """
1780 1781 return narrowspec.load(self)
1781 1782
1782 1783 @storecache(narrowspec.FILENAME)
1783 1784 def _storenarrowmatch(self):
1784 1785 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1785 1786 return matchmod.always()
1786 1787 include, exclude = self.narrowpats
1787 1788 return narrowspec.match(self.root, include=include, exclude=exclude)
1788 1789
1789 1790 @storecache(narrowspec.FILENAME)
1790 1791 def _narrowmatch(self):
1791 1792 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1792 1793 return matchmod.always()
1793 1794 narrowspec.checkworkingcopynarrowspec(self)
1794 1795 include, exclude = self.narrowpats
1795 1796 return narrowspec.match(self.root, include=include, exclude=exclude)
1796 1797
1797 1798 def narrowmatch(self, match=None, includeexact=False):
1798 1799 """matcher corresponding the the repo's narrowspec
1799 1800
1800 1801 If `match` is given, then that will be intersected with the narrow
1801 1802 matcher.
1802 1803
1803 1804 If `includeexact` is True, then any exact matches from `match` will
1804 1805 be included even if they're outside the narrowspec.
1805 1806 """
1806 1807 if match:
1807 1808 if includeexact and not self._narrowmatch.always():
1808 1809 # do not exclude explicitly-specified paths so that they can
1809 1810 # be warned later on
1810 1811 em = matchmod.exact(match.files())
1811 1812 nm = matchmod.unionmatcher([self._narrowmatch, em])
1812 1813 return matchmod.intersectmatchers(match, nm)
1813 1814 return matchmod.intersectmatchers(match, self._narrowmatch)
1814 1815 return self._narrowmatch
1815 1816
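# Illustrative sketch (not part of the upstream file): intersecting an
# explicit matcher with the narrowspec; the pattern below is hypothetical.
#
#     m = matchmod.match(repo.root, b'', [b'glob:src/**'])
#     m = repo.narrowmatch(m, includeexact=True)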
1816 1817 def setnarrowpats(self, newincludes, newexcludes):
1817 1818 narrowspec.save(self, newincludes, newexcludes)
1818 1819 self.invalidate(clearfilecache=True)
1819 1820
1820 1821 @unfilteredpropertycache
1821 1822 def _quick_access_changeid_null(self):
1822 1823 return {
1823 1824 b'null': (nullrev, self.nodeconstants.nullid),
1824 1825 nullrev: (nullrev, self.nodeconstants.nullid),
1825 1826 self.nullid: (nullrev, self.nullid),
1826 1827 }
1827 1828
1828 1829 @unfilteredpropertycache
1829 1830 def _quick_access_changeid_wc(self):
1830 1831 # also fast-path access to the working copy parents;
1831 1832 # however, only do it for filters that ensure the wc is visible.
1832 1833 quick = self._quick_access_changeid_null.copy()
1833 1834 cl = self.unfiltered().changelog
1834 1835 for node in self.dirstate.parents():
1835 1836 if node == self.nullid:
1836 1837 continue
1837 1838 rev = cl.index.get_rev(node)
1838 1839 if rev is None:
1839 1840 # unknown working copy parent case:
1840 1841 #
1841 1842 # skip the fast path and let higher code deal with it
1842 1843 continue
1843 1844 pair = (rev, node)
1844 1845 quick[rev] = pair
1845 1846 quick[node] = pair
1846 1847 # also add the parents of the parents
1847 1848 for r in cl.parentrevs(rev):
1848 1849 if r == nullrev:
1849 1850 continue
1850 1851 n = cl.node(r)
1851 1852 pair = (r, n)
1852 1853 quick[r] = pair
1853 1854 quick[n] = pair
1854 1855 p1node = self.dirstate.p1()
1855 1856 if p1node != self.nullid:
1856 1857 quick[b'.'] = quick[p1node]
1857 1858 return quick
1858 1859
1859 1860 @unfilteredmethod
1860 1861 def _quick_access_changeid_invalidate(self):
1861 1862 if '_quick_access_changeid_wc' in vars(self):
1862 1863 del self.__dict__['_quick_access_changeid_wc']
1863 1864
1864 1865 @property
1865 1866 def _quick_access_changeid(self):
1866 1867 """an helper dictionnary for __getitem__ calls
1867 1868
1868 1869 This contains a list of symbol we can recognise right away without
1869 1870 further processing.
1870 1871 """
1871 1872 if self.filtername in repoview.filter_has_wc:
1872 1873 return self._quick_access_changeid_wc
1873 1874 return self._quick_access_changeid_null
1874 1875
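# Illustrative sketch (not part of the upstream file): the quick-access
# mapping resolves a handful of well-known identifiers to (rev, node)
# pairs without going through full symbol resolution; p1rev/p1node below
# stand for the working-copy first parent and are examples only.
#
#     {b'null': (-1, nullid), -1: (-1, nullid), nullid: (-1, nullid),
#      b'.': (p1rev, p1node), p1rev: (p1rev, p1node), p1node: (p1rev, p1node)}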
1875 1876 def __getitem__(self, changeid):
1876 1877 # dealing with special cases
1877 1878 if changeid is None:
1878 1879 return context.workingctx(self)
1879 1880 if isinstance(changeid, context.basectx):
1880 1881 return changeid
1881 1882
1882 1883 # dealing with multiple revisions
1883 1884 if isinstance(changeid, slice):
1884 1885 # wdirrev isn't contiguous so the slice shouldn't include it
1885 1886 return [
1886 1887 self[i]
1887 1888 for i in pycompat.xrange(*changeid.indices(len(self)))
1888 1889 if i not in self.changelog.filteredrevs
1889 1890 ]
1890 1891
1891 1892 # dealing with some special values
1892 1893 quick_access = self._quick_access_changeid.get(changeid)
1893 1894 if quick_access is not None:
1894 1895 rev, node = quick_access
1895 1896 return context.changectx(self, rev, node, maybe_filtered=False)
1896 1897 if changeid == b'tip':
1897 1898 node = self.changelog.tip()
1898 1899 rev = self.changelog.rev(node)
1899 1900 return context.changectx(self, rev, node)
1900 1901
1901 1902 # dealing with arbitrary values
1902 1903 try:
1903 1904 if isinstance(changeid, int):
1904 1905 node = self.changelog.node(changeid)
1905 1906 rev = changeid
1906 1907 elif changeid == b'.':
1907 1908 # this is a hack to delay/avoid loading obsmarkers
1908 1909 # when we know that '.' won't be hidden
1909 1910 node = self.dirstate.p1()
1910 1911 rev = self.unfiltered().changelog.rev(node)
1911 1912 elif len(changeid) == self.nodeconstants.nodelen:
1912 1913 try:
1913 1914 node = changeid
1914 1915 rev = self.changelog.rev(changeid)
1915 1916 except error.FilteredLookupError:
1916 1917 changeid = hex(changeid) # for the error message
1917 1918 raise
1918 1919 except LookupError:
1919 1920 # check if it might have come from damaged dirstate
1920 1921 #
1921 1922 # XXX we could avoid the unfiltered if we had a recognizable
1922 1923 # exception for filtered changeset access
1923 1924 if (
1924 1925 self.local()
1925 1926 and changeid in self.unfiltered().dirstate.parents()
1926 1927 ):
1927 1928 msg = _(b"working directory has unknown parent '%s'!")
1928 1929 raise error.Abort(msg % short(changeid))
1929 1930 changeid = hex(changeid) # for the error message
1930 1931 raise
1931 1932
1932 1933 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1933 1934 node = bin(changeid)
1934 1935 rev = self.changelog.rev(node)
1935 1936 else:
1936 1937 raise error.ProgrammingError(
1937 1938 b"unsupported changeid '%s' of type %s"
1938 1939 % (changeid, pycompat.bytestr(type(changeid)))
1939 1940 )
1940 1941
1941 1942 return context.changectx(self, rev, node)
1942 1943
1943 1944 except (error.FilteredIndexError, error.FilteredLookupError):
1944 1945 raise error.FilteredRepoLookupError(
1945 1946 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1946 1947 )
1947 1948 except (IndexError, LookupError):
1948 1949 raise error.RepoLookupError(
1949 1950 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1950 1951 )
1951 1952 except error.WdirUnsupported:
1952 1953 return context.workingctx(self)
1953 1954
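# Illustrative sketch (not part of the upstream file): the kinds of keys
# __getitem__ accepts; the values shown are examples only.
#
#     repo[None]      # working directory context
#     repo[b'tip']    # symbolic name
#     repo[0]         # integer revision
#     repo[b'.']      # first working-directory parent
#     repo[node]      # 20-byte binary node or 40-character hex node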
1954 1955 def __contains__(self, changeid):
1955 1956 """True if the given changeid exists"""
1956 1957 try:
1957 1958 self[changeid]
1958 1959 return True
1959 1960 except error.RepoLookupError:
1960 1961 return False
1961 1962
1962 1963 def __nonzero__(self):
1963 1964 return True
1964 1965
1965 1966 __bool__ = __nonzero__
1966 1967
1967 1968 def __len__(self):
1968 1969 # no need to pay the cost of repoview.changelog
1969 1970 unfi = self.unfiltered()
1970 1971 return len(unfi.changelog)
1971 1972
1972 1973 def __iter__(self):
1973 1974 return iter(self.changelog)
1974 1975
1975 1976 def revs(self, expr, *args):
1976 1977 """Find revisions matching a revset.
1977 1978
1978 1979 The revset is specified as a string ``expr`` that may contain
1979 1980 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1980 1981
1981 1982 Revset aliases from the configuration are not expanded. To expand
1982 1983 user aliases, consider calling ``scmutil.revrange()`` or
1983 1984 ``repo.anyrevs([expr], user=True)``.
1984 1985
1985 1986 Returns a smartset.abstractsmartset, which is a list-like interface
1986 1987 that contains integer revisions.
1987 1988 """
1988 1989 tree = revsetlang.spectree(expr, *args)
1989 1990 return revset.makematcher(tree)(self)
1990 1991
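# Illustrative sketch (not part of the upstream file): %-formatting keeps
# user-supplied values out of the revset grammar; the expression and
# argument below are hypothetical.
#
#     for rev in repo.revs(b"ancestors(%d) and not public()", 42):
#         ...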
1991 1992 def set(self, expr, *args):
1992 1993 """Find revisions matching a revset and emit changectx instances.
1993 1994
1994 1995 This is a convenience wrapper around ``revs()`` that iterates the
1995 1996 result and is a generator of changectx instances.
1996 1997
1997 1998 Revset aliases from the configuration are not expanded. To expand
1998 1999 user aliases, consider calling ``scmutil.revrange()``.
1999 2000 """
2000 2001 for r in self.revs(expr, *args):
2001 2002 yield self[r]
2002 2003
2003 2004 def anyrevs(self, specs, user=False, localalias=None):
2004 2005 """Find revisions matching one of the given revsets.
2005 2006
2006 2007 Revset aliases from the configuration are not expanded by default. To
2007 2008 expand user aliases, specify ``user=True``. To provide some local
2008 2009 definitions overriding user aliases, set ``localalias`` to
2009 2010 ``{name: definitionstring}``.
2010 2011 """
2011 2012 if specs == [b'null']:
2012 2013 return revset.baseset([nullrev])
2013 2014 if specs == [b'.']:
2014 2015 quick_data = self._quick_access_changeid.get(b'.')
2015 2016 if quick_data is not None:
2016 2017 return revset.baseset([quick_data[0]])
2017 2018 if user:
2018 2019 m = revset.matchany(
2019 2020 self.ui,
2020 2021 specs,
2021 2022 lookup=revset.lookupfn(self),
2022 2023 localalias=localalias,
2023 2024 )
2024 2025 else:
2025 2026 m = revset.matchany(None, specs, localalias=localalias)
2026 2027 return m(self)
2027 2028
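# Illustrative sketch (not part of the upstream file): resolving several
# user-provided revsets at once, with user aliases expanded.
#
#     revs = repo.anyrevs([b'.', b'draft()'], user=True)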
2028 2029 def url(self):
2029 2030 return b'file:' + self.root
2030 2031
2031 2032 def hook(self, name, throw=False, **args):
2032 2033 """Call a hook, passing this repo instance.
2033 2034
2034 2035 This is a convenience method to aid invoking hooks. Extensions likely
2035 2036 won't call this unless they have registered a custom hook or are
2036 2037 replacing code that is expected to call a hook.
2037 2038 """
2038 2039 return hook.hook(self.ui, self, name, throw, **args)
2039 2040
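# Illustrative sketch (not part of the upstream file): firing a custom
# hook from an extension; the hook name and argument are hypothetical.
#
#     repo.hook(b'myext-updated', throw=False, count=b'3')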
2040 2041 @filteredpropertycache
2041 2042 def _tagscache(self):
2042 2043 """Returns a tagscache object that contains various tags related
2043 2044 caches."""
2044 2045
2045 2046 # This simplifies its cache management by having one decorated
2046 2047 # function (this one) and the rest simply fetch things from it.
2047 2048 class tagscache:
2048 2049 def __init__(self):
2049 2050 # These two define the set of tags for this repository. tags
2050 2051 # maps tag name to node; tagtypes maps tag name to 'global' or
2051 2052 # 'local'. (Global tags are defined by .hgtags across all
2052 2053 # heads, and local tags are defined in .hg/localtags.)
2053 2054 # They constitute the in-memory cache of tags.
2054 2055 self.tags = self.tagtypes = None
2055 2056
2056 2057 self.nodetagscache = self.tagslist = None
2057 2058
2058 2059 cache = tagscache()
2059 2060 cache.tags, cache.tagtypes = self._findtags()
2060 2061
2061 2062 return cache
2062 2063
2063 2064 def tags(self):
2064 2065 '''return a mapping of tag to node'''
2065 2066 t = {}
2066 2067 if self.changelog.filteredrevs:
2067 2068 tags, tt = self._findtags()
2068 2069 else:
2069 2070 tags = self._tagscache.tags
2070 2071 rev = self.changelog.rev
2071 2072 for k, v in tags.items():
2072 2073 try:
2073 2074 # ignore tags to unknown nodes
2074 2075 rev(v)
2075 2076 t[k] = v
2076 2077 except (error.LookupError, ValueError):
2077 2078 pass
2078 2079 return t
2079 2080
2080 2081 def _findtags(self):
2081 2082 """Do the hard work of finding tags. Return a pair of dicts
2082 2083 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2083 2084 maps tag name to a string like \'global\' or \'local\'.
2084 2085 Subclasses or extensions are free to add their own tags, but
2085 2086 should be aware that the returned dicts will be retained for the
2086 2087 duration of the localrepo object."""
2087 2088
2088 2089 # XXX what tagtype should subclasses/extensions use? Currently
2089 2090 # mq and bookmarks add tags, but do not set the tagtype at all.
2090 2091 # Should each extension invent its own tag type? Should there
2091 2092 # be one tagtype for all such "virtual" tags? Or is the status
2092 2093 # quo fine?
2093 2094
2094 2095 # map tag name to (node, hist)
2095 2096 alltags = tagsmod.findglobaltags(self.ui, self)
2096 2097 # map tag name to tag type
2097 2098 tagtypes = {tag: b'global' for tag in alltags}
2098 2099
2099 2100 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2100 2101
2101 2102 # Build the return dicts. Have to re-encode tag names because
2102 2103 # the tags module always uses UTF-8 (in order not to lose info
2103 2104 # writing to the cache), but the rest of Mercurial wants them in
2104 2105 # local encoding.
2105 2106 tags = {}
2106 2107 for (name, (node, hist)) in alltags.items():
2107 2108 if node != self.nullid:
2108 2109 tags[encoding.tolocal(name)] = node
2109 2110 tags[b'tip'] = self.changelog.tip()
2110 2111 tagtypes = {
2111 2112 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2112 2113 }
2113 2114 return (tags, tagtypes)
2114 2115
2115 2116 def tagtype(self, tagname):
2116 2117 """
2117 2118 return the type of the given tag. result can be:
2118 2119
2119 2120 'local' : a local tag
2120 2121 'global' : a global tag
2121 2122 None : tag does not exist
2122 2123 """
2123 2124
2124 2125 return self._tagscache.tagtypes.get(tagname)
2125 2126
2126 2127 def tagslist(self):
2127 2128 '''return a list of tags ordered by revision'''
2128 2129 if not self._tagscache.tagslist:
2129 2130 l = []
2130 2131 for t, n in self.tags().items():
2131 2132 l.append((self.changelog.rev(n), t, n))
2132 2133 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2133 2134
2134 2135 return self._tagscache.tagslist
2135 2136
2136 2137 def nodetags(self, node):
2137 2138 '''return the tags associated with a node'''
2138 2139 if not self._tagscache.nodetagscache:
2139 2140 nodetagscache = {}
2140 2141 for t, n in self._tagscache.tags.items():
2141 2142 nodetagscache.setdefault(n, []).append(t)
2142 2143 for tags in nodetagscache.values():
2143 2144 tags.sort()
2144 2145 self._tagscache.nodetagscache = nodetagscache
2145 2146 return self._tagscache.nodetagscache.get(node, [])
2146 2147
2147 2148 def nodebookmarks(self, node):
2148 2149 """return the list of bookmarks pointing to the specified node"""
2149 2150 return self._bookmarks.names(node)
2150 2151
2151 2152 def branchmap(self):
2152 2153 """returns a dictionary {branch: [branchheads]} with branchheads
2153 2154 ordered by increasing revision number"""
2154 2155 return self._branchcaches[self]
2155 2156
2156 2157 @unfilteredmethod
2157 2158 def revbranchcache(self):
2158 2159 if not self._revbranchcache:
2159 2160 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2160 2161 return self._revbranchcache
2161 2162
2162 2163 def register_changeset(self, rev, changelogrevision):
2163 2164 self.revbranchcache().setdata(rev, changelogrevision)
2164 2165
2165 2166 def branchtip(self, branch, ignoremissing=False):
2166 2167 """return the tip node for a given branch
2167 2168
2168 2169 If ignoremissing is True, then this method will not raise an error.
2169 2170 This is helpful for callers that only expect None for a missing branch
2170 2171 (e.g. namespace).
2171 2172
2172 2173 """
2173 2174 try:
2174 2175 return self.branchmap().branchtip(branch)
2175 2176 except KeyError:
2176 2177 if not ignoremissing:
2177 2178 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2178 2179 else:
2179 2180 pass
2180 2181
2181 2182 def lookup(self, key):
2182 2183 node = scmutil.revsymbol(self, key).node()
2183 2184 if node is None:
2184 2185 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2185 2186 return node
2186 2187
2187 2188 def lookupbranch(self, key):
2188 2189 if self.branchmap().hasbranch(key):
2189 2190 return key
2190 2191
2191 2192 return scmutil.revsymbol(self, key).branch()
2192 2193
2193 2194 def known(self, nodes):
2194 2195 cl = self.changelog
2195 2196 get_rev = cl.index.get_rev
2196 2197 filtered = cl.filteredrevs
2197 2198 result = []
2198 2199 for n in nodes:
2199 2200 r = get_rev(n)
2200 2201 resp = not (r is None or r in filtered)
2201 2202 result.append(resp)
2202 2203 return result
2203 2204
2204 2205 def local(self):
2205 2206 return self
2206 2207
2207 2208 def publishing(self):
2208 2209 # it's safe (and desirable) to trust the publish flag unconditionally
2209 2210 # so that we don't finalize changes shared between users via ssh or nfs
2210 2211 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2211 2212
2212 2213 def cancopy(self):
2213 2214 # so statichttprepo's override of local() works
2214 2215 if not self.local():
2215 2216 return False
2216 2217 if not self.publishing():
2217 2218 return True
2218 2219 # if publishing we can't copy if there is filtered content
2219 2220 return not self.filtered(b'visible').changelog.filteredrevs
2220 2221
2221 2222 def shared(self):
2222 2223 '''the type of shared repository (None if not shared)'''
2223 2224 if self.sharedpath != self.path:
2224 2225 return b'store'
2225 2226 return None
2226 2227
2227 2228 def wjoin(self, f, *insidef):
2228 2229 return self.vfs.reljoin(self.root, f, *insidef)
2229 2230
2230 2231 def setparents(self, p1, p2=None):
2231 2232 if p2 is None:
2232 2233 p2 = self.nullid
2233 2234 self[None].setparents(p1, p2)
2234 2235 self._quick_access_changeid_invalidate()
2235 2236
2236 2237 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2237 2238 """changeid must be a changeset revision, if specified.
2238 2239 fileid can be a file revision or node."""
2239 2240 return context.filectx(
2240 2241 self, path, changeid, fileid, changectx=changectx
2241 2242 )
2242 2243
2243 2244 def getcwd(self):
2244 2245 return self.dirstate.getcwd()
2245 2246
2246 2247 def pathto(self, f, cwd=None):
2247 2248 return self.dirstate.pathto(f, cwd)
2248 2249
2249 2250 def _loadfilter(self, filter):
2250 2251 if filter not in self._filterpats:
2251 2252 l = []
2252 2253 for pat, cmd in self.ui.configitems(filter):
2253 2254 if cmd == b'!':
2254 2255 continue
2255 2256 mf = matchmod.match(self.root, b'', [pat])
2256 2257 fn = None
2257 2258 params = cmd
2258 2259 for name, filterfn in self._datafilters.items():
2259 2260 if cmd.startswith(name):
2260 2261 fn = filterfn
2261 2262 params = cmd[len(name) :].lstrip()
2262 2263 break
2263 2264 if not fn:
2264 2265 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2265 2266 fn.__name__ = 'commandfilter'
2266 2267 # Wrap old filters not supporting keyword arguments
2267 2268 if not pycompat.getargspec(fn)[2]:
2268 2269 oldfn = fn
2269 2270 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2270 2271 fn.__name__ = 'compat-' + oldfn.__name__
2271 2272 l.append((mf, fn, params))
2272 2273 self._filterpats[filter] = l
2273 2274 return self._filterpats[filter]
2274 2275
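# Illustrative sketch (not part of the upstream file): encode/decode
# filters are read from the [encode] and [decode] config sections as
# "pattern = command" pairs; the patterns and commands below are
# hypothetical.
#
#     [encode]
#     **.txt = dos2unix
#     [decode]
#     **.txt = unix2dos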
2275 2276 def _filter(self, filterpats, filename, data):
2276 2277 for mf, fn, cmd in filterpats:
2277 2278 if mf(filename):
2278 2279 self.ui.debug(
2279 2280 b"filtering %s through %s\n"
2280 2281 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2281 2282 )
2282 2283 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2283 2284 break
2284 2285
2285 2286 return data
2286 2287
2287 2288 @unfilteredpropertycache
2288 2289 def _encodefilterpats(self):
2289 2290 return self._loadfilter(b'encode')
2290 2291
2291 2292 @unfilteredpropertycache
2292 2293 def _decodefilterpats(self):
2293 2294 return self._loadfilter(b'decode')
2294 2295
2295 2296 def adddatafilter(self, name, filter):
2296 2297 self._datafilters[name] = filter
2297 2298
2298 2299 def wread(self, filename):
2299 2300 if self.wvfs.islink(filename):
2300 2301 data = self.wvfs.readlink(filename)
2301 2302 else:
2302 2303 data = self.wvfs.read(filename)
2303 2304 return self._filter(self._encodefilterpats, filename, data)
2304 2305
2305 2306 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2306 2307 """write ``data`` into ``filename`` in the working directory
2307 2308
2308 2309 This returns length of written (maybe decoded) data.
2309 2310 """
2310 2311 data = self._filter(self._decodefilterpats, filename, data)
2311 2312 if b'l' in flags:
2312 2313 self.wvfs.symlink(data, filename)
2313 2314 else:
2314 2315 self.wvfs.write(
2315 2316 filename, data, backgroundclose=backgroundclose, **kwargs
2316 2317 )
2317 2318 if b'x' in flags:
2318 2319 self.wvfs.setflags(filename, False, True)
2319 2320 else:
2320 2321 self.wvfs.setflags(filename, False, False)
2321 2322 return len(data)
2322 2323
2323 2324 def wwritedata(self, filename, data):
2324 2325 return self._filter(self._decodefilterpats, filename, data)
2325 2326
2326 2327 def currenttransaction(self):
2327 2328 """return the current transaction or None if non exists"""
2328 2329 if self._transref:
2329 2330 tr = self._transref()
2330 2331 else:
2331 2332 tr = None
2332 2333
2333 2334 if tr and tr.running():
2334 2335 return tr
2335 2336 return None
2336 2337
2337 2338 def transaction(self, desc, report=None):
2338 2339 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2339 2340 b'devel', b'check-locks'
2340 2341 ):
2341 2342 if self._currentlock(self._lockref) is None:
2342 2343 raise error.ProgrammingError(b'transaction requires locking')
2343 2344 tr = self.currenttransaction()
2344 2345 if tr is not None:
2345 2346 return tr.nest(name=desc)
2346 2347
2347 2348 # abort here if the journal already exists
2348 2349 if self.svfs.exists(b"journal"):
2349 2350 raise error.RepoError(
2350 2351 _(b"abandoned transaction found"),
2351 2352 hint=_(b"run 'hg recover' to clean up transaction"),
2352 2353 )
2353 2354
2354 2355 idbase = b"%.40f#%f" % (random.random(), time.time())
2355 2356 ha = hex(hashutil.sha1(idbase).digest())
2356 2357 txnid = b'TXN:' + ha
2357 2358 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2358 2359
2359 2360 self._writejournal(desc)
2360 2361 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2361 2362 if report:
2362 2363 rp = report
2363 2364 else:
2364 2365 rp = self.ui.warn
2365 2366 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2366 2367 # we must avoid cyclic reference between repo and transaction.
2367 2368 reporef = weakref.ref(self)
2368 2369 # Code to track tag movement
2369 2370 #
2370 2371 # Since tags are all handled as file content, it is actually quite hard
2371 2372 # to track these movements from a code perspective. So we fall back to
2372 2373 # tracking at the repository level. One could envision tracking changes
2373 2374 # to the '.hgtags' file through changegroup apply, but that fails to
2374 2375 # cope with cases where a transaction exposes new heads without a changegroup
2375 2376 # being involved (e.g. phase movement).
2376 2377 #
2377 2378 # For now, we gate the feature behind a flag since this likely comes
2378 2379 # with performance impacts. The current code runs more often than needed
2379 2380 # and does not use caches as much as it could. The current focus is on
2380 2381 # the behavior of the feature so we disable it by default. The flag
2381 2382 # will be removed when we are happy with the performance impact.
2382 2383 #
2383 2384 # Once this feature is no longer experimental move the following
2384 2385 # documentation to the appropriate help section:
2385 2386 #
2386 2387 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2387 2388 # tags (new or changed or deleted tags). In addition the details of
2388 2389 # these changes are made available in a file at:
2389 2390 # ``REPOROOT/.hg/changes/tags.changes``.
2390 2391 # Make sure you check for HG_TAG_MOVED before reading that file as it
2391 2392 # might exist from a previous transaction even if no tags were touched
2392 2393 # in this one. Changes are recorded in a line-based format::
2393 2394 #
2394 2395 # <action> <hex-node> <tag-name>\n
2395 2396 #
2397 2398 # Actions are defined as follows:
2397 2398 # "-R": tag is removed,
2398 2399 # "+A": tag is added,
2399 2400 # "-M": tag is moved (old value),
2400 2401 # "+M": tag is moved (new value),
2401 2402 tracktags = lambda x: None
2402 2403 # experimental config: experimental.hook-track-tags
2403 2404 shouldtracktags = self.ui.configbool(
2404 2405 b'experimental', b'hook-track-tags'
2405 2406 )
2406 2407 if desc != b'strip' and shouldtracktags:
2407 2408 oldheads = self.changelog.headrevs()
2408 2409
2409 2410 def tracktags(tr2):
2410 2411 repo = reporef()
2411 2412 assert repo is not None # help pytype
2412 2413 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2413 2414 newheads = repo.changelog.headrevs()
2414 2415 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2415 2416 # notes: we compare lists here.
2416 2417 # As we do it only once, building a set would not be cheaper
2417 2418 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2418 2419 if changes:
2419 2420 tr2.hookargs[b'tag_moved'] = b'1'
2420 2421 with repo.vfs(
2421 2422 b'changes/tags.changes', b'w', atomictemp=True
2422 2423 ) as changesfile:
2423 2424 # note: we do not register the file to the transaction
2424 2425 # because we need it to still exist when the transaction
2425 2426 # is closed (for txnclose hooks)
2426 2427 tagsmod.writediff(changesfile, changes)
2427 2428
2428 2429 def validate(tr2):
2429 2430 """will run pre-closing hooks"""
2430 2431 # XXX the transaction API is a bit lacking here so we take a hacky
2431 2432 # path for now
2432 2433 #
2433 2434 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2434 2435 # dict is copied before these run. In addition, we need the data
2435 2436 # available to in-memory hooks too.
2436 2437 #
2437 2438 # Moreover, we also need to make sure this runs before txnclose
2438 2439 # hooks and there is no "pending" mechanism that would execute
2439 2440 # logic only if hooks are about to run.
2440 2441 #
2441 2442 # Fixing this limitation of the transaction is also needed to track
2442 2443 # other families of changes (bookmarks, phases, obsolescence).
2443 2444 #
2444 2445 # This will have to be fixed before we remove the experimental
2445 2446 # gating.
2446 2447 tracktags(tr2)
2447 2448 repo = reporef()
2448 2449 assert repo is not None # help pytype
2449 2450
2450 2451 singleheadopt = (b'experimental', b'single-head-per-branch')
2451 2452 singlehead = repo.ui.configbool(*singleheadopt)
2452 2453 if singlehead:
2453 2454 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2454 2455 accountclosed = singleheadsub.get(
2455 2456 b"account-closed-heads", False
2456 2457 )
2457 2458 if singleheadsub.get(b"public-changes-only", False):
2458 2459 filtername = b"immutable"
2459 2460 else:
2460 2461 filtername = b"visible"
2461 2462 scmutil.enforcesinglehead(
2462 2463 repo, tr2, desc, accountclosed, filtername
2463 2464 )
2464 2465 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2465 2466 for name, (old, new) in sorted(
2466 2467 tr.changes[b'bookmarks'].items()
2467 2468 ):
2468 2469 args = tr.hookargs.copy()
2469 2470 args.update(bookmarks.preparehookargs(name, old, new))
2470 2471 repo.hook(
2471 2472 b'pretxnclose-bookmark',
2472 2473 throw=True,
2473 2474 **pycompat.strkwargs(args)
2474 2475 )
2475 2476 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2476 2477 cl = repo.unfiltered().changelog
2477 2478 for revs, (old, new) in tr.changes[b'phases']:
2478 2479 for rev in revs:
2479 2480 args = tr.hookargs.copy()
2480 2481 node = hex(cl.node(rev))
2481 2482 args.update(phases.preparehookargs(node, old, new))
2482 2483 repo.hook(
2483 2484 b'pretxnclose-phase',
2484 2485 throw=True,
2485 2486 **pycompat.strkwargs(args)
2486 2487 )
2487 2488
2488 2489 repo.hook(
2489 2490 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2490 2491 )
2491 2492
2492 2493 def releasefn(tr, success):
2493 2494 repo = reporef()
2494 2495 if repo is None:
2495 2496 # If the repo has been GC'd (and this release function is being
2496 2497 # called from transaction.__del__), there's not much we can do,
2497 2498 # so just leave the unfinished transaction there and let the
2498 2499 # user run `hg recover`.
2499 2500 return
2500 2501 if success:
2501 2502 # this should be explicitly invoked here, because
2502 2503 # in-memory changes aren't written out at closing
2503 2504 # transaction, if tr.addfilegenerator (via
2504 2505 # dirstate.write or so) isn't invoked while
2505 2506 # transaction running
2506 2507 repo.dirstate.write(None)
2507 2508 else:
2508 2509 # discard all changes (including ones already written
2509 2510 # out) in this transaction
2510 2511 narrowspec.restorebackup(self, b'journal.narrowspec')
2511 2512 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2512 2513 repo.dirstate.restorebackup(None, b'journal.dirstate')
2513 2514
2514 2515 repo.invalidate(clearfilecache=True)
2515 2516
2516 2517 tr = transaction.transaction(
2517 2518 rp,
2518 2519 self.svfs,
2519 2520 vfsmap,
2520 2521 b"journal",
2521 2522 b"undo",
2522 2523 aftertrans(renames),
2523 2524 self.store.createmode,
2524 2525 validator=validate,
2525 2526 releasefn=releasefn,
2526 2527 checkambigfiles=_cachedfiles,
2527 2528 name=desc,
2528 2529 )
2529 2530 tr.changes[b'origrepolen'] = len(self)
2530 2531 tr.changes[b'obsmarkers'] = set()
2531 2532 tr.changes[b'phases'] = []
2532 2533 tr.changes[b'bookmarks'] = {}
2533 2534
2534 2535 tr.hookargs[b'txnid'] = txnid
2535 2536 tr.hookargs[b'txnname'] = desc
2536 2537 tr.hookargs[b'changes'] = tr.changes
2537 2538 # note: writing the fncache only during finalize means that the file is
2538 2539 # outdated when running hooks. As fncache is used for streaming clone,
2539 2540 # this is not expected to break anything that happens during the hooks.
2540 2541 tr.addfinalize(b'flush-fncache', self.store.write)
2541 2542
2542 2543 def txnclosehook(tr2):
2543 2544 """To be run if transaction is successful, will schedule a hook run"""
2544 2545 # Don't reference tr2 in hook() so we don't hold a reference.
2545 2546 # This reduces memory consumption when there are multiple
2546 2547 # transactions per lock. This can likely go away if issue5045
2547 2548 # fixes the function accumulation.
2548 2549 hookargs = tr2.hookargs
2549 2550
2550 2551 def hookfunc(unused_success):
2551 2552 repo = reporef()
2552 2553 assert repo is not None # help pytype
2553 2554
2554 2555 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2555 2556 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2556 2557 for name, (old, new) in bmchanges:
2557 2558 args = tr.hookargs.copy()
2558 2559 args.update(bookmarks.preparehookargs(name, old, new))
2559 2560 repo.hook(
2560 2561 b'txnclose-bookmark',
2561 2562 throw=False,
2562 2563 **pycompat.strkwargs(args)
2563 2564 )
2564 2565
2565 2566 if hook.hashook(repo.ui, b'txnclose-phase'):
2566 2567 cl = repo.unfiltered().changelog
2567 2568 phasemv = sorted(
2568 2569 tr.changes[b'phases'], key=lambda r: r[0][0]
2569 2570 )
2570 2571 for revs, (old, new) in phasemv:
2571 2572 for rev in revs:
2572 2573 args = tr.hookargs.copy()
2573 2574 node = hex(cl.node(rev))
2574 2575 args.update(phases.preparehookargs(node, old, new))
2575 2576 repo.hook(
2576 2577 b'txnclose-phase',
2577 2578 throw=False,
2578 2579 **pycompat.strkwargs(args)
2579 2580 )
2580 2581
2581 2582 repo.hook(
2582 2583 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2583 2584 )
2584 2585
2585 2586 repo = reporef()
2586 2587 assert repo is not None # help pytype
2587 2588 repo._afterlock(hookfunc)
2588 2589
2589 2590 tr.addfinalize(b'txnclose-hook', txnclosehook)
2590 2591 # Include a leading "-" to make it happen before the transaction summary
2591 2592 # reports registered via scmutil.registersummarycallback() whose names
2592 2593 # are 00-txnreport etc. That way, the caches will be warm when the
2593 2594 # callbacks run.
2594 2595 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2595 2596
2596 2597 def txnaborthook(tr2):
2597 2598 """To be run if transaction is aborted"""
2598 2599 repo = reporef()
2599 2600 assert repo is not None # help pytype
2600 2601 repo.hook(
2601 2602 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2602 2603 )
2603 2604
2604 2605 tr.addabort(b'txnabort-hook', txnaborthook)
2605 2606 # avoid eager cache invalidation. in-memory data should be identical
2606 2607 # to stored data if transaction has no error.
2607 2608 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2608 2609 self._transref = weakref.ref(tr)
2609 2610 scmutil.registersummarycallback(self, tr, desc)
2610 2611 return tr
2611 2612
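# Illustrative sketch (not part of the upstream file): transactions must be
# opened while holding the store lock; the description string below is
# hypothetical.
#
#     with repo.lock():
#         with repo.transaction(b'my-extension-op') as tr:
#             ...  # write store data under the transaction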
2612 2613 def _journalfiles(self):
2613 2614 return (
2614 2615 (self.svfs, b'journal'),
2615 2616 (self.svfs, b'journal.narrowspec'),
2616 2617 (self.vfs, b'journal.narrowspec.dirstate'),
2617 2618 (self.vfs, b'journal.dirstate'),
2618 2619 (self.vfs, b'journal.branch'),
2619 2620 (self.vfs, b'journal.desc'),
2620 2621 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2621 2622 (self.svfs, b'journal.phaseroots'),
2622 2623 )
2623 2624
2624 2625 def undofiles(self):
2625 2626 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2626 2627
2627 2628 @unfilteredmethod
2628 2629 def _writejournal(self, desc):
2629 2630 self.dirstate.savebackup(None, b'journal.dirstate')
2630 2631 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2631 2632 narrowspec.savebackup(self, b'journal.narrowspec')
2632 2633 self.vfs.write(
2633 2634 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2634 2635 )
2635 2636 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2636 2637 bookmarksvfs = bookmarks.bookmarksvfs(self)
2637 2638 bookmarksvfs.write(
2638 2639 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2639 2640 )
2640 2641 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2641 2642
2642 2643 def recover(self):
2643 2644 with self.lock():
2644 2645 if self.svfs.exists(b"journal"):
2645 2646 self.ui.status(_(b"rolling back interrupted transaction\n"))
2646 2647 vfsmap = {
2647 2648 b'': self.svfs,
2648 2649 b'plain': self.vfs,
2649 2650 }
2650 2651 transaction.rollback(
2651 2652 self.svfs,
2652 2653 vfsmap,
2653 2654 b"journal",
2654 2655 self.ui.warn,
2655 2656 checkambigfiles=_cachedfiles,
2656 2657 )
2657 2658 self.invalidate()
2658 2659 return True
2659 2660 else:
2660 2661 self.ui.warn(_(b"no interrupted transaction available\n"))
2661 2662 return False
2662 2663
2663 2664 def rollback(self, dryrun=False, force=False):
2664 2665 wlock = lock = dsguard = None
2665 2666 try:
2666 2667 wlock = self.wlock()
2667 2668 lock = self.lock()
2668 2669 if self.svfs.exists(b"undo"):
2669 2670 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2670 2671
2671 2672 return self._rollback(dryrun, force, dsguard)
2672 2673 else:
2673 2674 self.ui.warn(_(b"no rollback information available\n"))
2674 2675 return 1
2675 2676 finally:
2676 2677 release(dsguard, lock, wlock)
2677 2678
2678 2679 @unfilteredmethod # Until we get smarter cache management
2679 2680 def _rollback(self, dryrun, force, dsguard):
2680 2681 ui = self.ui
2681 2682 try:
2682 2683 args = self.vfs.read(b'undo.desc').splitlines()
2683 2684 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2684 2685 if len(args) >= 3:
2685 2686 detail = args[2]
2686 2687 oldtip = oldlen - 1
2687 2688
2688 2689 if detail and ui.verbose:
2689 2690 msg = _(
2690 2691 b'repository tip rolled back to revision %d'
2691 2692 b' (undo %s: %s)\n'
2692 2693 ) % (oldtip, desc, detail)
2693 2694 else:
2694 2695 msg = _(
2695 2696 b'repository tip rolled back to revision %d (undo %s)\n'
2696 2697 ) % (oldtip, desc)
2697 2698 except IOError:
2698 2699 msg = _(b'rolling back unknown transaction\n')
2699 2700 desc = None
2700 2701
2701 2702 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2702 2703 raise error.Abort(
2703 2704 _(
2704 2705 b'rollback of last commit while not checked out '
2705 2706 b'may lose data'
2706 2707 ),
2707 2708 hint=_(b'use -f to force'),
2708 2709 )
2709 2710
2710 2711 ui.status(msg)
2711 2712 if dryrun:
2712 2713 return 0
2713 2714
2714 2715 parents = self.dirstate.parents()
2715 2716 self.destroying()
2716 2717 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2717 2718 transaction.rollback(
2718 2719 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2719 2720 )
2720 2721 bookmarksvfs = bookmarks.bookmarksvfs(self)
2721 2722 if bookmarksvfs.exists(b'undo.bookmarks'):
2722 2723 bookmarksvfs.rename(
2723 2724 b'undo.bookmarks', b'bookmarks', checkambig=True
2724 2725 )
2725 2726 if self.svfs.exists(b'undo.phaseroots'):
2726 2727 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2727 2728 self.invalidate()
2728 2729
2729 2730 has_node = self.changelog.index.has_node
2730 2731 parentgone = any(not has_node(p) for p in parents)
2731 2732 if parentgone:
2732 2733 # prevent dirstateguard from overwriting already restored one
2733 2734 dsguard.close()
2734 2735
2735 2736 narrowspec.restorebackup(self, b'undo.narrowspec')
2736 2737 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2737 2738 self.dirstate.restorebackup(None, b'undo.dirstate')
2738 2739 try:
2739 2740 branch = self.vfs.read(b'undo.branch')
2740 2741 self.dirstate.setbranch(encoding.tolocal(branch))
2741 2742 except IOError:
2742 2743 ui.warn(
2743 2744 _(
2744 2745 b'named branch could not be reset: '
2745 2746 b'current branch is still \'%s\'\n'
2746 2747 )
2747 2748 % self.dirstate.branch()
2748 2749 )
2749 2750
2750 2751 parents = tuple([p.rev() for p in self[None].parents()])
2751 2752 if len(parents) > 1:
2752 2753 ui.status(
2753 2754 _(
2754 2755 b'working directory now based on '
2755 2756 b'revisions %d and %d\n'
2756 2757 )
2757 2758 % parents
2758 2759 )
2759 2760 else:
2760 2761 ui.status(
2761 2762 _(b'working directory now based on revision %d\n') % parents
2762 2763 )
2763 2764 mergestatemod.mergestate.clean(self)
2764 2765
2765 2766 # TODO: if we know which new heads may result from this rollback, pass
2766 2767 # them to destroy(), which will prevent the branchhead cache from being
2767 2768 # invalidated.
2768 2769 self.destroyed()
2769 2770 return 0
2770 2771
2771 2772 def _buildcacheupdater(self, newtransaction):
2772 2773 """called during transaction to build the callback updating cache
2773 2774
2774 2775 Lives on the repository to help extensions that might want to augment
2775 2776 this logic. For this purpose, the created transaction is passed to the
2776 2777 method.
2777 2778 """
2778 2779 # we must avoid cyclic reference between repo and transaction.
2779 2780 reporef = weakref.ref(self)
2780 2781
2781 2782 def updater(tr):
2782 2783 repo = reporef()
2783 2784 assert repo is not None # help pytype
2784 2785 repo.updatecaches(tr)
2785 2786
2786 2787 return updater
2787 2788
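As the docstring above notes, extensions may want to augment the cache-updating callback. A minimal sketch of one way to do that is below; the extension module, the `extsetup` hook, and the extra warming step are illustrative assumptions, not part of this changeset:

    from mercurial import extensions, localrepo

    def _cacheupdater(orig, repo, newtransaction):
        # build the stock updater, then chain extra work after it
        updater = orig(repo, newtransaction)

        def wrapped(tr):
            updater(tr)
            repo.ui.debug(b'extension: extra cache warming could run here\n')

        return wrapped

    def extsetup(ui):
        # hypothetical extension setup wrapping the repository method
        extensions.wrapfunction(
            localrepo.localrepository, '_buildcacheupdater', _cacheupdater
        )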
2788 2789 @unfilteredmethod
2789 2790 def updatecaches(self, tr=None, full=False, caches=None):
2790 2791 """warm appropriate caches
2791 2792
2792 2793 If this function is called after a transaction has closed, the transaction
2793 2794 will be available in the 'tr' argument. This can be used to selectively
2794 2795 update caches relevant to the changes in that transaction.
2795 2796
2796 2797 If 'full' is set, make sure all caches the function knows about have
2797 2798 up-to-date data. Even the ones usually loaded more lazily.
2798 2799
2799 2800 The `full` argument can take a special "post-clone" value. In this case
2800 2801 the cache warming is done after a clone and some of the slower caches might
2801 2802 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2802 2803 as we plan for a cleaner way to deal with this in 5.9.
2803 2804 """
2804 2805 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2805 2806 # During strip, many caches are invalid but
2806 2807 # later call to `destroyed` will refresh them.
2807 2808 return
2808 2809
2809 2810 unfi = self.unfiltered()
2810 2811
2811 2812 if full:
2812 2813 msg = (
2813 2814 "`full` argument for `repo.updatecaches` is deprecated\n"
2814 2815 "(use `caches=repository.CACHE_ALL` instead)"
2815 2816 )
2816 2817 self.ui.deprecwarn(msg, b"5.9")
2817 2818 caches = repository.CACHES_ALL
2818 2819 if full == b"post-clone":
2819 2820 caches = repository.CACHES_POST_CLONE
2820 2821 caches = repository.CACHES_ALL
2821 2822 elif caches is None:
2822 2823 caches = repository.CACHES_DEFAULT
2823 2824
2824 2825 if repository.CACHE_BRANCHMAP_SERVED in caches:
2825 2826 if tr is None or tr.changes[b'origrepolen'] < len(self):
2826 2827 # accessing the 'served' branchmap should refresh all the others,
2827 2828 self.ui.debug(b'updating the branch cache\n')
2828 2829 self.filtered(b'served').branchmap()
2829 2830 self.filtered(b'served.hidden').branchmap()
2830 2831 # flush all possibly delayed write.
2831 2832 self._branchcaches.write_delayed(self)
2832 2833
2833 2834 if repository.CACHE_CHANGELOG_CACHE in caches:
2834 2835 self.changelog.update_caches(transaction=tr)
2835 2836
2836 2837 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2837 2838 self.manifestlog.update_caches(transaction=tr)
2838 2839
2839 2840 if repository.CACHE_REV_BRANCH in caches:
2840 2841 rbc = unfi.revbranchcache()
2841 2842 for r in unfi.changelog:
2842 2843 rbc.branchinfo(r)
2843 2844 rbc.write()
2844 2845
2845 2846 if repository.CACHE_FULL_MANIFEST in caches:
2846 2847 # ensure the working copy parents are in the manifestfulltextcache
2847 2848 for ctx in self[b'.'].parents():
2848 2849 ctx.manifest() # accessing the manifest is enough
2849 2850
2850 2851 if repository.CACHE_FILE_NODE_TAGS in caches:
2851 2852 # accessing fnode cache warms the cache
2852 2853 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2853 2854
2854 2855 if repository.CACHE_TAGS_DEFAULT in caches:
2855 2856 # accessing tags warm the cache
2856 2857 self.tags()
2857 2858 if repository.CACHE_TAGS_SERVED in caches:
2858 2859 self.filtered(b'served').tags()
2859 2860
2860 2861 if repository.CACHE_BRANCHMAP_ALL in caches:
2861 2862 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2862 2863 # so we're forcing a write to cause these caches to be warmed up
2863 2864 # even if they haven't explicitly been requested yet (if they've
2864 2865 # never been used by hg, they won't ever have been written, even if
2865 2866 # they're a subset of another kind of cache that *has* been used).
2866 2867 for filt in repoview.filtertable.keys():
2867 2868 filtered = self.filtered(filt)
2868 2869 filtered.branchmap().write(filtered)
2869 2870
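For reference, a hedged example of how a caller might request only a subset of caches through the `caches` argument, using the constants referenced in the method body above (assuming `repo` is a local repository object):

    from mercurial.interfaces import repository

    # warm only the default tags cache and the served branchmap
    repo.updatecaches(caches={
        repository.CACHE_TAGS_DEFAULT,
        repository.CACHE_BRANCHMAP_SERVED,
    })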
2870 2871 def invalidatecaches(self):
2871 2872
2872 2873 if '_tagscache' in vars(self):
2873 2874 # can't use delattr on proxy
2874 2875 del self.__dict__['_tagscache']
2875 2876
2876 2877 self._branchcaches.clear()
2877 2878 self.invalidatevolatilesets()
2878 2879 self._sparsesignaturecache.clear()
2879 2880
2880 2881 def invalidatevolatilesets(self):
2881 2882 self.filteredrevcache.clear()
2882 2883 obsolete.clearobscaches(self)
2883 2884 self._quick_access_changeid_invalidate()
2884 2885
2885 2886 def invalidatedirstate(self):
2886 2887 """Invalidates the dirstate, causing the next call to dirstate
2887 2888 to check if it was modified since the last time it was read,
2888 2889 rereading it if it has.
2889 2890
2890 2891 This is different from dirstate.invalidate() in that it doesn't always
2891 2892 reread the dirstate. Use dirstate.invalidate() if you want to
2892 2893 explicitly read the dirstate again (i.e. restoring it to a previous
2893 2894 known good state)."""
2894 2895 if hasunfilteredcache(self, 'dirstate'):
2895 2896 for k in self.dirstate._filecache:
2896 2897 try:
2897 2898 delattr(self.dirstate, k)
2898 2899 except AttributeError:
2899 2900 pass
2900 2901 delattr(self.unfiltered(), 'dirstate')
2901 2902
2902 2903 def invalidate(self, clearfilecache=False):
2903 2904 """Invalidates both store and non-store parts other than dirstate
2904 2905
2905 2906 If a transaction is running, invalidation of store is omitted,
2906 2907 because discarding in-memory changes might cause inconsistency
2907 2908 (e.g. incomplete fncache causes unintentional failure, but
2908 2909 redundant one doesn't).
2909 2910 """
2910 2911 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2911 2912 for k in list(self._filecache.keys()):
2912 2913 # dirstate is invalidated separately in invalidatedirstate()
2913 2914 if k == b'dirstate':
2914 2915 continue
2915 2916 if (
2916 2917 k == b'changelog'
2917 2918 and self.currenttransaction()
2918 2919 and self.changelog._delayed
2919 2920 ):
2920 2921 # The changelog object may store unwritten revisions. We don't
2921 2922 # want to lose them.
2922 2923 # TODO: Solve the problem instead of working around it.
2923 2924 continue
2924 2925
2925 2926 if clearfilecache:
2926 2927 del self._filecache[k]
2927 2928 try:
2928 2929 delattr(unfiltered, k)
2929 2930 except AttributeError:
2930 2931 pass
2931 2932 self.invalidatecaches()
2932 2933 if not self.currenttransaction():
2933 2934 # TODO: Changing contents of store outside transaction
2934 2935 # causes inconsistency. We should make in-memory store
2935 2936 # changes detectable, and abort if changed.
2936 2937 self.store.invalidatecaches()
2937 2938
2938 2939 def invalidateall(self):
2939 2940 """Fully invalidates both store and non-store parts, causing the
2940 2941 subsequent operation to reread any outside changes."""
2941 2942 # extension should hook this to invalidate its caches
2942 2943 self.invalidate()
2943 2944 self.invalidatedirstate()
2944 2945
2945 2946 @unfilteredmethod
2946 2947 def _refreshfilecachestats(self, tr):
2947 2948 """Reload stats of cached files so that they are flagged as valid"""
2948 2949 for k, ce in self._filecache.items():
2949 2950 k = pycompat.sysstr(k)
2950 2951 if k == 'dirstate' or k not in self.__dict__:
2951 2952 continue
2952 2953 ce.refresh()
2953 2954
2954 2955 def _lock(
2955 2956 self,
2956 2957 vfs,
2957 2958 lockname,
2958 2959 wait,
2959 2960 releasefn,
2960 2961 acquirefn,
2961 2962 desc,
2962 2963 ):
2963 2964 timeout = 0
2964 2965 warntimeout = 0
2965 2966 if wait:
2966 2967 timeout = self.ui.configint(b"ui", b"timeout")
2967 2968 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2968 2969 # internal config: ui.signal-safe-lock
2969 2970 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2970 2971
2971 2972 l = lockmod.trylock(
2972 2973 self.ui,
2973 2974 vfs,
2974 2975 lockname,
2975 2976 timeout,
2976 2977 warntimeout,
2977 2978 releasefn=releasefn,
2978 2979 acquirefn=acquirefn,
2979 2980 desc=desc,
2980 2981 signalsafe=signalsafe,
2981 2982 )
2982 2983 return l
2983 2984
2984 2985 def _afterlock(self, callback):
2985 2986 """add a callback to be run when the repository is fully unlocked
2986 2987
2987 2988 The callback will be executed when the outermost lock is released
2988 2989 (with wlock being higher level than 'lock')."""
2989 2990 for ref in (self._wlockref, self._lockref):
2990 2991 l = ref and ref()
2991 2992 if l and l.held:
2992 2993 l.postrelease.append(callback)
2993 2994 break
2994 2995 else: # no lock has been found.
2995 2996 callback(True)
2996 2997
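A small sketch of the `_afterlock` contract described above; the callback body is made up, and per the code above the callback runs immediately with `True` when no lock is currently held:

    def _after_unlock(success):
        # runs once the outermost lock is released (or right away if no
        # lock is held, in which case success is True)
        if success:
            repo.ui.debug(b'post-unlock housekeeping ran\n')

    repo._afterlock(_after_unlock)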
2997 2998 def lock(self, wait=True):
2998 2999 """Lock the repository store (.hg/store) and return a weak reference
2999 3000 to the lock. Use this before modifying the store (e.g. committing or
3000 3001 stripping). If you are opening a transaction, get a lock as well.
3001 3002
3002 3003 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3003 3004 'wlock' first to avoid a dead-lock hazard."""
3004 3005 l = self._currentlock(self._lockref)
3005 3006 if l is not None:
3006 3007 l.lock()
3007 3008 return l
3008 3009
3009 3010 l = self._lock(
3010 3011 vfs=self.svfs,
3011 3012 lockname=b"lock",
3012 3013 wait=wait,
3013 3014 releasefn=None,
3014 3015 acquirefn=self.invalidate,
3015 3016 desc=_(b'repository %s') % self.origroot,
3016 3017 )
3017 3018 self._lockref = weakref.ref(l)
3018 3019 return l
3019 3020
3020 3021 def wlock(self, wait=True):
3021 3022 """Lock the non-store parts of the repository (everything under
3022 3023 .hg except .hg/store) and return a weak reference to the lock.
3023 3024
3024 3025 Use this before modifying files in .hg.
3025 3026
3026 3027 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3027 3028 'wlock' first to avoid a dead-lock hazard."""
3028 3029 l = self._wlockref() if self._wlockref else None
3029 3030 if l is not None and l.held:
3030 3031 l.lock()
3031 3032 return l
3032 3033
3033 3034 # We do not need to check for non-waiting lock acquisition. Such
3034 3035 # acquisition would not cause dead-lock as they would just fail.
3035 3036 if wait and (
3036 3037 self.ui.configbool(b'devel', b'all-warnings')
3037 3038 or self.ui.configbool(b'devel', b'check-locks')
3038 3039 ):
3039 3040 if self._currentlock(self._lockref) is not None:
3040 3041 self.ui.develwarn(b'"wlock" acquired after "lock"')
3041 3042
3042 3043 def unlock():
3043 3044 if self.dirstate.pendingparentchange():
3044 3045 self.dirstate.invalidate()
3045 3046 else:
3046 3047 self.dirstate.write(None)
3047 3048
3048 3049 self._filecache[b'dirstate'].refresh()
3049 3050
3050 3051 l = self._lock(
3051 3052 self.vfs,
3052 3053 b"wlock",
3053 3054 wait,
3054 3055 unlock,
3055 3056 self.invalidatedirstate,
3056 3057 _(b'working directory of %s') % self.origroot,
3057 3058 )
3058 3059 self._wlockref = weakref.ref(l)
3059 3060 return l
3060 3061
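As both docstrings stress, 'wlock' must be taken before 'lock'. A minimal illustration of the expected ordering, mirroring the pattern commit() itself uses further down (the body is a placeholder):

    with repo.wlock(), repo.lock():
        # safe to modify both the working copy (.hg) and the store here
        pass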
3061 3062 def _currentlock(self, lockref):
3062 3063 """Returns the lock if it's held, or None if it's not."""
3063 3064 if lockref is None:
3064 3065 return None
3065 3066 l = lockref()
3066 3067 if l is None or not l.held:
3067 3068 return None
3068 3069 return l
3069 3070
3070 3071 def currentwlock(self):
3071 3072 """Returns the wlock if it's held, or None if it's not."""
3072 3073 return self._currentlock(self._wlockref)
3073 3074
3074 3075 def checkcommitpatterns(self, wctx, match, status, fail):
3075 3076 """check for commit arguments that aren't committable"""
3076 3077 if match.isexact() or match.prefix():
3077 3078 matched = set(status.modified + status.added + status.removed)
3078 3079
3079 3080 for f in match.files():
3080 3081 f = self.dirstate.normalize(f)
3081 3082 if f == b'.' or f in matched or f in wctx.substate:
3082 3083 continue
3083 3084 if f in status.deleted:
3084 3085 fail(f, _(b'file not found!'))
3085 3086 # Is it a directory that exists or used to exist?
3086 3087 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3087 3088 d = f + b'/'
3088 3089 for mf in matched:
3089 3090 if mf.startswith(d):
3090 3091 break
3091 3092 else:
3092 3093 fail(f, _(b"no match under directory!"))
3093 3094 elif f not in self.dirstate:
3094 3095 fail(f, _(b"file not tracked!"))
3095 3096
3096 3097 @unfilteredmethod
3097 3098 def commit(
3098 3099 self,
3099 3100 text=b"",
3100 3101 user=None,
3101 3102 date=None,
3102 3103 match=None,
3103 3104 force=False,
3104 3105 editor=None,
3105 3106 extra=None,
3106 3107 ):
3107 3108 """Add a new revision to current repository.
3108 3109
3109 3110 Revision information is gathered from the working directory,
3110 3111 match can be used to filter the committed files. If editor is
3111 3112 supplied, it is called to get a commit message.
3112 3113 """
3113 3114 if extra is None:
3114 3115 extra = {}
3115 3116
3116 3117 def fail(f, msg):
3117 3118 raise error.InputError(b'%s: %s' % (f, msg))
3118 3119
3119 3120 if not match:
3120 3121 match = matchmod.always()
3121 3122
3122 3123 if not force:
3123 3124 match.bad = fail
3124 3125
3125 3126 # lock() for recent changelog (see issue4368)
3126 3127 with self.wlock(), self.lock():
3127 3128 wctx = self[None]
3128 3129 merge = len(wctx.parents()) > 1
3129 3130
3130 3131 if not force and merge and not match.always():
3131 3132 raise error.Abort(
3132 3133 _(
3133 3134 b'cannot partially commit a merge '
3134 3135 b'(do not specify files or patterns)'
3135 3136 )
3136 3137 )
3137 3138
3138 3139 status = self.status(match=match, clean=force)
3139 3140 if force:
3140 3141 status.modified.extend(
3141 3142 status.clean
3142 3143 ) # mq may commit clean files
3143 3144
3144 3145 # check subrepos
3145 3146 subs, commitsubs, newstate = subrepoutil.precommit(
3146 3147 self.ui, wctx, status, match, force=force
3147 3148 )
3148 3149
3149 3150 # make sure all explicit patterns are matched
3150 3151 if not force:
3151 3152 self.checkcommitpatterns(wctx, match, status, fail)
3152 3153
3153 3154 cctx = context.workingcommitctx(
3154 3155 self, status, text, user, date, extra
3155 3156 )
3156 3157
3157 3158 ms = mergestatemod.mergestate.read(self)
3158 3159 mergeutil.checkunresolved(ms)
3159 3160
3160 3161 # internal config: ui.allowemptycommit
3161 3162 if cctx.isempty() and not self.ui.configbool(
3162 3163 b'ui', b'allowemptycommit'
3163 3164 ):
3164 3165 self.ui.debug(b'nothing to commit, clearing merge state\n')
3165 3166 ms.reset()
3166 3167 return None
3167 3168
3168 3169 if merge and cctx.deleted():
3169 3170 raise error.Abort(_(b"cannot commit merge with missing files"))
3170 3171
3171 3172 if editor:
3172 3173 cctx._text = editor(self, cctx, subs)
3173 3174 edited = text != cctx._text
3174 3175
3175 3176 # Save commit message in case this transaction gets rolled back
3176 3177 # (e.g. by a pretxncommit hook). Leave the content alone on
3177 3178 # the assumption that the user will use the same editor again.
3178 3179 msg_path = self.savecommitmessage(cctx._text)
3179 3180
3180 3181 # commit subs and write new state
3181 3182 if subs:
3182 3183 uipathfn = scmutil.getuipathfn(self)
3183 3184 for s in sorted(commitsubs):
3184 3185 sub = wctx.sub(s)
3185 3186 self.ui.status(
3186 3187 _(b'committing subrepository %s\n')
3187 3188 % uipathfn(subrepoutil.subrelpath(sub))
3188 3189 )
3189 3190 sr = sub.commit(cctx._text, user, date)
3190 3191 newstate[s] = (newstate[s][0], sr)
3191 3192 subrepoutil.writestate(self, newstate)
3192 3193
3193 3194 p1, p2 = self.dirstate.parents()
3194 3195 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3195 3196 try:
3196 3197 self.hook(
3197 3198 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3198 3199 )
3199 3200 with self.transaction(b'commit'):
3200 3201 ret = self.commitctx(cctx, True)
3201 3202 # update bookmarks, dirstate and mergestate
3202 3203 bookmarks.update(self, [p1, p2], ret)
3203 3204 cctx.markcommitted(ret)
3204 3205 ms.reset()
3205 3206 except: # re-raises
3206 3207 if edited:
3207 3208 self.ui.write(
3208 3209 _(b'note: commit message saved in %s\n') % msg_path
3209 3210 )
3210 3211 self.ui.write(
3211 3212 _(
3212 3213 b"note: use 'hg commit --logfile "
3213 3214 b"%s --edit' to reuse it\n"
3214 3215 )
3215 3216 % msg_path
3216 3217 )
3217 3218 raise
3218 3219
3219 3220 def commithook(unused_success):
3220 3221 # hack for command that use a temporary commit (eg: histedit)
3221 3222 # temporary commit got stripped before hook release
3222 3223 if self.changelog.hasnode(ret):
3223 3224 self.hook(
3224 3225 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3225 3226 )
3226 3227
3227 3228 self._afterlock(commithook)
3228 3229 return ret
3229 3230
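An illustrative call to the `commit()` entry point defined above; the message, user, and the follow-up handling are made up for the example:

    node = repo.commit(
        text=b'example: demonstrate the commit() entry point',
        user=b'Example User <user@example.org>',
    )
    if node is None:
        repo.ui.status(b'nothing to commit\n')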
3230 3231 @unfilteredmethod
3231 3232 def commitctx(self, ctx, error=False, origctx=None):
3232 3233 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3233 3234
3234 3235 @unfilteredmethod
3235 3236 def destroying(self):
3236 3237 """Inform the repository that nodes are about to be destroyed.
3237 3238 Intended for use by strip and rollback, so there's a common
3238 3239 place for anything that has to be done before destroying history.
3239 3240
3240 3241 This is mostly useful for saving state that is in memory and waiting
3241 3242 to be flushed when the current lock is released. Because a call to
3242 3243 destroyed is imminent, the repo will be invalidated causing those
3243 3244 changes to stay in memory (waiting for the next unlock), or vanish
3244 3245 completely.
3245 3246 """
3246 3247 # When using the same lock to commit and strip, the phasecache is left
3247 3248 # dirty after committing. Then when we strip, the repo is invalidated,
3248 3249 # causing those changes to disappear.
3249 3250 if '_phasecache' in vars(self):
3250 3251 self._phasecache.write()
3251 3252
3252 3253 @unfilteredmethod
3253 3254 def destroyed(self):
3254 3255 """Inform the repository that nodes have been destroyed.
3255 3256 Intended for use by strip and rollback, so there's a common
3256 3257 place for anything that has to be done after destroying history.
3257 3258 """
3258 3259 # When one tries to:
3259 3260 # 1) destroy nodes thus calling this method (e.g. strip)
3260 3261 # 2) use phasecache somewhere (e.g. commit)
3261 3262 #
3262 3263 # then 2) will fail because the phasecache contains nodes that were
3263 3264 # removed. We can either remove phasecache from the filecache,
3264 3265 # causing it to reload next time it is accessed, or simply filter
3265 3266 # the removed nodes now and write the updated cache.
3266 3267 self._phasecache.filterunknown(self)
3267 3268 self._phasecache.write()
3268 3269
3269 3270 # refresh all repository caches
3270 3271 self.updatecaches()
3271 3272
3272 3273 # Ensure the persistent tag cache is updated. Doing it now
3273 3274 # means that the tag cache only has to worry about destroyed
3274 3275 # heads immediately after a strip/rollback. That in turn
3275 3276 # guarantees that "cachetip == currenttip" (comparing both rev
3276 3277 # and node) always means no nodes have been added or destroyed.
3277 3278
3278 3279 # XXX this is suboptimal when qrefresh'ing: we strip the current
3279 3280 # head, refresh the tag cache, then immediately add a new head.
3280 3281 # But I think doing it this way is necessary for the "instant
3281 3282 # tag cache retrieval" case to work.
3282 3283 self.invalidate()
3283 3284
3284 3285 def status(
3285 3286 self,
3286 3287 node1=b'.',
3287 3288 node2=None,
3288 3289 match=None,
3289 3290 ignored=False,
3290 3291 clean=False,
3291 3292 unknown=False,
3292 3293 listsubrepos=False,
3293 3294 ):
3294 3295 '''a convenience method that calls node1.status(node2)'''
3295 3296 return self[node1].status(
3296 3297 node2, match, ignored, clean, unknown, listsubrepos
3297 3298 )
3298 3299
3299 3300 def addpostdsstatus(self, ps):
3300 3301 """Add a callback to run within the wlock, at the point at which status
3301 3302 fixups happen.
3302 3303
3303 3304 On status completion, callback(wctx, status) will be called with the
3304 3305 wlock held, unless the dirstate has changed from underneath or the wlock
3305 3306 couldn't be grabbed.
3306 3307
3307 3308 Callbacks should not capture and use a cached copy of the dirstate --
3308 3309 it might change in the meanwhile. Instead, they should access the
3309 3310 dirstate via wctx.repo().dirstate.
3310 3311
3311 3312 This list is emptied out after each status run -- extensions should
3312 3313 make sure they add to this list each time dirstate.status is called.
3313 3314 Extensions should also make sure they don't call this for statuses
3314 3315 that don't involve the dirstate.
3315 3316 """
3316 3317
3317 3318 # The list is located here for uniqueness reasons -- it is actually
3318 3319 # managed by the workingctx, but that isn't unique per-repo.
3319 3320 self._postdsstatus.append(ps)
3320 3321
3321 3322 def postdsstatus(self):
3322 3323 """Used by workingctx to get the list of post-dirstate-status hooks."""
3323 3324 return self._postdsstatus
3324 3325
3325 3326 def clearpostdsstatus(self):
3326 3327 """Used by workingctx to clear post-dirstate-status hooks."""
3327 3328 del self._postdsstatus[:]
3328 3329
3329 3330 def heads(self, start=None):
3330 3331 if start is None:
3331 3332 cl = self.changelog
3332 3333 headrevs = reversed(cl.headrevs())
3333 3334 return [cl.node(rev) for rev in headrevs]
3334 3335
3335 3336 heads = self.changelog.heads(start)
3336 3337 # sort the output in rev descending order
3337 3338 return sorted(heads, key=self.changelog.rev, reverse=True)
3338 3339
3339 3340 def branchheads(self, branch=None, start=None, closed=False):
3340 3341 """return a (possibly filtered) list of heads for the given branch
3341 3342
3342 3343 Heads are returned in topological order, from newest to oldest.
3343 3344 If branch is None, use the dirstate branch.
3344 3345 If start is not None, return only heads reachable from start.
3345 3346 If closed is True, return heads that are marked as closed as well.
3346 3347 """
3347 3348 if branch is None:
3348 3349 branch = self[None].branch()
3349 3350 branches = self.branchmap()
3350 3351 if not branches.hasbranch(branch):
3351 3352 return []
3352 3353 # the cache returns heads ordered lowest to highest
3353 3354 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3354 3355 if start is not None:
3355 3356 # filter out the heads that cannot be reached from startrev
3356 3357 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3357 3358 bheads = [h for h in bheads if h in fbheads]
3358 3359 return bheads
3359 3360
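An illustrative query using the `branchheads` API documented above; the branch name is made up:

    heads = repo.branchheads(b'default', closed=True)
    for node in heads:
        repo.ui.status(b'%s\n' % hex(node))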
3360 3361 def branches(self, nodes):
3361 3362 if not nodes:
3362 3363 nodes = [self.changelog.tip()]
3363 3364 b = []
3364 3365 for n in nodes:
3365 3366 t = n
3366 3367 while True:
3367 3368 p = self.changelog.parents(n)
3368 3369 if p[1] != self.nullid or p[0] == self.nullid:
3369 3370 b.append((t, n, p[0], p[1]))
3370 3371 break
3371 3372 n = p[0]
3372 3373 return b
3373 3374
3374 3375 def between(self, pairs):
3375 3376 r = []
3376 3377
3377 3378 for top, bottom in pairs:
3378 3379 n, l, i = top, [], 0
3379 3380 f = 1
3380 3381
3381 3382 while n != bottom and n != self.nullid:
3382 3383 p = self.changelog.parents(n)[0]
3383 3384 if i == f:
3384 3385 l.append(n)
3385 3386 f = f * 2
3386 3387 n = p
3387 3388 i += 1
3388 3389
3389 3390 r.append(l)
3390 3391
3391 3392 return r
3392 3393
3393 3394 def checkpush(self, pushop):
3394 3395 """Extensions can override this function if additional checks have
3395 3396 to be performed before pushing, or call it if they override push
3396 3397 command.
3397 3398 """
3398 3399
3399 3400 @unfilteredpropertycache
3400 3401 def prepushoutgoinghooks(self):
3401 3402 """Return util.hooks consists of a pushop with repo, remote, outgoing
3402 3403 methods, which are called before pushing changesets.
3403 3404 """
3404 3405 return util.hooks()
3405 3406
3406 3407 def pushkey(self, namespace, key, old, new):
3407 3408 try:
3408 3409 tr = self.currenttransaction()
3409 3410 hookargs = {}
3410 3411 if tr is not None:
3411 3412 hookargs.update(tr.hookargs)
3412 3413 hookargs = pycompat.strkwargs(hookargs)
3413 3414 hookargs['namespace'] = namespace
3414 3415 hookargs['key'] = key
3415 3416 hookargs['old'] = old
3416 3417 hookargs['new'] = new
3417 3418 self.hook(b'prepushkey', throw=True, **hookargs)
3418 3419 except error.HookAbort as exc:
3419 3420 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3420 3421 if exc.hint:
3421 3422 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3422 3423 return False
3423 3424 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3424 3425 ret = pushkey.push(self, namespace, key, old, new)
3425 3426
3426 3427 def runhook(unused_success):
3427 3428 self.hook(
3428 3429 b'pushkey',
3429 3430 namespace=namespace,
3430 3431 key=key,
3431 3432 old=old,
3432 3433 new=new,
3433 3434 ret=ret,
3434 3435 )
3435 3436
3436 3437 self._afterlock(runhook)
3437 3438 return ret
3438 3439
3439 3440 def listkeys(self, namespace):
3440 3441 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3441 3442 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3442 3443 values = pushkey.list(self, namespace)
3443 3444 self.hook(b'listkeys', namespace=namespace, values=values)
3444 3445 return values
3445 3446
3446 3447 def debugwireargs(self, one, two, three=None, four=None, five=None):
3447 3448 '''used to test argument passing over the wire'''
3448 3449 return b"%s %s %s %s %s" % (
3449 3450 one,
3450 3451 two,
3451 3452 pycompat.bytestr(three),
3452 3453 pycompat.bytestr(four),
3453 3454 pycompat.bytestr(five),
3454 3455 )
3455 3456
3456 3457 def savecommitmessage(self, text):
3457 3458 fp = self.vfs(b'last-message.txt', b'wb')
3458 3459 try:
3459 3460 fp.write(text)
3460 3461 finally:
3461 3462 fp.close()
3462 3463 return self.pathto(fp.name[len(self.root) + 1 :])
3463 3464
3464 3465 def register_wanted_sidedata(self, category):
3465 3466 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3466 3467 # Only revlogv2 repos can want sidedata.
3467 3468 return
3468 3469 self._wanted_sidedata.add(pycompat.bytestr(category))
3469 3470
3470 3471 def register_sidedata_computer(
3471 3472 self, kind, category, keys, computer, flags, replace=False
3472 3473 ):
3473 3474 if kind not in revlogconst.ALL_KINDS:
3474 3475 msg = _(b"unexpected revlog kind '%s'.")
3475 3476 raise error.ProgrammingError(msg % kind)
3476 3477 category = pycompat.bytestr(category)
3477 3478 already_registered = category in self._sidedata_computers.get(kind, [])
3478 3479 if already_registered and not replace:
3479 3480 msg = _(
3480 3481 b"cannot register a sidedata computer twice for category '%s'."
3481 3482 )
3482 3483 raise error.ProgrammingError(msg % category)
3483 3484 if replace and not already_registered:
3484 3485 msg = _(
3485 3486 b"cannot replace a sidedata computer that isn't registered "
3486 3487 b"for category '%s'."
3487 3488 )
3488 3489 raise error.ProgrammingError(msg % category)
3489 3490 self._sidedata_computers.setdefault(kind, {})
3490 3491 self._sidedata_computers[kind][category] = (keys, computer, flags)
3491 3492
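A hedged sketch of registering a sidedata computer through the API above. The category, keys, computer signature, and return shape are assumptions made for illustration and should be checked against revlogutils.sidedata before relying on them:

    from mercurial.revlogutils import constants as revlog_constants

    def compute_example(repo, store, rev, prev_sidedata):
        # assumed contract: return (sidedata dict, extra revision flags)
        return {b'example-key': b'example-value'}, 0

    repo.register_wanted_sidedata(b'example-category')
    repo.register_sidedata_computer(
        revlog_constants.KIND_CHANGELOG,
        b'example-category',
        (b'example-key',),
        compute_example,
        0,  # no extra revision flags requested
    )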
3492 3493
3493 3494 # used to avoid circular references so destructors work
3494 3495 def aftertrans(files):
3495 3496 renamefiles = [tuple(t) for t in files]
3496 3497
3497 3498 def a():
3498 3499 for vfs, src, dest in renamefiles:
3499 3500 # if src and dest refer to the same file, vfs.rename is a no-op,
3500 3501 # leaving both src and dest on disk. delete dest to make sure
3501 3502 # the rename couldn't be such a no-op.
3502 3503 vfs.tryunlink(dest)
3503 3504 try:
3504 3505 vfs.rename(src, dest)
3505 3506 except OSError as exc: # journal file does not yet exist
3506 3507 if exc.errno != errno.ENOENT:
3507 3508 raise
3508 3509
3509 3510 return a
3510 3511
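A sketch of how `aftertrans` is meant to be used by the transaction machinery; the file list here is illustrative, the real call site passes the journal files of the pending transaction:

    files = [
        (repo.svfs, b'journal', b'undo'),
        (repo.vfs, b'journal.dirstate', b'undo.dirstate'),
    ]
    rename_journal = aftertrans(files)
    # invoked after the transaction closes; renames each journal.* to undo.*
    rename_journal()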
3511 3512
3512 3513 def undoname(fn):
3513 3514 base, name = os.path.split(fn)
3514 3515 assert name.startswith(b'journal')
3515 3516 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3516 3517
3517 3518
3518 3519 def instance(ui, path, create, intents=None, createopts=None):
3519 3520
3520 3521 # prevent cyclic import localrepo -> upgrade -> localrepo
3521 3522 from . import upgrade
3522 3523
3523 3524 localpath = urlutil.urllocalpath(path)
3524 3525 if create:
3525 3526 createrepository(ui, localpath, createopts=createopts)
3526 3527
3527 3528 def repo_maker():
3528 3529 return makelocalrepository(ui, localpath, intents=intents)
3529 3530
3530 3531 repo = repo_maker()
3531 3532 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3532 3533 return repo
3533 3534
3534 3535
3535 3536 def islocal(path):
3536 3537 return True
3537 3538
3538 3539
3539 3540 def defaultcreateopts(ui, createopts=None):
3540 3541 """Populate the default creation options for a repository.
3541 3542
3542 3543 A dictionary of explicitly requested creation options can be passed
3543 3544 in. Missing keys will be populated.
3544 3545 """
3545 3546 createopts = dict(createopts or {})
3546 3547
3547 3548 if b'backend' not in createopts:
3548 3549 # experimental config: storage.new-repo-backend
3549 3550 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3550 3551
3551 3552 return createopts
3552 3553
3553 3554
3554 3555 def clone_requirements(ui, createopts, srcrepo):
3555 3556 """clone the requirements of a local repo for a local clone
3556 3557
3557 3558 The store requirements are unchanged while the working copy requirements
3558 3559 depend on the configuration.
3559 3560 """
3560 3561 target_requirements = set()
3561 3562 if not srcrepo.requirements:
3562 3563 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3563 3564 # with it.
3564 3565 return target_requirements
3565 3566 createopts = defaultcreateopts(ui, createopts=createopts)
3566 3567 for r in newreporequirements(ui, createopts):
3567 3568 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3568 3569 target_requirements.add(r)
3569 3570
3570 3571 for r in srcrepo.requirements:
3571 3572 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3572 3573 target_requirements.add(r)
3573 3574 return target_requirements
3574 3575
3575 3576
3576 3577 def newreporequirements(ui, createopts):
3577 3578 """Determine the set of requirements for a new local repository.
3578 3579
3579 3580 Extensions can wrap this function to specify custom requirements for
3580 3581 new repositories.
3581 3582 """
3582 3583
3583 3584 if b'backend' not in createopts:
3584 3585 raise error.ProgrammingError(
3585 3586 b'backend key not present in createopts; '
3586 3587 b'was defaultcreateopts() called?'
3587 3588 )
3588 3589
3589 3590 if createopts[b'backend'] != b'revlogv1':
3590 3591 raise error.Abort(
3591 3592 _(
3592 3593 b'unable to determine repository requirements for '
3593 3594 b'storage backend: %s'
3594 3595 )
3595 3596 % createopts[b'backend']
3596 3597 )
3597 3598
3598 3599 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3599 3600 if ui.configbool(b'format', b'usestore'):
3600 3601 requirements.add(requirementsmod.STORE_REQUIREMENT)
3601 3602 if ui.configbool(b'format', b'usefncache'):
3602 3603 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3603 3604 if ui.configbool(b'format', b'dotencode'):
3604 3605 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3605 3606
3606 3607 compengines = ui.configlist(b'format', b'revlog-compression')
3607 3608 for compengine in compengines:
3608 3609 if compengine in util.compengines:
3609 3610 engine = util.compengines[compengine]
3610 3611 if engine.available() and engine.revlogheader():
3611 3612 break
3612 3613 else:
3613 3614 raise error.Abort(
3614 3615 _(
3615 3616 b'compression engines %s defined by '
3616 3617 b'format.revlog-compression not available'
3617 3618 )
3618 3619 % b', '.join(b'"%s"' % e for e in compengines),
3619 3620 hint=_(
3620 3621 b'run "hg debuginstall" to list available '
3621 3622 b'compression engines'
3622 3623 ),
3623 3624 )
3624 3625
3625 3626 # zlib is the historical default and doesn't need an explicit requirement.
3626 3627 if compengine == b'zstd':
3627 3628 requirements.add(b'revlog-compression-zstd')
3628 3629 elif compengine != b'zlib':
3629 3630 requirements.add(b'exp-compression-%s' % compengine)
3630 3631
3631 3632 if scmutil.gdinitconfig(ui):
3632 3633 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3633 3634 if ui.configbool(b'format', b'sparse-revlog'):
3634 3635 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3635 3636
3636 3637 # experimental config: format.use-dirstate-v2
3637 3638 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3638 3639 if ui.configbool(b'format', b'use-dirstate-v2'):
3639 3640 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3640 3641
3641 3642 # experimental config: format.exp-use-copies-side-data-changeset
3642 3643 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3643 3644 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3644 3645 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3645 3646 if ui.configbool(b'experimental', b'treemanifest'):
3646 3647 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3647 3648
3648 3649 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3649 3650 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3650 3651 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3651 3652
3652 3653 revlogv2 = ui.config(b'experimental', b'revlogv2')
3653 3654 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3654 3655 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3655 3656 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3656 3657 # experimental config: format.internal-phase
3657 3658 if ui.configbool(b'format', b'internal-phase'):
3658 3659 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3659 3660
3660 3661 if createopts.get(b'narrowfiles'):
3661 3662 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3662 3663
3663 3664 if createopts.get(b'lfs'):
3664 3665 requirements.add(b'lfs')
3665 3666
3666 3667 if ui.configbool(b'format', b'bookmarks-in-store'):
3667 3668 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3668 3669
3669 3670 if ui.configbool(b'format', b'use-persistent-nodemap'):
3670 3671 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3671 3672
3672 3673 # if share-safe is enabled, let's create the new repository with the new
3673 3674 # requirement
3674 3675 if ui.configbool(b'format', b'use-share-safe'):
3675 3676 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3676 3677
3677 3678 # if we are creating a share-repo¹ we have to handle requirement
3678 3679 # differently.
3679 3680 #
3680 3681 # [1] (i.e. reusing the store from another repository, just having a
3681 3682 # working copy)
3682 3683 if b'sharedrepo' in createopts:
3683 3684 source_requirements = set(createopts[b'sharedrepo'].requirements)
3684 3685
3685 3686 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3686 3687 # share to an old school repository, we have to copy the
3687 3688 # requirements and hope for the best.
3688 3689 requirements = source_requirements
3689 3690 else:
3690 3691 # We have control over the working copy only, so "copy" the non
3691 3692 # working copy part over, ignoring previous logic.
3692 3693 to_drop = set()
3693 3694 for req in requirements:
3694 3695 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3695 3696 continue
3696 3697 if req in source_requirements:
3697 3698 continue
3698 3699 to_drop.add(req)
3699 3700 requirements -= to_drop
3700 3701 requirements |= source_requirements
3701 3702
3702 3703 if createopts.get(b'sharedrelative'):
3703 3704 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3704 3705 else:
3705 3706 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3706 3707
3707 3708 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3708 3709 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3709 3710 msg = _("ignoring unknown tracked key version: %d\n")
3710 3711 hint = _("see `hg help config.format.use-dirstate-tracked-hint.version`")
3711 3712 if version != 1:
3712 3713 ui.warn(msg % version, hint=hint)
3713 3714 else:
3714 3715 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3715 3716
3716 3717 return requirements
3717 3718
3718 3719
3719 3720 def checkrequirementscompat(ui, requirements):
3720 3721 """Checks compatibility of repository requirements enabled and disabled.
3721 3722
3722 3723 Returns a set of requirements which need to be dropped because dependent
3723 3724 requirements are not enabled. Also warns users about it
3724 3725
3725 3726 dropped = set()
3726 3727
3727 3728 if requirementsmod.STORE_REQUIREMENT not in requirements:
3728 3729 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3729 3730 ui.warn(
3730 3731 _(
3731 3732 b'ignoring enabled \'format.bookmarks-in-store\' config '
3732 3733 b'because it is incompatible with disabled '
3733 3734 b'\'format.usestore\' config\n'
3734 3735 )
3735 3736 )
3736 3737 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3737 3738
3738 3739 if (
3739 3740 requirementsmod.SHARED_REQUIREMENT in requirements
3740 3741 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3741 3742 ):
3742 3743 raise error.Abort(
3743 3744 _(
3744 3745 b"cannot create shared repository as source was created"
3745 3746 b" with 'format.usestore' config disabled"
3746 3747 )
3747 3748 )
3748 3749
3749 3750 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3750 3751 if ui.hasconfig(b'format', b'use-share-safe'):
3751 3752 msg = _(
3752 3753 b"ignoring enabled 'format.use-share-safe' config because "
3753 3754 b"it is incompatible with disabled 'format.usestore'"
3754 3755 b" config\n"
3755 3756 )
3756 3757 ui.warn(msg)
3757 3758 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3758 3759
3759 3760 return dropped
3760 3761
3761 3762
3762 3763 def filterknowncreateopts(ui, createopts):
3763 3764 """Filters a dict of repo creation options against options that are known.
3764 3765
3765 3766 Receives a dict of repo creation options and returns a dict of those
3766 3767 options that we don't know how to handle.
3767 3768
3768 3769 This function is called as part of repository creation. If the
3769 3770 returned dict contains any items, repository creation will not
3770 3771 be allowed, as it means there was a request to create a repository
3771 3772 with options not recognized by loaded code.
3772 3773
3773 3774 Extensions can wrap this function to filter out creation options
3774 3775 they know how to handle.
3775 3776 """
3776 3777 known = {
3777 3778 b'backend',
3778 3779 b'lfs',
3779 3780 b'narrowfiles',
3780 3781 b'sharedrepo',
3781 3782 b'sharedrelative',
3782 3783 b'shareditems',
3783 3784 b'shallowfilestore',
3784 3785 }
3785 3786
3786 3787 return {k: v for k, v in createopts.items() if k not in known}
3787 3788
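Following the docstring above, an extension could wrap `filterknowncreateopts` to accept an extra creation option. A minimal sketch, where the option name and the `extsetup` hook are hypothetical:

    from mercurial import extensions, localrepo

    def _filtercreateopts(orig, ui, createopts):
        unknown = orig(ui, createopts)
        unknown.pop(b'my-extension-option', None)  # hypothetical option name
        return unknown

    def extsetup(ui):
        extensions.wrapfunction(
            localrepo, 'filterknowncreateopts', _filtercreateopts
        )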
3788 3789
3789 3790 def createrepository(ui, path, createopts=None, requirements=None):
3790 3791 """Create a new repository in a vfs.
3791 3792
3792 3793 ``path`` path to the new repo's working directory.
3793 3794 ``createopts`` options for the new repository.
3794 3795 ``requirements`` predefined set of requirements.
3795 3796 (incompatible with ``createopts``)
3796 3797
3797 3798 The following keys for ``createopts`` are recognized:
3798 3799
3799 3800 backend
3800 3801 The storage backend to use.
3801 3802 lfs
3802 3803 Repository will be created with ``lfs`` requirement. The lfs extension
3803 3804 will automatically be loaded when the repository is accessed.
3804 3805 narrowfiles
3805 3806 Set up repository to support narrow file storage.
3806 3807 sharedrepo
3807 3808 Repository object from which storage should be shared.
3808 3809 sharedrelative
3809 3810 Boolean indicating if the path to the shared repo should be
3810 3811 stored as relative. By default, the pointer to the "parent" repo
3811 3812 is stored as an absolute path.
3812 3813 shareditems
3813 3814 Set of items to share to the new repository (in addition to storage).
3814 3815 shallowfilestore
3815 3816 Indicates that storage for files should be shallow (not all ancestor
3816 3817 revisions are known).
3817 3818 """
3818 3819
3819 3820 if requirements is not None:
3820 3821 if createopts is not None:
3821 3822 msg = b'cannot specify both createopts and requirements'
3822 3823 raise error.ProgrammingError(msg)
3823 3824 createopts = {}
3824 3825 else:
3825 3826 createopts = defaultcreateopts(ui, createopts=createopts)
3826 3827
3827 3828 unknownopts = filterknowncreateopts(ui, createopts)
3828 3829
3829 3830 if not isinstance(unknownopts, dict):
3830 3831 raise error.ProgrammingError(
3831 3832 b'filterknowncreateopts() did not return a dict'
3832 3833 )
3833 3834
3834 3835 if unknownopts:
3835 3836 raise error.Abort(
3836 3837 _(
3837 3838 b'unable to create repository because of unknown '
3838 3839 b'creation option: %s'
3839 3840 )
3840 3841 % b', '.join(sorted(unknownopts)),
3841 3842 hint=_(b'is a required extension not loaded?'),
3842 3843 )
3843 3844
3844 3845 requirements = newreporequirements(ui, createopts=createopts)
3845 3846 requirements -= checkrequirementscompat(ui, requirements)
3846 3847
3847 3848 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3848 3849
3849 3850 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3850 3851 if hgvfs.exists():
3851 3852 raise error.RepoError(_(b'repository %s already exists') % path)
3852 3853
3853 3854 if b'sharedrepo' in createopts:
3854 3855 sharedpath = createopts[b'sharedrepo'].sharedpath
3855 3856
3856 3857 if createopts.get(b'sharedrelative'):
3857 3858 try:
3858 3859 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3859 3860 sharedpath = util.pconvert(sharedpath)
3860 3861 except (IOError, ValueError) as e:
3861 3862 # ValueError is raised on Windows if the drive letters differ
3862 3863 # on each path.
3863 3864 raise error.Abort(
3864 3865 _(b'cannot calculate relative path'),
3865 3866 hint=stringutil.forcebytestr(e),
3866 3867 )
3867 3868
3868 3869 if not wdirvfs.exists():
3869 3870 wdirvfs.makedirs()
3870 3871
3871 3872 hgvfs.makedir(notindexed=True)
3872 3873 if b'sharedrepo' not in createopts:
3873 3874 hgvfs.mkdir(b'cache')
3874 3875 hgvfs.mkdir(b'wcache')
3875 3876
3876 3877 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3877 3878 if has_store and b'sharedrepo' not in createopts:
3878 3879 hgvfs.mkdir(b'store')
3879 3880
3880 3881 # We create an invalid changelog outside the store so very old
3881 3882 # Mercurial versions (which didn't know about the requirements
3882 3883 # file) encounter an error on reading the changelog. This
3883 3884 # effectively locks out old clients and prevents them from
3884 3885 # mucking with a repo in an unknown format.
3885 3886 #
3886 3887 # The revlog header has version 65535, which won't be recognized by
3887 3888 # such old clients.
3888 3889 hgvfs.append(
3889 3890 b'00changelog.i',
3890 3891 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3891 3892 b'layout',
3892 3893 )
3893 3894
3894 3895 # Filter the requirements into working copy and store ones
3895 3896 wcreq, storereq = scmutil.filterrequirements(requirements)
3896 3897 # write working copy ones
3897 3898 scmutil.writerequires(hgvfs, wcreq)
3898 3899 # If there are store requirements and the current repository
3899 3900 # is not a shared one, write the store requirements.
3900 3901 # For a new shared repository, we don't need to write the store
3901 3902 # requirements as they are already present in store requires
3902 3903 if storereq and b'sharedrepo' not in createopts:
3903 3904 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3904 3905 scmutil.writerequires(storevfs, storereq)
3905 3906
3906 3907 # Write out file telling readers where to find the shared store.
3907 3908 if b'sharedrepo' in createopts:
3908 3909 hgvfs.write(b'sharedpath', sharedpath)
3909 3910
3910 3911 if createopts.get(b'shareditems'):
3911 3912 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3912 3913 hgvfs.write(b'shared', shared)
3913 3914
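An illustrative invocation of `createrepository` with a couple of the documented `createopts` keys; the path is made up:

    from mercurial import localrepo

    localrepo.createrepository(
        ui,
        b'/tmp/example-repo',
        createopts={b'lfs': True, b'narrowfiles': False},
    )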
3914 3915
3915 3916 def poisonrepository(repo):
3916 3917 """Poison a repository instance so it can no longer be used."""
3917 3918 # Perform any cleanup on the instance.
3918 3919 repo.close()
3919 3920
3920 3921 # Our strategy is to replace the type of the object with one that
3921 3922 # has all attribute lookups result in error.
3922 3923 #
3923 3924 # But we have to allow the close() method because some constructors
3924 3925 # of repos call close() on repo references.
3925 3926 class poisonedrepository:
3926 3927 def __getattribute__(self, item):
3927 3928 if item == 'close':
3928 3929 return object.__getattribute__(self, item)
3929 3930
3930 3931 raise error.ProgrammingError(
3931 3932 b'repo instances should not be used after unshare'
3932 3933 )
3933 3934
3934 3935 def close(self):
3935 3936 pass
3936 3937
3937 3938 # We may have a repoview, which intercepts __setattr__. So be sure
3938 3939 # we operate at the lowest level possible.
3939 3940 object.__setattr__(repo, '__class__', poisonedrepository)
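A short sketch of the poisoning behaviour described above; the error handling is shown only to illustrate that any further attribute access fails loudly:

    poisonrepository(repo)
    repo.close()  # close() keeps working by design
    try:
        repo.status()
    except error.ProgrammingError:
        pass  # any other attribute lookup now raises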
@@ -1,3322 +1,3342 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 # coding: utf8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Storage back-end for Mercurial.
10 10
11 11 This provides efficient delta storage with O(1) retrieve and append
12 12 and O(changes) merge between branches.
13 13 """
14 14
15 15
16 16 import binascii
17 17 import collections
18 18 import contextlib
19 19 import errno
20 20 import io
21 21 import os
22 22 import struct
23 23 import zlib
24 24
25 25 # import stuff from node for others to import from revlog
26 26 from .node import (
27 27 bin,
28 28 hex,
29 29 nullrev,
30 30 sha1nodeconstants,
31 31 short,
32 32 wdirrev,
33 33 )
34 34 from .i18n import _
35 35 from .pycompat import getattr
36 36 from .revlogutils.constants import (
37 37 ALL_KINDS,
38 38 CHANGELOGV2,
39 39 COMP_MODE_DEFAULT,
40 40 COMP_MODE_INLINE,
41 41 COMP_MODE_PLAIN,
42 42 ENTRY_RANK,
43 43 FEATURES_BY_VERSION,
44 44 FLAG_GENERALDELTA,
45 45 FLAG_INLINE_DATA,
46 46 INDEX_HEADER,
47 47 KIND_CHANGELOG,
48 48 RANK_UNKNOWN,
49 49 REVLOGV0,
50 50 REVLOGV1,
51 51 REVLOGV1_FLAGS,
52 52 REVLOGV2,
53 53 REVLOGV2_FLAGS,
54 54 REVLOG_DEFAULT_FLAGS,
55 55 REVLOG_DEFAULT_FORMAT,
56 56 REVLOG_DEFAULT_VERSION,
57 57 SUPPORTED_FLAGS,
58 58 )
59 59 from .revlogutils.flagutil import (
60 60 REVIDX_DEFAULT_FLAGS,
61 61 REVIDX_ELLIPSIS,
62 62 REVIDX_EXTSTORED,
63 63 REVIDX_FLAGS_ORDER,
64 64 REVIDX_HASCOPIESINFO,
65 65 REVIDX_ISCENSORED,
66 66 REVIDX_RAWTEXT_CHANGING_FLAGS,
67 67 )
68 68 from .thirdparty import attr
69 69 from . import (
70 70 ancestor,
71 71 dagop,
72 72 error,
73 73 mdiff,
74 74 policy,
75 75 pycompat,
76 76 revlogutils,
77 77 templatefilters,
78 78 util,
79 79 )
80 80 from .interfaces import (
81 81 repository,
82 82 util as interfaceutil,
83 83 )
84 84 from .revlogutils import (
85 85 deltas as deltautil,
86 86 docket as docketutil,
87 87 flagutil,
88 88 nodemap as nodemaputil,
89 89 randomaccessfile,
90 90 revlogv0,
91 91 rewrite,
92 92 sidedata as sidedatautil,
93 93 )
94 94 from .utils import (
95 95 storageutil,
96 96 stringutil,
97 97 )
98 98
99 99 # blanked usage of all the names to prevent pyflakes constraints
100 100 # We need these names available in the module for extensions.
101 101
102 102 REVLOGV0
103 103 REVLOGV1
104 104 REVLOGV2
105 105 CHANGELOGV2
106 106 FLAG_INLINE_DATA
107 107 FLAG_GENERALDELTA
108 108 REVLOG_DEFAULT_FLAGS
109 109 REVLOG_DEFAULT_FORMAT
110 110 REVLOG_DEFAULT_VERSION
111 111 REVLOGV1_FLAGS
112 112 REVLOGV2_FLAGS
113 113 REVIDX_ISCENSORED
114 114 REVIDX_ELLIPSIS
115 115 REVIDX_HASCOPIESINFO
116 116 REVIDX_EXTSTORED
117 117 REVIDX_DEFAULT_FLAGS
118 118 REVIDX_FLAGS_ORDER
119 119 REVIDX_RAWTEXT_CHANGING_FLAGS
120 120
121 121 parsers = policy.importmod('parsers')
122 122 rustancestor = policy.importrust('ancestor')
123 123 rustdagop = policy.importrust('dagop')
124 124 rustrevlog = policy.importrust('revlog')
125 125
126 126 # Aliased for performance.
127 127 _zlibdecompress = zlib.decompress
128 128
129 129 # max size of revlog with inline data
130 130 _maxinline = 131072
131 131
132 132 # Flag processors for REVIDX_ELLIPSIS.
133 133 def ellipsisreadprocessor(rl, text):
134 134 return text, False
135 135
136 136
137 137 def ellipsiswriteprocessor(rl, text):
138 138 return text, False
139 139
140 140
141 141 def ellipsisrawprocessor(rl, text):
142 142 return False
143 143
144 144
145 145 ellipsisprocessor = (
146 146 ellipsisreadprocessor,
147 147 ellipsiswriteprocessor,
148 148 ellipsisrawprocessor,
149 149 )
150 150
151 151
152 152 def _verify_revision(rl, skipflags, state, node):
153 153 """Verify the integrity of the given revlog ``node`` while providing a hook
154 154 point for extensions to influence the operation."""
155 155 if skipflags:
156 156 state[b'skipread'].add(node)
157 157 else:
158 158 # Side-effect: read content and verify hash.
159 159 rl.revision(node)
160 160
161 161
162 162 # True if a fast implementation for persistent-nodemap is available
163 163 #
164 164 # We also consider we have a "fast" implementation in "pure" python because
165 165 # people using pure don't really have performance considerations (and a
166 166 # wheelbarrow of other slowness sources)
167 167 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
168 168 parsers, 'BaseIndexObject'
169 169 )
170 170
171 171
172 172 @interfaceutil.implementer(repository.irevisiondelta)
173 173 @attr.s(slots=True)
174 174 class revlogrevisiondelta:
175 175 node = attr.ib()
176 176 p1node = attr.ib()
177 177 p2node = attr.ib()
178 178 basenode = attr.ib()
179 179 flags = attr.ib()
180 180 baserevisionsize = attr.ib()
181 181 revision = attr.ib()
182 182 delta = attr.ib()
183 183 sidedata = attr.ib()
184 184 protocol_flags = attr.ib()
185 185 linknode = attr.ib(default=None)
186 186
187 187
188 188 @interfaceutil.implementer(repository.iverifyproblem)
189 189 @attr.s(frozen=True)
190 190 class revlogproblem:
191 191 warning = attr.ib(default=None)
192 192 error = attr.ib(default=None)
193 193 node = attr.ib(default=None)
194 194
195 195
196 196 def parse_index_v1(data, inline):
197 197 # call the C implementation to parse the index data
198 198 index, cache = parsers.parse_index2(data, inline)
199 199 return index, cache
200 200
201 201
202 202 def parse_index_v2(data, inline):
203 203 # call the C implementation to parse the index data
204 204 index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
205 205 return index, cache
206 206
207 207
208 208 def parse_index_cl_v2(data, inline):
209 209 # call the C implementation to parse the index data
210 210 index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
211 211 return index, cache
212 212
213 213
214 214 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
215 215
216 216 def parse_index_v1_nodemap(data, inline):
217 217 index, cache = parsers.parse_index_devel_nodemap(data, inline)
218 218 return index, cache
219 219
220 220
221 221 else:
222 222 parse_index_v1_nodemap = None
223 223
224 224
225 225 def parse_index_v1_mixed(data, inline):
226 226 index, cache = parse_index_v1(data, inline)
227 227 return rustrevlog.MixedIndex(index), cache
228 228
229 229
230 230 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
231 231 # signed integer)
232 232 _maxentrysize = 0x7FFFFFFF
233 233
234 234 FILE_TOO_SHORT_MSG = _(
235 235 b'cannot read from revlog %s;'
236 236 b' expected %d bytes from offset %d, data size is %d'
237 237 )
238 238
239 239
240 240 class revlog:
241 241 """
242 242 the underlying revision storage object
243 243
244 244 A revlog consists of two parts, an index and the revision data.
245 245
246 246 The index is a file with a fixed record size containing
247 247 information on each revision, including its nodeid (hash), the
248 248 nodeids of its parents, the position and offset of its data within
249 249 the data file, and the revision it's based on. Finally, each entry
250 250 contains a linkrev entry that can serve as a pointer to external
251 251 data.
252 252
253 253 The revision data itself is a linear collection of data chunks.
254 254 Each chunk represents a revision and is usually represented as a
255 255 delta against the previous chunk. To bound lookup time, runs of
256 256 deltas are limited to about 2 times the length of the original
257 257 version data. This makes retrieval of a version proportional to
258 258 its size, or O(1) relative to the number of revisions.
259 259
260 260 Both pieces of the revlog are written to in an append-only
261 261 fashion, which means we never need to rewrite a file to insert or
262 262 remove data, and can use some simple techniques to avoid the need
263 263 for locking while reading.
264 264
265 265 If checkambig, indexfile is opened with checkambig=True at
266 266 writing, to avoid file stat ambiguity.
267 267
268 268 If mmaplargeindex is True, and an mmapindexthreshold is set, the
269 269 index will be mmapped rather than read if it is larger than the
270 270 configured threshold.
271 271
272 272 If censorable is True, the revlog can have censored revisions.
273 273
274 274 If `upperboundcomp` is not None, this is the expected maximal gain from
275 275 compression for the data content.
276 276
277 277 `concurrencychecker` is an optional function that receives 3 arguments: a
278 278 file handle, a filename, and an expected position. It should check whether
279 279 the current position in the file handle is valid, and log/warn/fail (by
280 280 raising).
281 281
282 282 See mercurial/revlogutils/constants.py for details about the content of an
283 283 index entry.
284 284 """
285 285
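A hedged example of constructing a revlog directly, loosely mirroring what filelog does; the opener, target, and radix values are illustrative only:

    from mercurial.revlogutils.constants import KIND_FILELOG

    rl = revlog(
        repo.svfs,
        target=(KIND_FILELOG, b'README.txt'),
        radix=b'data/README.txt',
    )
    text = rl.revision(rl.node(0))  # reconstruct the first revision's full text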
286 286 _flagserrorclass = error.RevlogError
287 287
288 288 def __init__(
289 289 self,
290 290 opener,
291 291 target,
292 292 radix,
293 293 postfix=None, # only exist for `tmpcensored` now
294 294 checkambig=False,
295 295 mmaplargeindex=False,
296 296 censorable=False,
297 297 upperboundcomp=None,
298 298 persistentnodemap=False,
299 299 concurrencychecker=None,
300 300 trypending=False,
301 301 canonical_parent_order=True,
302 302 ):
303 303 """
304 304 create a revlog object
305 305
306 306 opener is a function that abstracts the file opening operation
307 307 and can be used to implement COW semantics or the like.
308 308
309 309 `target`: a (KIND, ID) tuple that identifies the content stored in
310 310 this revlog. It helps the rest of the code to understand what the revlog
311 311 is about without having to resort to heuristics and index filename
312 312 analysis. Note that this must reliably be set by normal code, but
313 313 test, debug, or performance measurement code might not set this to an
314 314 accurate value.
315 315 """
316 316 self.upperboundcomp = upperboundcomp
317 317
318 318 self.radix = radix
319 319
320 320 self._docket_file = None
321 321 self._indexfile = None
322 322 self._datafile = None
323 323 self._sidedatafile = None
324 324 self._nodemap_file = None
325 325 self.postfix = postfix
326 326 self._trypending = trypending
327 327 self.opener = opener
328 328 if persistentnodemap:
329 329 self._nodemap_file = nodemaputil.get_nodemap_file(self)
330 330
331 331 assert target[0] in ALL_KINDS
332 332 assert len(target) == 2
333 333 self.target = target
334 334 # When True, indexfile is opened with checkambig=True at writing, to
335 335 # avoid file stat ambiguity.
336 336 self._checkambig = checkambig
337 337 self._mmaplargeindex = mmaplargeindex
338 338 self._censorable = censorable
339 339 # 3-tuple of (node, rev, text) for a raw revision.
340 340 self._revisioncache = None
341 341 # Maps rev to chain base rev.
342 342 self._chainbasecache = util.lrucachedict(100)
343 343 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
344 344 self._chunkcache = (0, b'')
345 345 # How much data to read and cache into the raw revlog data cache.
346 346 self._chunkcachesize = 65536
347 347 self._maxchainlen = None
348 348 self._deltabothparents = True
349 self._debug_delta = False
349 350 self.index = None
350 351 self._docket = None
351 352 self._nodemap_docket = None
352 353 # Mapping of partial identifiers to full nodes.
353 354 self._pcache = {}
354 355 # Default compression engine and its per-engine options.
355 356 self._compengine = b'zlib'
356 357 self._compengineopts = {}
357 358 self._maxdeltachainspan = -1
358 359 self._withsparseread = False
359 360 self._sparserevlog = False
360 361 self.hassidedata = False
361 362 self._srdensitythreshold = 0.50
362 363 self._srmingapsize = 262144
363 364
364 365 # Make copy of flag processors so each revlog instance can support
365 366 # custom flags.
366 367 self._flagprocessors = dict(flagutil.flagprocessors)
367 368
368 369 # 3-tuple of file handles being used for active writing.
369 370 self._writinghandles = None
370 371 # prevent nesting of addgroup
371 372 self._adding_group = None
372 373
373 374 self._loadindex()
374 375
375 376 self._concurrencychecker = concurrencychecker
376 377
377 378 # parent order is supposed to be semantically irrelevant, so we
378 379 # normally resort parents to ensure that the first parent is non-null,
379 380 # if there is a non-null parent at all.
380 381 # filelog abuses the parent order as flag to mark some instances of
381 382 # meta-encoded files, so allow it to disable this behavior.
382 383 self.canonical_parent_order = canonical_parent_order
383 384
384 385 def _init_opts(self):
385 386 """process options (from above/config) to set up the associated default revlog mode
386 387
387 388 These values might be affected when actually reading on-disk information.
388 389
389 390 The relevant values are returned for use in _loadindex().
390 391
391 392 * newversionflags:
392 393 version header to use if we need to create a new revlog
393 394
394 395 * mmapindexthreshold:
395 396 minimal index size at which to start using mmap
396 397
397 398 * force_nodemap:
398 399 force the usage of a "development" version of the nodemap code
399 400 """
400 401 mmapindexthreshold = None
401 402 opts = self.opener.options
402 403
403 404 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
404 405 new_header = CHANGELOGV2
405 406 elif b'revlogv2' in opts:
406 407 new_header = REVLOGV2
407 408 elif b'revlogv1' in opts:
408 409 new_header = REVLOGV1 | FLAG_INLINE_DATA
409 410 if b'generaldelta' in opts:
410 411 new_header |= FLAG_GENERALDELTA
411 412 elif b'revlogv0' in self.opener.options:
412 413 new_header = REVLOGV0
413 414 else:
414 415 new_header = REVLOG_DEFAULT_VERSION
415 416
416 417 if b'chunkcachesize' in opts:
417 418 self._chunkcachesize = opts[b'chunkcachesize']
418 419 if b'maxchainlen' in opts:
419 420 self._maxchainlen = opts[b'maxchainlen']
420 421 if b'deltabothparents' in opts:
421 422 self._deltabothparents = opts[b'deltabothparents']
422 423 self._lazydelta = bool(opts.get(b'lazydelta', True))
423 424 self._lazydeltabase = False
424 425 if self._lazydelta:
425 426 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
427 if b'debug-delta' in opts:
428 self._debug_delta = opts[b'debug-delta']
426 429 if b'compengine' in opts:
427 430 self._compengine = opts[b'compengine']
428 431 if b'zlib.level' in opts:
429 432 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
430 433 if b'zstd.level' in opts:
431 434 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
432 435 if b'maxdeltachainspan' in opts:
433 436 self._maxdeltachainspan = opts[b'maxdeltachainspan']
434 437 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
435 438 mmapindexthreshold = opts[b'mmapindexthreshold']
436 439 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
437 440 withsparseread = bool(opts.get(b'with-sparse-read', False))
438 441 # sparse-revlog forces sparse-read
439 442 self._withsparseread = self._sparserevlog or withsparseread
440 443 if b'sparse-read-density-threshold' in opts:
441 444 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
442 445 if b'sparse-read-min-gap-size' in opts:
443 446 self._srmingapsize = opts[b'sparse-read-min-gap-size']
444 447 if opts.get(b'enableellipsis'):
445 448 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
446 449
447 450 # revlog v0 doesn't have flag processors
448 451 for flag, processor in opts.get(b'flagprocessors', {}).items():
449 452 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
450 453
451 454 if self._chunkcachesize <= 0:
452 455 raise error.RevlogError(
453 456 _(b'revlog chunk cache size %r is not greater than 0')
454 457 % self._chunkcachesize
455 458 )
456 459 elif self._chunkcachesize & (self._chunkcachesize - 1):
457 460 raise error.RevlogError(
458 461 _(b'revlog chunk cache size %r is not a power of 2')
459 462 % self._chunkcachesize
460 463 )
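# The check above is the classic power-of-two test: for example
# 65536 & 65535 == 0, while a non-power such as 65537 gives
# 65537 & 65536 == 65536 and is rejected.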
461 464 force_nodemap = opts.get(b'devel-force-nodemap', False)
462 465 return new_header, mmapindexthreshold, force_nodemap
463 466
464 467 def _get_data(self, filepath, mmap_threshold, size=None):
465 468 """return the content of a file, read with or without mmap
466 469
467 470 If the file is missing, return the empty string"""
468 471 try:
469 472 with self.opener(filepath) as fp:
470 473 if mmap_threshold is not None:
471 474 file_size = self.opener.fstat(fp).st_size
472 475 if file_size >= mmap_threshold:
473 476 if size is not None:
474 477 # avoid a potential mmap crash
475 478 size = min(file_size, size)
476 479 # TODO: should .close() to release resources without
477 480 # relying on Python GC
478 481 if size is None:
479 482 return util.buffer(util.mmapread(fp))
480 483 else:
481 484 return util.buffer(util.mmapread(fp, size))
482 485 if size is None:
483 486 return fp.read()
484 487 else:
485 488 return fp.read(size)
486 489 except IOError as inst:
487 490 if inst.errno != errno.ENOENT:
488 491 raise
489 492 return b''
490 493
491 494 def _loadindex(self, docket=None):
492 495
493 496 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
494 497
495 498 if self.postfix is not None:
496 499 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
497 500 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
498 501 entry_point = b'%s.i.a' % self.radix
499 502 else:
500 503 entry_point = b'%s.i' % self.radix
501 504
502 505 if docket is not None:
503 506 self._docket = docket
504 507 self._docket_file = entry_point
505 508 else:
506 509 entry_data = b''
507 510 self._initempty = True
508 511 entry_data = self._get_data(entry_point, mmapindexthreshold)
509 512 if len(entry_data) > 0:
510 513 header = INDEX_HEADER.unpack(entry_data[:4])[0]
511 514 self._initempty = False
512 515 else:
513 516 header = new_header
514 517
515 518 self._format_flags = header & ~0xFFFF
516 519 self._format_version = header & 0xFFFF
517 520
518 521 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
519 522 if supported_flags is None:
520 523 msg = _(b'unknown version (%d) in revlog %s')
521 524 msg %= (self._format_version, self.display_id)
522 525 raise error.RevlogError(msg)
523 526 elif self._format_flags & ~supported_flags:
524 527 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
525 528 display_flag = self._format_flags >> 16
526 529 msg %= (display_flag, self._format_version, self.display_id)
527 530 raise error.RevlogError(msg)
528 531
529 532 features = FEATURES_BY_VERSION[self._format_version]
530 533 self._inline = features[b'inline'](self._format_flags)
531 534 self._generaldelta = features[b'generaldelta'](self._format_flags)
532 535 self.hassidedata = features[b'sidedata']
533 536
534 537 if not features[b'docket']:
535 538 self._indexfile = entry_point
536 539 index_data = entry_data
537 540 else:
538 541 self._docket_file = entry_point
539 542 if self._initempty:
540 543 self._docket = docketutil.default_docket(self, header)
541 544 else:
542 545 self._docket = docketutil.parse_docket(
543 546 self, entry_data, use_pending=self._trypending
544 547 )
545 548
546 549 if self._docket is not None:
547 550 self._indexfile = self._docket.index_filepath()
548 551 index_data = b''
549 552 index_size = self._docket.index_end
550 553 if index_size > 0:
551 554 index_data = self._get_data(
552 555 self._indexfile, mmapindexthreshold, size=index_size
553 556 )
554 557 if len(index_data) < index_size:
555 558 msg = _(b'too few index data for %s: got %d, expected %d')
556 559 msg %= (self.display_id, len(index_data), index_size)
557 560 raise error.RevlogError(msg)
558 561
559 562 self._inline = False
560 563 # generaldelta implied by version 2 revlogs.
561 564 self._generaldelta = True
562 565 # the logic for persistent nodemap will be dealt with within the
563 566 # main docket, so disable it for now.
564 567 self._nodemap_file = None
565 568
566 569 if self._docket is not None:
567 570 self._datafile = self._docket.data_filepath()
568 571 self._sidedatafile = self._docket.sidedata_filepath()
569 572 elif self.postfix is None:
570 573 self._datafile = b'%s.d' % self.radix
571 574 else:
572 575 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
573 576
574 577 self.nodeconstants = sha1nodeconstants
575 578 self.nullid = self.nodeconstants.nullid
576 579
577 580 # sparse-revlog can't be on without general-delta (issue6056)
578 581 if not self._generaldelta:
579 582 self._sparserevlog = False
580 583
581 584 self._storedeltachains = True
582 585
583 586 devel_nodemap = (
584 587 self._nodemap_file
585 588 and force_nodemap
586 589 and parse_index_v1_nodemap is not None
587 590 )
588 591
589 592 use_rust_index = False
590 593 if rustrevlog is not None:
591 594 if self._nodemap_file is not None:
592 595 use_rust_index = True
593 596 else:
594 597 use_rust_index = self.opener.options.get(b'rust.index')
595 598
596 599 self._parse_index = parse_index_v1
597 600 if self._format_version == REVLOGV0:
598 601 self._parse_index = revlogv0.parse_index_v0
599 602 elif self._format_version == REVLOGV2:
600 603 self._parse_index = parse_index_v2
601 604 elif self._format_version == CHANGELOGV2:
602 605 self._parse_index = parse_index_cl_v2
603 606 elif devel_nodemap:
604 607 self._parse_index = parse_index_v1_nodemap
605 608 elif use_rust_index:
606 609 self._parse_index = parse_index_v1_mixed
607 610 try:
608 611 d = self._parse_index(index_data, self._inline)
609 612 index, chunkcache = d
610 613 use_nodemap = (
611 614 not self._inline
612 615 and self._nodemap_file is not None
613 616 and util.safehasattr(index, 'update_nodemap_data')
614 617 )
615 618 if use_nodemap:
616 619 nodemap_data = nodemaputil.persisted_data(self)
617 620 if nodemap_data is not None:
618 621 docket = nodemap_data[0]
619 622 if (
620 623 len(d[0]) > docket.tip_rev
621 624 and d[0][docket.tip_rev][7] == docket.tip_node
622 625 ):
623 626 # no changelog tampering
624 627 self._nodemap_docket = docket
625 628 index.update_nodemap_data(*nodemap_data)
626 629 except (ValueError, IndexError):
627 630 raise error.RevlogError(
628 631 _(b"index %s is corrupted") % self.display_id
629 632 )
630 633 self.index = index
631 634 self._segmentfile = randomaccessfile.randomaccessfile(
632 635 self.opener,
633 636 (self._indexfile if self._inline else self._datafile),
634 637 self._chunkcachesize,
635 638 chunkcache,
636 639 )
637 640 self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
638 641 self.opener,
639 642 self._sidedatafile,
640 643 self._chunkcachesize,
641 644 )
642 645 # revnum -> (chain-length, sum-delta-length)
643 646 self._chaininfocache = util.lrucachedict(500)
644 647 # revlog header -> revlog compressor
645 648 self._decompressors = {}
646 649
647 650 @util.propertycache
648 651 def revlog_kind(self):
649 652 return self.target[0]
650 653
651 654 @util.propertycache
652 655 def display_id(self):
653 656 """The public facing "ID" of the revlog that we use in message"""
654 657 # Maybe we should build a user facing representation of
655 658 # revlog.target instead of using `self.radix`
656 659 return self.radix
657 660
658 661 def _get_decompressor(self, t):
659 662 try:
660 663 compressor = self._decompressors[t]
661 664 except KeyError:
662 665 try:
663 666 engine = util.compengines.forrevlogheader(t)
664 667 compressor = engine.revlogcompressor(self._compengineopts)
665 668 self._decompressors[t] = compressor
666 669 except KeyError:
667 670 raise error.RevlogError(
668 671 _(b'unknown compression type %s') % binascii.hexlify(t)
669 672 )
670 673 return compressor
671 674
672 675 @util.propertycache
673 676 def _compressor(self):
674 677 engine = util.compengines[self._compengine]
675 678 return engine.revlogcompressor(self._compengineopts)
676 679
677 680 @util.propertycache
678 681 def _decompressor(self):
679 682 """the default decompressor"""
680 683 if self._docket is None:
681 684 return None
682 685 t = self._docket.default_compression_header
683 686 c = self._get_decompressor(t)
684 687 return c.decompress
685 688
686 689 def _indexfp(self):
687 690 """file object for the revlog's index file"""
688 691 return self.opener(self._indexfile, mode=b"r")
689 692
690 693 def __index_write_fp(self):
691 694 # You should not use this directly; use `_writing` instead
692 695 try:
693 696 f = self.opener(
694 697 self._indexfile, mode=b"r+", checkambig=self._checkambig
695 698 )
696 699 if self._docket is None:
697 700 f.seek(0, os.SEEK_END)
698 701 else:
699 702 f.seek(self._docket.index_end, os.SEEK_SET)
700 703 return f
701 704 except IOError as inst:
702 705 if inst.errno != errno.ENOENT:
703 706 raise
704 707 return self.opener(
705 708 self._indexfile, mode=b"w+", checkambig=self._checkambig
706 709 )
707 710
708 711 def __index_new_fp(self):
709 712 # You should not use this unless you are upgrading from inline revlog
710 713 return self.opener(
711 714 self._indexfile,
712 715 mode=b"w",
713 716 checkambig=self._checkambig,
714 717 atomictemp=True,
715 718 )
716 719
717 720 def _datafp(self, mode=b'r'):
718 721 """file object for the revlog's data file"""
719 722 return self.opener(self._datafile, mode=mode)
720 723
721 724 @contextlib.contextmanager
722 725 def _sidedatareadfp(self):
723 726 """file object suitable to read sidedata"""
724 727 if self._writinghandles:
725 728 yield self._writinghandles[2]
726 729 else:
727 730 with self.opener(self._sidedatafile) as fp:
728 731 yield fp
729 732
730 733 def tiprev(self):
731 734 return len(self.index) - 1
732 735
733 736 def tip(self):
734 737 return self.node(self.tiprev())
735 738
736 739 def __contains__(self, rev):
737 740 return 0 <= rev < len(self)
738 741
739 742 def __len__(self):
740 743 return len(self.index)
741 744
742 745 def __iter__(self):
743 746 return iter(pycompat.xrange(len(self)))
744 747
745 748 def revs(self, start=0, stop=None):
746 749 """iterate over all rev in this revlog (from start to stop)"""
747 750 return storageutil.iterrevs(len(self), start=start, stop=stop)
748 751
749 752 def hasnode(self, node):
750 753 try:
751 754 self.rev(node)
752 755 return True
753 756 except KeyError:
754 757 return False
755 758
756 759 def candelta(self, baserev, rev):
757 760 """whether two revisions (baserev, rev) can be delta-ed or not"""
758 761 # Disable delta if either rev requires a content-changing flag
759 762 # processor (ex. LFS). This is because such flag processor can alter
760 763 # the rawtext content that the delta will be based on, and two clients
761 764 # could have a same revlog node with different flags (i.e. different
762 765 # rawtext contents) and the delta could be incompatible.
763 766 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
764 767 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
765 768 ):
766 769 return False
767 770 return True
768 771
769 772 def update_caches(self, transaction):
770 773 if self._nodemap_file is not None:
771 774 if transaction is None:
772 775 nodemaputil.update_persistent_nodemap(self)
773 776 else:
774 777 nodemaputil.setup_persistent_nodemap(transaction, self)
775 778
776 779 def clearcaches(self):
777 780 self._revisioncache = None
778 781 self._chainbasecache.clear()
779 782 self._segmentfile.clear_cache()
780 783 self._segmentfile_sidedata.clear_cache()
781 784 self._pcache = {}
782 785 self._nodemap_docket = None
783 786 self.index.clearcaches()
784 787 # The python code is the one responsible for validating the docket, so we
785 788 # end up having to refresh it here.
786 789 use_nodemap = (
787 790 not self._inline
788 791 and self._nodemap_file is not None
789 792 and util.safehasattr(self.index, 'update_nodemap_data')
790 793 )
791 794 if use_nodemap:
792 795 nodemap_data = nodemaputil.persisted_data(self)
793 796 if nodemap_data is not None:
794 797 self._nodemap_docket = nodemap_data[0]
795 798 self.index.update_nodemap_data(*nodemap_data)
796 799
797 800 def rev(self, node):
798 801 try:
799 802 return self.index.rev(node)
800 803 except TypeError:
801 804 raise
802 805 except error.RevlogError:
803 806 # parsers.c radix tree lookup failed
804 807 if (
805 808 node == self.nodeconstants.wdirid
806 809 or node in self.nodeconstants.wdirfilenodeids
807 810 ):
808 811 raise error.WdirUnsupported
809 812 raise error.LookupError(node, self.display_id, _(b'no node'))
810 813
811 814 # Accessors for index entries.
812 815
813 816 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
814 817 # are flags.
815 818 def start(self, rev):
816 819 return int(self.index[rev][0] >> 16)
817 820
818 821 def sidedata_cut_off(self, rev):
819 822 sd_cut_off = self.index[rev][8]
820 823 if sd_cut_off != 0:
821 824 return sd_cut_off
822 825 # This is some annoying dance, because entries without sidedata
823 826 # currently use 0 as their offset. (instead of previous-offset +
824 827 # previous-size)
825 828 #
826 829 # We should reconsider this sidedata → 0 sidedata_offset policy.
827 830 # In the meantime, we need this.
828 831 while 0 <= rev:
829 832 e = self.index[rev]
830 833 if e[9] != 0:
831 834 return e[8] + e[9]
832 835 rev -= 1
833 836 return 0
834 837
835 838 def flags(self, rev):
836 839 return self.index[rev][0] & 0xFFFF
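# Packing example for the two accessors above: the first index field holds
# (offset << 16) | flags, so a revision stored at offset 1024 with flag 0x1
# yields 0x4000001; ``>> 16`` recovers 1024 and ``& 0xFFFF`` recovers 0x1.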
837 840
838 841 def length(self, rev):
839 842 return self.index[rev][1]
840 843
841 844 def sidedata_length(self, rev):
842 845 if not self.hassidedata:
843 846 return 0
844 847 return self.index[rev][9]
845 848
846 849 def rawsize(self, rev):
847 850 """return the length of the uncompressed text for a given revision"""
848 851 l = self.index[rev][2]
849 852 if l >= 0:
850 853 return l
851 854
852 855 t = self.rawdata(rev)
853 856 return len(t)
854 857
855 858 def size(self, rev):
856 859 """length of non-raw text (processed by a "read" flag processor)"""
857 860 # fast path: if no "read" flag processor could change the content,
858 861 # size is rawsize. note: ELLIPSIS is known to not change the content.
859 862 flags = self.flags(rev)
860 863 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
861 864 return self.rawsize(rev)
862 865
863 866 return len(self.revision(rev))
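# For example, with a content-changing "read" flag processor (such as the one
# used by the LFS extension) the stored rawtext is a short pointer while
# revision() returns the real file content, so size() and rawsize() differ.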
864 867
865 868 def fast_rank(self, rev):
866 869 """Return the rank of a revision if already known, or None otherwise.
867 870
868 871 The rank of a revision is the size of the sub-graph it defines as a
869 872 head. Equivalently, the rank of a revision `r` is the size of the set
870 873 `ancestors(r)`, `r` included.
871 874
872 875 This method returns the rank retrieved from the revlog in constant
873 876 time. It makes no attempt at computing unknown values for versions of
874 877 the revlog which do not persist the rank.
875 878 """
876 879 rank = self.index[rev][ENTRY_RANK]
877 880 if self._format_version != CHANGELOGV2 or rank == RANK_UNKNOWN:
878 881 return None
879 882 if rev == nullrev:
880 883 return 0 # convention
881 884 return rank
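# For example, in a linear history 0 <- 1 <- 2 the rank of revision 2 is 3
# (revisions 0, 1 and 2), provided the underlying format persists rank data;
# per the check above, only the changelog-v2 format does so here.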
882 885
883 886 def chainbase(self, rev):
884 887 base = self._chainbasecache.get(rev)
885 888 if base is not None:
886 889 return base
887 890
888 891 index = self.index
889 892 iterrev = rev
890 893 base = index[iterrev][3]
891 894 while base != iterrev:
892 895 iterrev = base
893 896 base = index[iterrev][3]
894 897
895 898 self._chainbasecache[rev] = base
896 899 return base
897 900
898 901 def linkrev(self, rev):
899 902 return self.index[rev][4]
900 903
901 904 def parentrevs(self, rev):
902 905 try:
903 906 entry = self.index[rev]
904 907 except IndexError:
905 908 if rev == wdirrev:
906 909 raise error.WdirUnsupported
907 910 raise
908 911
909 912 if self.canonical_parent_order and entry[5] == nullrev:
910 913 return entry[6], entry[5]
911 914 else:
912 915 return entry[5], entry[6]
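# For example, with canonical_parent_order a revision recorded with parents
# (nullrev, 5) is reported as (5, nullrev), so the first parent is non-null
# whenever any parent is.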
913 916
914 917 # fast parentrevs(rev) where rev isn't filtered
915 918 _uncheckedparentrevs = parentrevs
916 919
917 920 def node(self, rev):
918 921 try:
919 922 return self.index[rev][7]
920 923 except IndexError:
921 924 if rev == wdirrev:
922 925 raise error.WdirUnsupported
923 926 raise
924 927
925 928 # Derived from index values.
926 929
927 930 def end(self, rev):
928 931 return self.start(rev) + self.length(rev)
929 932
930 933 def parents(self, node):
931 934 i = self.index
932 935 d = i[self.rev(node)]
933 936 # inline node() to avoid function call overhead
934 937 if self.canonical_parent_order and d[5] == self.nullid:
935 938 return i[d[6]][7], i[d[5]][7]
936 939 else:
937 940 return i[d[5]][7], i[d[6]][7]
938 941
939 942 def chainlen(self, rev):
940 943 return self._chaininfo(rev)[0]
941 944
942 945 def _chaininfo(self, rev):
943 946 chaininfocache = self._chaininfocache
944 947 if rev in chaininfocache:
945 948 return chaininfocache[rev]
946 949 index = self.index
947 950 generaldelta = self._generaldelta
948 951 iterrev = rev
949 952 e = index[iterrev]
950 953 clen = 0
951 954 compresseddeltalen = 0
952 955 while iterrev != e[3]:
953 956 clen += 1
954 957 compresseddeltalen += e[1]
955 958 if generaldelta:
956 959 iterrev = e[3]
957 960 else:
958 961 iterrev -= 1
959 962 if iterrev in chaininfocache:
960 963 t = chaininfocache[iterrev]
961 964 clen += t[0]
962 965 compresseddeltalen += t[1]
963 966 break
964 967 e = index[iterrev]
965 968 else:
966 969 # Add text length of base since decompressing that also takes
967 970 # work. For cache hits the length is already included.
968 971 compresseddeltalen += e[1]
969 972 r = (clen, compresseddeltalen)
970 973 chaininfocache[rev] = r
971 974 return r
972 975
973 976 def _deltachain(self, rev, stoprev=None):
974 977 """Obtain the delta chain for a revision.
975 978
976 979 ``stoprev`` specifies a revision to stop at. If not specified, we
977 980 stop at the base of the chain.
978 981
979 982 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
980 983 revs in ascending order and ``stopped`` is a bool indicating whether
981 984 ``stoprev`` was hit.
982 985 """
983 986 # Try C implementation.
984 987 try:
985 988 return self.index.deltachain(rev, stoprev, self._generaldelta)
986 989 except AttributeError:
987 990 pass
988 991
989 992 chain = []
990 993
991 994 # Alias to prevent attribute lookup in tight loop.
992 995 index = self.index
993 996 generaldelta = self._generaldelta
994 997
995 998 iterrev = rev
996 999 e = index[iterrev]
997 1000 while iterrev != e[3] and iterrev != stoprev:
998 1001 chain.append(iterrev)
999 1002 if generaldelta:
1000 1003 iterrev = e[3]
1001 1004 else:
1002 1005 iterrev -= 1
1003 1006 e = index[iterrev]
1004 1007
1005 1008 if iterrev == stoprev:
1006 1009 stopped = True
1007 1010 else:
1008 1011 chain.append(iterrev)
1009 1012 stopped = False
1010 1013
1011 1014 chain.reverse()
1012 1015 return chain, stopped
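# Sketch of how callers consume the chain (see _rawtext further down): the
# first revision in ``chain`` provides the full-text base and the remaining
# entries are deltas applied in order, roughly ``mdiff.patches(base, deltas)``.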
1013 1016
1014 1017 def ancestors(self, revs, stoprev=0, inclusive=False):
1015 1018 """Generate the ancestors of 'revs' in reverse revision order.
1016 1019 Does not generate revs lower than stoprev.
1017 1020
1018 1021 See the documentation for ancestor.lazyancestors for more details."""
1019 1022
1020 1023 # first, make sure start revisions aren't filtered
1021 1024 revs = list(revs)
1022 1025 checkrev = self.node
1023 1026 for r in revs:
1024 1027 checkrev(r)
1025 1028 # and we're sure ancestors aren't filtered as well
1026 1029
1027 1030 if rustancestor is not None and self.index.rust_ext_compat:
1028 1031 lazyancestors = rustancestor.LazyAncestors
1029 1032 arg = self.index
1030 1033 else:
1031 1034 lazyancestors = ancestor.lazyancestors
1032 1035 arg = self._uncheckedparentrevs
1033 1036 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1034 1037
1035 1038 def descendants(self, revs):
1036 1039 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1037 1040
1038 1041 def findcommonmissing(self, common=None, heads=None):
1039 1042 """Return a tuple of the ancestors of common and the ancestors of heads
1040 1043 that are not ancestors of common. In revset terminology, we return the
1041 1044 tuple:
1042 1045
1043 1046 ::common, (::heads) - (::common)
1044 1047
1045 1048 The list is sorted by revision number, meaning it is
1046 1049 topologically sorted.
1047 1050
1048 1051 'heads' and 'common' are both lists of node IDs. If heads is
1049 1052 not supplied, uses all of the revlog's heads. If common is not
1050 1053 supplied, uses nullid."""
1051 1054 if common is None:
1052 1055 common = [self.nullid]
1053 1056 if heads is None:
1054 1057 heads = self.heads()
1055 1058
1056 1059 common = [self.rev(n) for n in common]
1057 1060 heads = [self.rev(n) for n in heads]
1058 1061
1059 1062 # we want the ancestors, but inclusive
1060 1063 class lazyset:
1061 1064 def __init__(self, lazyvalues):
1062 1065 self.addedvalues = set()
1063 1066 self.lazyvalues = lazyvalues
1064 1067
1065 1068 def __contains__(self, value):
1066 1069 return value in self.addedvalues or value in self.lazyvalues
1067 1070
1068 1071 def __iter__(self):
1069 1072 added = self.addedvalues
1070 1073 for r in added:
1071 1074 yield r
1072 1075 for r in self.lazyvalues:
1073 1076 if not r in added:
1074 1077 yield r
1075 1078
1076 1079 def add(self, value):
1077 1080 self.addedvalues.add(value)
1078 1081
1079 1082 def update(self, values):
1080 1083 self.addedvalues.update(values)
1081 1084
1082 1085 has = lazyset(self.ancestors(common))
1083 1086 has.add(nullrev)
1084 1087 has.update(common)
1085 1088
1086 1089 # take all ancestors from heads that aren't in has
1087 1090 missing = set()
1088 1091 visit = collections.deque(r for r in heads if r not in has)
1089 1092 while visit:
1090 1093 r = visit.popleft()
1091 1094 if r in missing:
1092 1095 continue
1093 1096 else:
1094 1097 missing.add(r)
1095 1098 for p in self.parentrevs(r):
1096 1099 if p not in has:
1097 1100 visit.append(p)
1098 1101 missing = list(missing)
1099 1102 missing.sort()
1100 1103 return has, [self.node(miss) for miss in missing]
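# For example, in a linear history 0 <- 1 <- 2 <- 3 with common=[node(1)] and
# heads=[node(3)], ``has`` covers revisions {nullrev, 0, 1} and the missing
# list contains the nodes of revisions 2 and 3.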
1101 1104
1102 1105 def incrementalmissingrevs(self, common=None):
1103 1106 """Return an object that can be used to incrementally compute the
1104 1107 revision numbers of the ancestors of arbitrary sets that are not
1105 1108 ancestors of common. This is an ancestor.incrementalmissingancestors
1106 1109 object.
1107 1110
1108 1111 'common' is a list of revision numbers. If common is not supplied, uses
1109 1112 nullrev.
1110 1113 """
1111 1114 if common is None:
1112 1115 common = [nullrev]
1113 1116
1114 1117 if rustancestor is not None and self.index.rust_ext_compat:
1115 1118 return rustancestor.MissingAncestors(self.index, common)
1116 1119 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1117 1120
1118 1121 def findmissingrevs(self, common=None, heads=None):
1119 1122 """Return the revision numbers of the ancestors of heads that
1120 1123 are not ancestors of common.
1121 1124
1122 1125 More specifically, return a list of revision numbers corresponding to
1123 1126 nodes N such that every N satisfies the following constraints:
1124 1127
1125 1128 1. N is an ancestor of some node in 'heads'
1126 1129 2. N is not an ancestor of any node in 'common'
1127 1130
1128 1131 The list is sorted by revision number, meaning it is
1129 1132 topologically sorted.
1130 1133
1131 1134 'heads' and 'common' are both lists of revision numbers. If heads is
1132 1135 not supplied, uses all of the revlog's heads. If common is not
1133 1136 supplied, uses nullrev."""
1134 1137 if common is None:
1135 1138 common = [nullrev]
1136 1139 if heads is None:
1137 1140 heads = self.headrevs()
1138 1141
1139 1142 inc = self.incrementalmissingrevs(common=common)
1140 1143 return inc.missingancestors(heads)
1141 1144
1142 1145 def findmissing(self, common=None, heads=None):
1143 1146 """Return the ancestors of heads that are not ancestors of common.
1144 1147
1145 1148 More specifically, return a list of nodes N such that every N
1146 1149 satisfies the following constraints:
1147 1150
1148 1151 1. N is an ancestor of some node in 'heads'
1149 1152 2. N is not an ancestor of any node in 'common'
1150 1153
1151 1154 The list is sorted by revision number, meaning it is
1152 1155 topologically sorted.
1153 1156
1154 1157 'heads' and 'common' are both lists of node IDs. If heads is
1155 1158 not supplied, uses all of the revlog's heads. If common is not
1156 1159 supplied, uses nullid."""
1157 1160 if common is None:
1158 1161 common = [self.nullid]
1159 1162 if heads is None:
1160 1163 heads = self.heads()
1161 1164
1162 1165 common = [self.rev(n) for n in common]
1163 1166 heads = [self.rev(n) for n in heads]
1164 1167
1165 1168 inc = self.incrementalmissingrevs(common=common)
1166 1169 return [self.node(r) for r in inc.missingancestors(heads)]
1167 1170
1168 1171 def nodesbetween(self, roots=None, heads=None):
1169 1172 """Return a topological path from 'roots' to 'heads'.
1170 1173
1171 1174 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1172 1175 topologically sorted list of all nodes N that satisfy both of
1173 1176 these constraints:
1174 1177
1175 1178 1. N is a descendant of some node in 'roots'
1176 1179 2. N is an ancestor of some node in 'heads'
1177 1180
1178 1181 Every node is considered to be both a descendant and an ancestor
1179 1182 of itself, so every reachable node in 'roots' and 'heads' will be
1180 1183 included in 'nodes'.
1181 1184
1182 1185 'outroots' is the list of reachable nodes in 'roots', i.e., the
1183 1186 subset of 'roots' that is returned in 'nodes'. Likewise,
1184 1187 'outheads' is the subset of 'heads' that is also in 'nodes'.
1185 1188
1186 1189 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1187 1190 unspecified, uses nullid as the only root. If 'heads' is
1188 1191 unspecified, uses list of all of the revlog's heads."""
1189 1192 nonodes = ([], [], [])
1190 1193 if roots is not None:
1191 1194 roots = list(roots)
1192 1195 if not roots:
1193 1196 return nonodes
1194 1197 lowestrev = min([self.rev(n) for n in roots])
1195 1198 else:
1196 1199 roots = [self.nullid] # Everybody's a descendant of nullid
1197 1200 lowestrev = nullrev
1198 1201 if (lowestrev == nullrev) and (heads is None):
1199 1202 # We want _all_ the nodes!
1200 1203 return (
1201 1204 [self.node(r) for r in self],
1202 1205 [self.nullid],
1203 1206 list(self.heads()),
1204 1207 )
1205 1208 if heads is None:
1206 1209 # All nodes are ancestors, so the latest ancestor is the last
1207 1210 # node.
1208 1211 highestrev = len(self) - 1
1209 1212 # Set ancestors to None to signal that every node is an ancestor.
1210 1213 ancestors = None
1211 1214 # Set heads to an empty dictionary for later discovery of heads
1212 1215 heads = {}
1213 1216 else:
1214 1217 heads = list(heads)
1215 1218 if not heads:
1216 1219 return nonodes
1217 1220 ancestors = set()
1218 1221 # Turn heads into a dictionary so we can remove 'fake' heads.
1219 1222 # Also, later we will be using it to filter out the heads we can't
1220 1223 # find from roots.
1221 1224 heads = dict.fromkeys(heads, False)
1222 1225 # Start at the top and keep marking parents until we're done.
1223 1226 nodestotag = set(heads)
1224 1227 # Remember where the top was so we can use it as a limit later.
1225 1228 highestrev = max([self.rev(n) for n in nodestotag])
1226 1229 while nodestotag:
1227 1230 # grab a node to tag
1228 1231 n = nodestotag.pop()
1229 1232 # Never tag nullid
1230 1233 if n == self.nullid:
1231 1234 continue
1232 1235 # A node's revision number represents its place in a
1233 1236 # topologically sorted list of nodes.
1234 1237 r = self.rev(n)
1235 1238 if r >= lowestrev:
1236 1239 if n not in ancestors:
1237 1240 # If we are possibly a descendant of one of the roots
1238 1241 # and we haven't already been marked as an ancestor
1239 1242 ancestors.add(n) # Mark as ancestor
1240 1243 # Add non-nullid parents to list of nodes to tag.
1241 1244 nodestotag.update(
1242 1245 [p for p in self.parents(n) if p != self.nullid]
1243 1246 )
1244 1247 elif n in heads: # We've seen it before, is it a fake head?
1245 1248 # So it is, real heads should not be the ancestors of
1246 1249 # any other heads.
1247 1250 heads.pop(n)
1248 1251 if not ancestors:
1249 1252 return nonodes
1250 1253 # Now that we have our set of ancestors, we want to remove any
1251 1254 # roots that are not ancestors.
1252 1255
1253 1256 # If one of the roots was nullid, everything is included anyway.
1254 1257 if lowestrev > nullrev:
1255 1258 # But, since we weren't, let's recompute the lowest rev to not
1256 1259 # include roots that aren't ancestors.
1257 1260
1258 1261 # Filter out roots that aren't ancestors of heads
1259 1262 roots = [root for root in roots if root in ancestors]
1260 1263 # Recompute the lowest revision
1261 1264 if roots:
1262 1265 lowestrev = min([self.rev(root) for root in roots])
1263 1266 else:
1264 1267 # No more roots? Return empty list
1265 1268 return nonodes
1266 1269 else:
1267 1270 # We are descending from nullid, and don't need to care about
1268 1271 # any other roots.
1269 1272 lowestrev = nullrev
1270 1273 roots = [self.nullid]
1271 1274 # Transform our roots list into a set.
1272 1275 descendants = set(roots)
1273 1276 # Also, keep the original roots so we can filter out roots that aren't
1274 1277 # 'real' roots (i.e. are descended from other roots).
1275 1278 roots = descendants.copy()
1276 1279 # Our topologically sorted list of output nodes.
1277 1280 orderedout = []
1278 1281 # Don't start at nullid since we don't want nullid in our output list,
1279 1282 # and if nullid shows up in descendants, empty parents will look like
1280 1283 # they're descendants.
1281 1284 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1282 1285 n = self.node(r)
1283 1286 isdescendant = False
1284 1287 if lowestrev == nullrev: # Everybody is a descendant of nullid
1285 1288 isdescendant = True
1286 1289 elif n in descendants:
1287 1290 # n is already a descendant
1288 1291 isdescendant = True
1289 1292 # This check only needs to be done here because all the roots
1290 1293 # will start being marked as descendants before the loop.
1291 1294 if n in roots:
1292 1295 # If n was a root, check if it's a 'real' root.
1293 1296 p = tuple(self.parents(n))
1294 1297 # If any of its parents are descendants, it's not a root.
1295 1298 if (p[0] in descendants) or (p[1] in descendants):
1296 1299 roots.remove(n)
1297 1300 else:
1298 1301 p = tuple(self.parents(n))
1299 1302 # A node is a descendant if either of its parents are
1300 1303 # descendants. (We seeded the descendants set with the roots
1301 1304 # up there, remember?)
1302 1305 if (p[0] in descendants) or (p[1] in descendants):
1303 1306 descendants.add(n)
1304 1307 isdescendant = True
1305 1308 if isdescendant and ((ancestors is None) or (n in ancestors)):
1306 1309 # Only include nodes that are both descendants and ancestors.
1307 1310 orderedout.append(n)
1308 1311 if (ancestors is not None) and (n in heads):
1309 1312 # We're trying to figure out which heads are reachable
1310 1313 # from roots.
1311 1314 # Mark this head as having been reached
1312 1315 heads[n] = True
1313 1316 elif ancestors is None:
1314 1317 # Otherwise, we're trying to discover the heads.
1315 1318 # Assume this is a head because if it isn't, the next step
1316 1319 # will eventually remove it.
1317 1320 heads[n] = True
1318 1321 # But, obviously its parents aren't.
1319 1322 for p in self.parents(n):
1320 1323 heads.pop(p, None)
1321 1324 heads = [head for head, flag in heads.items() if flag]
1322 1325 roots = list(roots)
1323 1326 assert orderedout
1324 1327 assert roots
1325 1328 assert heads
1326 1329 return (orderedout, roots, heads)
1327 1330
1328 1331 def headrevs(self, revs=None):
1329 1332 if revs is None:
1330 1333 try:
1331 1334 return self.index.headrevs()
1332 1335 except AttributeError:
1333 1336 return self._headrevs()
1334 1337 if rustdagop is not None and self.index.rust_ext_compat:
1335 1338 return rustdagop.headrevs(self.index, revs)
1336 1339 return dagop.headrevs(revs, self._uncheckedparentrevs)
1337 1340
1338 1341 def computephases(self, roots):
1339 1342 return self.index.computephasesmapsets(roots)
1340 1343
1341 1344 def _headrevs(self):
1342 1345 count = len(self)
1343 1346 if not count:
1344 1347 return [nullrev]
1345 1348 # we won't iter over filtered rev so nobody is a head at start
1346 1349 ishead = [0] * (count + 1)
1347 1350 index = self.index
1348 1351 for r in self:
1349 1352 ishead[r] = 1 # I may be a head
1350 1353 e = index[r]
1351 1354 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1352 1355 return [r for r, val in enumerate(ishead) if val]
1353 1356
1354 1357 def heads(self, start=None, stop=None):
1355 1358 """return the list of all nodes that have no children
1356 1359
1357 1360 if start is specified, only heads that are descendants of
1358 1361 start will be returned
1359 1362 if stop is specified, it will consider all the revs from stop
1360 1363 as if they had no children
1361 1364 """
1362 1365 if start is None and stop is None:
1363 1366 if not len(self):
1364 1367 return [self.nullid]
1365 1368 return [self.node(r) for r in self.headrevs()]
1366 1369
1367 1370 if start is None:
1368 1371 start = nullrev
1369 1372 else:
1370 1373 start = self.rev(start)
1371 1374
1372 1375 stoprevs = {self.rev(n) for n in stop or []}
1373 1376
1374 1377 revs = dagop.headrevssubset(
1375 1378 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1376 1379 )
1377 1380
1378 1381 return [self.node(rev) for rev in revs]
1379 1382
1380 1383 def children(self, node):
1381 1384 """find the children of a given node"""
1382 1385 c = []
1383 1386 p = self.rev(node)
1384 1387 for r in self.revs(start=p + 1):
1385 1388 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1386 1389 if prevs:
1387 1390 for pr in prevs:
1388 1391 if pr == p:
1389 1392 c.append(self.node(r))
1390 1393 elif p == nullrev:
1391 1394 c.append(self.node(r))
1392 1395 return c
1393 1396
1394 1397 def commonancestorsheads(self, a, b):
1395 1398 """calculate all the heads of the common ancestors of nodes a and b"""
1396 1399 a, b = self.rev(a), self.rev(b)
1397 1400 ancs = self._commonancestorsheads(a, b)
1398 1401 return pycompat.maplist(self.node, ancs)
1399 1402
1400 1403 def _commonancestorsheads(self, *revs):
1401 1404 """calculate all the heads of the common ancestors of revs"""
1402 1405 try:
1403 1406 ancs = self.index.commonancestorsheads(*revs)
1404 1407 except (AttributeError, OverflowError): # C implementation failed
1405 1408 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1406 1409 return ancs
1407 1410
1408 1411 def isancestor(self, a, b):
1409 1412 """return True if node a is an ancestor of node b
1410 1413
1411 1414 A revision is considered an ancestor of itself."""
1412 1415 a, b = self.rev(a), self.rev(b)
1413 1416 return self.isancestorrev(a, b)
1414 1417
1415 1418 def isancestorrev(self, a, b):
1416 1419 """return True if revision a is an ancestor of revision b
1417 1420
1418 1421 A revision is considered an ancestor of itself.
1419 1422
1420 1423 The implementation of this is trivial but the use of
1421 1424 reachableroots is not."""
1422 1425 if a == nullrev:
1423 1426 return True
1424 1427 elif a == b:
1425 1428 return True
1426 1429 elif a > b:
1427 1430 return False
1428 1431 return bool(self.reachableroots(a, [b], [a], includepath=False))
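# E.g. in a linear history 0 <- 1 <- 2, isancestorrev(0, 2) is True and
# isancestorrev(2, 0) is False; every revision is an ancestor of itself.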
1429 1432
1430 1433 def reachableroots(self, minroot, heads, roots, includepath=False):
1431 1434 """return (heads(::(<roots> and <roots>::<heads>)))
1432 1435
1433 1436 If includepath is True, return (<roots>::<heads>)."""
1434 1437 try:
1435 1438 return self.index.reachableroots2(
1436 1439 minroot, heads, roots, includepath
1437 1440 )
1438 1441 except AttributeError:
1439 1442 return dagop._reachablerootspure(
1440 1443 self.parentrevs, minroot, roots, heads, includepath
1441 1444 )
1442 1445
1443 1446 def ancestor(self, a, b):
1444 1447 """calculate the "best" common ancestor of nodes a and b"""
1445 1448
1446 1449 a, b = self.rev(a), self.rev(b)
1447 1450 try:
1448 1451 ancs = self.index.ancestors(a, b)
1449 1452 except (AttributeError, OverflowError):
1450 1453 ancs = ancestor.ancestors(self.parentrevs, a, b)
1451 1454 if ancs:
1452 1455 # choose a consistent winner when there's a tie
1453 1456 return min(map(self.node, ancs))
1454 1457 return self.nullid
1455 1458
1456 1459 def _match(self, id):
1457 1460 if isinstance(id, int):
1458 1461 # rev
1459 1462 return self.node(id)
1460 1463 if len(id) == self.nodeconstants.nodelen:
1461 1464 # possibly a binary node
1462 1465 # odds of a binary node being all hex in ASCII are 1 in 10**25
1463 1466 try:
1464 1467 node = id
1465 1468 self.rev(node) # quick search the index
1466 1469 return node
1467 1470 except error.LookupError:
1468 1471 pass # may be partial hex id
1469 1472 try:
1470 1473 # str(rev)
1471 1474 rev = int(id)
1472 1475 if b"%d" % rev != id:
1473 1476 raise ValueError
1474 1477 if rev < 0:
1475 1478 rev = len(self) + rev
1476 1479 if rev < 0 or rev >= len(self):
1477 1480 raise ValueError
1478 1481 return self.node(rev)
1479 1482 except (ValueError, OverflowError):
1480 1483 pass
1481 1484 if len(id) == 2 * self.nodeconstants.nodelen:
1482 1485 try:
1483 1486 # a full hex nodeid?
1484 1487 node = bin(id)
1485 1488 self.rev(node)
1486 1489 return node
1487 1490 except (TypeError, error.LookupError):
1488 1491 pass
1489 1492
1490 1493 def _partialmatch(self, id):
1491 1494 # we don't care about wdirfilenodeids as they should always be full hashes
1492 1495 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1493 1496 ambiguous = False
1494 1497 try:
1495 1498 partial = self.index.partialmatch(id)
1496 1499 if partial and self.hasnode(partial):
1497 1500 if maybewdir:
1498 1501 # single 'ff...' match in radix tree, ambiguous with wdir
1499 1502 ambiguous = True
1500 1503 else:
1501 1504 return partial
1502 1505 elif maybewdir:
1503 1506 # no 'ff...' match in radix tree, wdir identified
1504 1507 raise error.WdirUnsupported
1505 1508 else:
1506 1509 return None
1507 1510 except error.RevlogError:
1508 1511 # parsers.c radix tree lookup gave multiple matches
1509 1512 # fast path: for unfiltered changelog, radix tree is accurate
1510 1513 if not getattr(self, 'filteredrevs', None):
1511 1514 ambiguous = True
1512 1515 # fall through to slow path that filters hidden revisions
1513 1516 except (AttributeError, ValueError):
1514 1517 # we are pure python, or key was too short to search radix tree
1515 1518 pass
1516 1519 if ambiguous:
1517 1520 raise error.AmbiguousPrefixLookupError(
1518 1521 id, self.display_id, _(b'ambiguous identifier')
1519 1522 )
1520 1523
1521 1524 if id in self._pcache:
1522 1525 return self._pcache[id]
1523 1526
1524 1527 if len(id) <= 40:
1525 1528 try:
1526 1529 # hex(node)[:...]
1527 1530 l = len(id) // 2 # grab an even number of digits
1528 1531 prefix = bin(id[: l * 2])
1529 1532 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1530 1533 nl = [
1531 1534 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1532 1535 ]
1533 1536 if self.nodeconstants.nullhex.startswith(id):
1534 1537 nl.append(self.nullid)
1535 1538 if len(nl) > 0:
1536 1539 if len(nl) == 1 and not maybewdir:
1537 1540 self._pcache[id] = nl[0]
1538 1541 return nl[0]
1539 1542 raise error.AmbiguousPrefixLookupError(
1540 1543 id, self.display_id, _(b'ambiguous identifier')
1541 1544 )
1542 1545 if maybewdir:
1543 1546 raise error.WdirUnsupported
1544 1547 return None
1545 1548 except TypeError:
1546 1549 pass
1547 1550
1548 1551 def lookup(self, id):
1549 1552 """locate a node based on:
1550 1553 - revision number or str(revision number)
1551 1554 - nodeid or subset of hex nodeid
1552 1555 """
1553 1556 n = self._match(id)
1554 1557 if n is not None:
1555 1558 return n
1556 1559 n = self._partialmatch(id)
1557 1560 if n:
1558 1561 return n
1559 1562
1560 1563 raise error.LookupError(id, self.display_id, _(b'no match found'))
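# Illustrative accepted identifiers: lookup(b'0') resolves revision 0, while a
# full binary node or a hex prefix resolves by node id, the latter only when
# the prefix is unambiguous.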
1561 1564
1562 1565 def shortest(self, node, minlength=1):
1563 1566 """Find the shortest unambiguous prefix that matches node."""
1564 1567
1565 1568 def isvalid(prefix):
1566 1569 try:
1567 1570 matchednode = self._partialmatch(prefix)
1568 1571 except error.AmbiguousPrefixLookupError:
1569 1572 return False
1570 1573 except error.WdirUnsupported:
1571 1574 # single 'ff...' match
1572 1575 return True
1573 1576 if matchednode is None:
1574 1577 raise error.LookupError(node, self.display_id, _(b'no node'))
1575 1578 return True
1576 1579
1577 1580 def maybewdir(prefix):
1578 1581 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1579 1582
1580 1583 hexnode = hex(node)
1581 1584
1582 1585 def disambiguate(hexnode, minlength):
1583 1586 """Disambiguate against wdirid."""
1584 1587 for length in range(minlength, len(hexnode) + 1):
1585 1588 prefix = hexnode[:length]
1586 1589 if not maybewdir(prefix):
1587 1590 return prefix
1588 1591
1589 1592 if not getattr(self, 'filteredrevs', None):
1590 1593 try:
1591 1594 length = max(self.index.shortest(node), minlength)
1592 1595 return disambiguate(hexnode, length)
1593 1596 except error.RevlogError:
1594 1597 if node != self.nodeconstants.wdirid:
1595 1598 raise error.LookupError(
1596 1599 node, self.display_id, _(b'no node')
1597 1600 )
1598 1601 except AttributeError:
1599 1602 # Fall through to pure code
1600 1603 pass
1601 1604
1602 1605 if node == self.nodeconstants.wdirid:
1603 1606 for length in range(minlength, len(hexnode) + 1):
1604 1607 prefix = hexnode[:length]
1605 1608 if isvalid(prefix):
1606 1609 return prefix
1607 1610
1608 1611 for length in range(minlength, len(hexnode) + 1):
1609 1612 prefix = hexnode[:length]
1610 1613 if isvalid(prefix):
1611 1614 return disambiguate(hexnode, length)
1612 1615
1613 1616 def cmp(self, node, text):
1614 1617 """compare text with a given file revision
1615 1618
1616 1619 returns True if text is different than what is stored.
1617 1620 """
1618 1621 p1, p2 = self.parents(node)
1619 1622 return storageutil.hashrevisionsha1(text, p1, p2) != node
1620 1623
1621 1624 def _getsegmentforrevs(self, startrev, endrev, df=None):
1622 1625 """Obtain a segment of raw data corresponding to a range of revisions.
1623 1626
1624 1627 Accepts the start and end revisions and an optional already-open
1625 1628 file handle to be used for reading. If the file handle is read, its
1626 1629 seek position will not be preserved.
1627 1630
1628 1631 Requests for data may be satisfied by a cache.
1629 1632
1630 1633 Returns a 2-tuple of (offset, data) for the requested range of
1631 1634 revisions. Offset is the integer offset from the beginning of the
1632 1635 revlog and data is a str or buffer of the raw byte data.
1633 1636
1634 1637 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1635 1638 to determine where each revision's data begins and ends.
1636 1639 """
1637 1640 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1638 1641 # (functions are expensive).
1639 1642 index = self.index
1640 1643 istart = index[startrev]
1641 1644 start = int(istart[0] >> 16)
1642 1645 if startrev == endrev:
1643 1646 end = start + istart[1]
1644 1647 else:
1645 1648 iend = index[endrev]
1646 1649 end = int(iend[0] >> 16) + iend[1]
1647 1650
1648 1651 if self._inline:
1649 1652 start += (startrev + 1) * self.index.entry_size
1650 1653 end += (endrev + 1) * self.index.entry_size
1651 1654 length = end - start
1652 1655
1653 1656 return start, self._segmentfile.read_chunk(start, length, df)
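# Illustrative use of the returned pair (mirroring what _chunks() below does
# for a non-inline revlog):
#
#     offset, data = self._getsegmentforrevs(first, last, df=df)
#     chunk = data[self.start(rev) - offset:][:self.length(rev)]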
1654 1657
1655 1658 def _chunk(self, rev, df=None):
1656 1659 """Obtain a single decompressed chunk for a revision.
1657 1660
1658 1661 Accepts an integer revision and an optional already-open file handle
1659 1662 to be used for reading. If used, the seek position of the file will not
1660 1663 be preserved.
1661 1664
1662 1665 Returns a str holding uncompressed data for the requested revision.
1663 1666 """
1664 1667 compression_mode = self.index[rev][10]
1665 1668 data = self._getsegmentforrevs(rev, rev, df=df)[1]
1666 1669 if compression_mode == COMP_MODE_PLAIN:
1667 1670 return data
1668 1671 elif compression_mode == COMP_MODE_DEFAULT:
1669 1672 return self._decompressor(data)
1670 1673 elif compression_mode == COMP_MODE_INLINE:
1671 1674 return self.decompress(data)
1672 1675 else:
1673 1676 msg = b'unknown compression mode %d'
1674 1677 msg %= compression_mode
1675 1678 raise error.RevlogError(msg)
1676 1679
1677 1680 def _chunks(self, revs, df=None, targetsize=None):
1678 1681 """Obtain decompressed chunks for the specified revisions.
1679 1682
1680 1683 Accepts an iterable of numeric revisions that are assumed to be in
1681 1684 ascending order. Also accepts an optional already-open file handle
1682 1685 to be used for reading. If used, the seek position of the file will
1683 1686 not be preserved.
1684 1687
1685 1688 This function is similar to calling ``self._chunk()`` multiple times,
1686 1689 but is faster.
1687 1690
1688 1691 Returns a list with decompressed data for each requested revision.
1689 1692 """
1690 1693 if not revs:
1691 1694 return []
1692 1695 start = self.start
1693 1696 length = self.length
1694 1697 inline = self._inline
1695 1698 iosize = self.index.entry_size
1696 1699 buffer = util.buffer
1697 1700
1698 1701 l = []
1699 1702 ladd = l.append
1700 1703
1701 1704 if not self._withsparseread:
1702 1705 slicedchunks = (revs,)
1703 1706 else:
1704 1707 slicedchunks = deltautil.slicechunk(
1705 1708 self, revs, targetsize=targetsize
1706 1709 )
1707 1710
1708 1711 for revschunk in slicedchunks:
1709 1712 firstrev = revschunk[0]
1710 1713 # Skip trailing revisions with empty diff
1711 1714 for lastrev in revschunk[::-1]:
1712 1715 if length(lastrev) != 0:
1713 1716 break
1714 1717
1715 1718 try:
1716 1719 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1717 1720 except OverflowError:
1718 1721 # issue4215 - we can't cache a run of chunks greater than
1719 1722 # 2G on Windows
1720 1723 return [self._chunk(rev, df=df) for rev in revschunk]
1721 1724
1722 1725 decomp = self.decompress
1723 1726 # self._decompressor might be None, but will not be used in that case
1724 1727 def_decomp = self._decompressor
1725 1728 for rev in revschunk:
1726 1729 chunkstart = start(rev)
1727 1730 if inline:
1728 1731 chunkstart += (rev + 1) * iosize
1729 1732 chunklength = length(rev)
1730 1733 comp_mode = self.index[rev][10]
1731 1734 c = buffer(data, chunkstart - offset, chunklength)
1732 1735 if comp_mode == COMP_MODE_PLAIN:
1733 1736 ladd(c)
1734 1737 elif comp_mode == COMP_MODE_INLINE:
1735 1738 ladd(decomp(c))
1736 1739 elif comp_mode == COMP_MODE_DEFAULT:
1737 1740 ladd(def_decomp(c))
1738 1741 else:
1739 1742 msg = b'unknown compression mode %d'
1740 1743 msg %= comp_mode
1741 1744 raise error.RevlogError(msg)
1742 1745
1743 1746 return l
1744 1747
1745 1748 def deltaparent(self, rev):
1746 1749 """return deltaparent of the given revision"""
1747 1750 base = self.index[rev][3]
1748 1751 if base == rev:
1749 1752 return nullrev
1750 1753 elif self._generaldelta:
1751 1754 return base
1752 1755 else:
1753 1756 return rev - 1
1754 1757
1755 1758 def issnapshot(self, rev):
1756 1759 """tells whether rev is a snapshot"""
1757 1760 if not self._sparserevlog:
1758 1761 return self.deltaparent(rev) == nullrev
1759 1762 elif util.safehasattr(self.index, b'issnapshot'):
1760 1763 # directly assign the method to cache the testing and access
1761 1764 self.issnapshot = self.index.issnapshot
1762 1765 return self.issnapshot(rev)
1763 1766 if rev == nullrev:
1764 1767 return True
1765 1768 entry = self.index[rev]
1766 1769 base = entry[3]
1767 1770 if base == rev:
1768 1771 return True
1769 1772 if base == nullrev:
1770 1773 return True
1771 1774 p1 = entry[5]
1772 1775 p2 = entry[6]
1773 1776 if base == p1 or base == p2:
1774 1777 return False
1775 1778 return self.issnapshot(base)
1776 1779
1777 1780 def snapshotdepth(self, rev):
1778 1781 """number of snapshot in the chain before this one"""
1779 1782 if not self.issnapshot(rev):
1780 1783 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1781 1784 return len(self._deltachain(rev)[0]) - 1
1782 1785
1783 1786 def revdiff(self, rev1, rev2):
1784 1787 """return or calculate a delta between two revisions
1785 1788
1786 1789 The delta calculated is in binary form and is intended to be written to
1787 1790 revlog data directly. So this function needs raw revision data.
1788 1791 """
1789 1792 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1790 1793 return bytes(self._chunk(rev2))
1791 1794
1792 1795 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1793 1796
1794 1797 def revision(self, nodeorrev, _df=None):
1795 1798 """return an uncompressed revision of a given node or revision
1796 1799 number.
1797 1800
1798 1801 _df - an existing file handle to read from. (internal-only)
1799 1802 """
1800 1803 return self._revisiondata(nodeorrev, _df)
1801 1804
1802 1805 def sidedata(self, nodeorrev, _df=None):
1803 1806 """a map of extra data related to the changeset but not part of the hash
1804 1807
1805 1808 This function currently return a dictionary. However, more advanced
1806 1809 mapping object will likely be used in the future for a more
1807 1810 efficient/lazy code.
1808 1811 """
1809 1812 # deal with <nodeorrev> argument type
1810 1813 if isinstance(nodeorrev, int):
1811 1814 rev = nodeorrev
1812 1815 else:
1813 1816 rev = self.rev(nodeorrev)
1814 1817 return self._sidedata(rev)
1815 1818
1816 1819 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1817 1820 # deal with <nodeorrev> argument type
1818 1821 if isinstance(nodeorrev, int):
1819 1822 rev = nodeorrev
1820 1823 node = self.node(rev)
1821 1824 else:
1822 1825 node = nodeorrev
1823 1826 rev = None
1824 1827
1825 1828 # fast path the special `nullid` rev
1826 1829 if node == self.nullid:
1827 1830 return b""
1828 1831
1829 1832 # ``rawtext`` is the text as stored inside the revlog. Might be the
1830 1833 # revision or might need to be processed to retrieve the revision.
1831 1834 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1832 1835
1833 1836 if raw and validated:
1834 1837 # if we don't want to process the raw text and that raw
1835 1838 # text is cached, we can exit early.
1836 1839 return rawtext
1837 1840 if rev is None:
1838 1841 rev = self.rev(node)
1839 1842 # the revlog's flag for this revision
1840 1843 # (usually alter its state or content)
1841 1844 flags = self.flags(rev)
1842 1845
1843 1846 if validated and flags == REVIDX_DEFAULT_FLAGS:
1844 1847 # no extra flags set, no flag processor runs, text = rawtext
1845 1848 return rawtext
1846 1849
1847 1850 if raw:
1848 1851 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1849 1852 text = rawtext
1850 1853 else:
1851 1854 r = flagutil.processflagsread(self, rawtext, flags)
1852 1855 text, validatehash = r
1853 1856 if validatehash:
1854 1857 self.checkhash(text, node, rev=rev)
1855 1858 if not validated:
1856 1859 self._revisioncache = (node, rev, rawtext)
1857 1860
1858 1861 return text
1859 1862
1860 1863 def _rawtext(self, node, rev, _df=None):
1861 1864 """return the possibly unvalidated rawtext for a revision
1862 1865
1863 1866 returns (rev, rawtext, validated)
1864 1867 """
1865 1868
1866 1869 # revision in the cache (could be useful to apply delta)
1867 1870 cachedrev = None
1868 1871 # An intermediate text to apply deltas to
1869 1872 basetext = None
1870 1873
1871 1874 # Check if we have the entry in cache
1872 1875 # The cache entry looks like (node, rev, rawtext)
1873 1876 if self._revisioncache:
1874 1877 if self._revisioncache[0] == node:
1875 1878 return (rev, self._revisioncache[2], True)
1876 1879 cachedrev = self._revisioncache[1]
1877 1880
1878 1881 if rev is None:
1879 1882 rev = self.rev(node)
1880 1883
1881 1884 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1882 1885 if stopped:
1883 1886 basetext = self._revisioncache[2]
1884 1887
1885 1888 # drop cache to save memory, the caller is expected to
1886 1889 # update self._revisioncache after validating the text
1887 1890 self._revisioncache = None
1888 1891
1889 1892 targetsize = None
1890 1893 rawsize = self.index[rev][2]
1891 1894 if 0 <= rawsize:
1892 1895 targetsize = 4 * rawsize
1893 1896
1894 1897 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1895 1898 if basetext is None:
1896 1899 basetext = bytes(bins[0])
1897 1900 bins = bins[1:]
1898 1901
1899 1902 rawtext = mdiff.patches(basetext, bins)
1900 1903 del basetext # let us have a chance to free memory early
1901 1904 return (rev, rawtext, False)
1902 1905
1903 1906 def _sidedata(self, rev):
1904 1907 """Return the sidedata for a given revision number."""
1905 1908 index_entry = self.index[rev]
1906 1909 sidedata_offset = index_entry[8]
1907 1910 sidedata_size = index_entry[9]
1908 1911
1909 1912 if self._inline:
1910 1913 sidedata_offset += self.index.entry_size * (1 + rev)
1911 1914 if sidedata_size == 0:
1912 1915 return {}
1913 1916
1914 1917 if self._docket.sidedata_end < sidedata_offset + sidedata_size:
1915 1918 filename = self._sidedatafile
1916 1919 end = self._docket.sidedata_end
1917 1920 offset = sidedata_offset
1918 1921 length = sidedata_size
1919 1922 m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
1920 1923 raise error.RevlogError(m)
1921 1924
1922 1925 comp_segment = self._segmentfile_sidedata.read_chunk(
1923 1926 sidedata_offset, sidedata_size
1924 1927 )
1925 1928
1926 1929 comp = self.index[rev][11]
1927 1930 if comp == COMP_MODE_PLAIN:
1928 1931 segment = comp_segment
1929 1932 elif comp == COMP_MODE_DEFAULT:
1930 1933 segment = self._decompressor(comp_segment)
1931 1934 elif comp == COMP_MODE_INLINE:
1932 1935 segment = self.decompress(comp_segment)
1933 1936 else:
1934 1937 msg = b'unknown compression mode %d'
1935 1938 msg %= comp
1936 1939 raise error.RevlogError(msg)
1937 1940
1938 1941 sidedata = sidedatautil.deserialize_sidedata(segment)
1939 1942 return sidedata
1940 1943
1941 1944 def rawdata(self, nodeorrev, _df=None):
1942 1945 """return an uncompressed raw data of a given node or revision number.
1943 1946
1944 1947 _df - an existing file handle to read from. (internal-only)
1945 1948 """
1946 1949 return self._revisiondata(nodeorrev, _df, raw=True)
1947 1950
1948 1951 def hash(self, text, p1, p2):
1949 1952 """Compute a node hash.
1950 1953
1951 1954 Available as a function so that subclasses can replace the hash
1952 1955 as needed.
1953 1956 """
1954 1957 return storageutil.hashrevisionsha1(text, p1, p2)
1955 1958
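For orientation only, a minimal sketch of the usual Mercurial node scheme that the default implementation (storageutil.hashrevisionsha1) follows: a SHA-1 over the two parent nodes in sorted order, followed by the revision text. This is an illustration, not the canonical implementation:

    import hashlib

    def node_hash_sketch(text, p1, p2):
        # hash the two parent nodes in sorted order, then the revision text
        a, b = sorted([p1, p2])
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()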
1956 1959 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1957 1960 """Check node hash integrity.
1958 1961
1959 1962 Available as a function so that subclasses can extend hash mismatch
1960 1963 behaviors as needed.
1961 1964 """
1962 1965 try:
1963 1966 if p1 is None and p2 is None:
1964 1967 p1, p2 = self.parents(node)
1965 1968 if node != self.hash(text, p1, p2):
1966 1969 # Clear the revision cache on hash failure. The revision cache
1967 1970 # only stores the raw revision and clearing the cache does have
1968 1971 # the side-effect that we won't have a cache hit when the raw
1969 1972 # revision data is accessed. But this case should be rare and
1970 1973 # it is extra work to teach the cache about the hash
1971 1974 # verification state.
1972 1975 if self._revisioncache and self._revisioncache[0] == node:
1973 1976 self._revisioncache = None
1974 1977
1975 1978 revornode = rev
1976 1979 if revornode is None:
1977 1980 revornode = templatefilters.short(hex(node))
1978 1981 raise error.RevlogError(
1979 1982 _(b"integrity check failed on %s:%s")
1980 1983 % (self.display_id, pycompat.bytestr(revornode))
1981 1984 )
1982 1985 except error.RevlogError:
1983 1986 if self._censorable and storageutil.iscensoredtext(text):
1984 1987 raise error.CensoredNodeError(self.display_id, node, text)
1985 1988 raise
1986 1989
1987 1990 def _enforceinlinesize(self, tr):
1988 1991 """Check if the revlog is too big for inline and convert if so.
1989 1992
1990 1993 This should be called after revisions are added to the revlog. If the
1991 1994 revlog has grown too large to be an inline revlog, it will be converted
1992 1995 to use separate index and data files.
1993 1996 """
1994 1997 tiprev = len(self) - 1
1995 1998 total_size = self.start(tiprev) + self.length(tiprev)
1996 1999 if not self._inline or total_size < _maxinline:
1997 2000 return
1998 2001
1999 2002 troffset = tr.findoffset(self._indexfile)
2000 2003 if troffset is None:
2001 2004 raise error.RevlogError(
2002 2005 _(b"%s not found in the transaction") % self._indexfile
2003 2006 )
2004 2007 trindex = None
2005 2008 tr.add(self._datafile, 0)
2006 2009
2007 2010 existing_handles = False
2008 2011 if self._writinghandles is not None:
2009 2012 existing_handles = True
2010 2013 fp = self._writinghandles[0]
2011 2014 fp.flush()
2012 2015 fp.close()
2013 2016 # We can't use the cached file handle after close(). So prevent
2014 2017 # its usage.
2015 2018 self._writinghandles = None
2016 2019 self._segmentfile.writing_handle = None
2017 2020 # No need to deal with sidedata writing handle as it is only
2018 2021 # relevant with revlog-v2 which is never inline, not reaching
2019 2022 # this code
2020 2023
2021 2024 new_dfh = self._datafp(b'w+')
2022 2025 new_dfh.truncate(0) # drop any potentially existing data
2023 2026 try:
2024 2027 with self._indexfp() as read_ifh:
2025 2028 for r in self:
2026 2029 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2027 2030 if (
2028 2031 trindex is None
2029 2032 and troffset
2030 2033 <= self.start(r) + r * self.index.entry_size
2031 2034 ):
2032 2035 trindex = r
2033 2036 new_dfh.flush()
2034 2037
2035 2038 if trindex is None:
2036 2039 trindex = 0
2037 2040
2038 2041 with self.__index_new_fp() as fp:
2039 2042 self._format_flags &= ~FLAG_INLINE_DATA
2040 2043 self._inline = False
2041 2044 for i in self:
2042 2045 e = self.index.entry_binary(i)
2043 2046 if i == 0 and self._docket is None:
2044 2047 header = self._format_flags | self._format_version
2045 2048 header = self.index.pack_header(header)
2046 2049 e = header + e
2047 2050 fp.write(e)
2048 2051 if self._docket is not None:
2049 2052 self._docket.index_end = fp.tell()
2050 2053
2051 2054 # There is a small transactional race here. If the rename of
2052 2055 # the index fails, we should remove the datafile. It is more
2053 2056 # important to ensure that the data file is not truncated
2054 2057 # when the index is replaced as otherwise data is lost.
2055 2058 tr.replace(self._datafile, self.start(trindex))
2056 2059
2057 2060 # the temp file replace the real index when we exit the context
2058 2061 # manager
2059 2062
2060 2063 tr.replace(self._indexfile, trindex * self.index.entry_size)
2061 2064 nodemaputil.setup_persistent_nodemap(tr, self)
2062 2065 self._segmentfile = randomaccessfile.randomaccessfile(
2063 2066 self.opener,
2064 2067 self._datafile,
2065 2068 self._chunkcachesize,
2066 2069 )
2067 2070
2068 2071 if existing_handles:
2069 2072 # switched from inline to conventional reopen the index
2070 2073 ifh = self.__index_write_fp()
2071 2074 self._writinghandles = (ifh, new_dfh, None)
2072 2075 self._segmentfile.writing_handle = new_dfh
2073 2076 new_dfh = None
2074 2077 # No need to deal with sidedata writing handle as it is only
2075 2078 # relevant with revlog-v2 which is never inline, not reaching
2076 2079 # this code
2077 2080 finally:
2078 2081 if new_dfh is not None:
2079 2082 new_dfh.close()
2080 2083
2081 2084 def _nodeduplicatecallback(self, transaction, node):
2082 2085 """called when trying to add a node already stored."""
2083 2086
2084 2087 @contextlib.contextmanager
2085 2088 def reading(self):
2086 2089 """Context manager that keeps data and sidedata files open for reading"""
2087 2090 with self._segmentfile.reading():
2088 2091 with self._segmentfile_sidedata.reading():
2089 2092 yield
2090 2093
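A minimal usage sketch, with `rl` and `some_revs` as hypothetical placeholders: holding the data and sidedata files open across several reads avoids reopening them for every revision:

    with rl.reading():
        texts = [rl.revision(r) for r in some_revs]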
2091 2094 @contextlib.contextmanager
2092 2095 def _writing(self, transaction):
2093 2096 if self._trypending:
2094 2097 msg = b'try to write in a `trypending` revlog: %s'
2095 2098 msg %= self.display_id
2096 2099 raise error.ProgrammingError(msg)
2097 2100 if self._writinghandles is not None:
2098 2101 yield
2099 2102 else:
2100 2103 ifh = dfh = sdfh = None
2101 2104 try:
2102 2105 r = len(self)
2103 2106 # opening the data file.
2104 2107 dsize = 0
2105 2108 if r:
2106 2109 dsize = self.end(r - 1)
2107 2110 dfh = None
2108 2111 if not self._inline:
2109 2112 try:
2110 2113 dfh = self._datafp(b"r+")
2111 2114 if self._docket is None:
2112 2115 dfh.seek(0, os.SEEK_END)
2113 2116 else:
2114 2117 dfh.seek(self._docket.data_end, os.SEEK_SET)
2115 2118 except IOError as inst:
2116 2119 if inst.errno != errno.ENOENT:
2117 2120 raise
2118 2121 dfh = self._datafp(b"w+")
2119 2122 transaction.add(self._datafile, dsize)
2120 2123 if self._sidedatafile is not None:
2121 2124 # revlog-v2 does not inline, help Pytype
2122 2125 assert dfh is not None
2123 2126 try:
2124 2127 sdfh = self.opener(self._sidedatafile, mode=b"r+")
2125 2128 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2126 2129 except IOError as inst:
2127 2130 if inst.errno != errno.ENOENT:
2128 2131 raise
2129 2132 sdfh = self.opener(self._sidedatafile, mode=b"w+")
2130 2133 transaction.add(
2131 2134 self._sidedatafile, self._docket.sidedata_end
2132 2135 )
2133 2136
2134 2137 # opening the index file.
2135 2138 isize = r * self.index.entry_size
2136 2139 ifh = self.__index_write_fp()
2137 2140 if self._inline:
2138 2141 transaction.add(self._indexfile, dsize + isize)
2139 2142 else:
2140 2143 transaction.add(self._indexfile, isize)
2141 2144 # exposing all file handle for writing.
2142 2145 self._writinghandles = (ifh, dfh, sdfh)
2143 2146 self._segmentfile.writing_handle = ifh if self._inline else dfh
2144 2147 self._segmentfile_sidedata.writing_handle = sdfh
2145 2148 yield
2146 2149 if self._docket is not None:
2147 2150 self._write_docket(transaction)
2148 2151 finally:
2149 2152 self._writinghandles = None
2150 2153 self._segmentfile.writing_handle = None
2151 2154 self._segmentfile_sidedata.writing_handle = None
2152 2155 if dfh is not None:
2153 2156 dfh.close()
2154 2157 if sdfh is not None:
2155 2158 sdfh.close()
2156 2159 # closing the index file last to avoid exposing references to
2157 2160 # potentially unflushed data content.
2158 2161 if ifh is not None:
2159 2162 ifh.close()
2160 2163
2161 2164 def _write_docket(self, transaction):
2162 2165 """write the current docket on disk
2163 2166
2164 2167 Exists as a method to help changelog implement transaction logic
2165 2168 
2166 2169 We could also imagine using the same transaction logic for all revlogs
2167 2170 since dockets are cheap."""
2168 2171 self._docket.write(transaction)
2169 2172
2170 2173 def addrevision(
2171 2174 self,
2172 2175 text,
2173 2176 transaction,
2174 2177 link,
2175 2178 p1,
2176 2179 p2,
2177 2180 cachedelta=None,
2178 2181 node=None,
2179 2182 flags=REVIDX_DEFAULT_FLAGS,
2180 2183 deltacomputer=None,
2181 2184 sidedata=None,
2182 2185 ):
2183 2186 """add a revision to the log
2184 2187
2185 2188 text - the revision data to add
2186 2189 transaction - the transaction object used for rollback
2187 2190 link - the linkrev data to add
2188 2191 p1, p2 - the parent nodeids of the revision
2189 2192 cachedelta - an optional precomputed delta
2190 2193 node - nodeid of revision; typically node is not specified, and it is
2191 2194 computed by default as hash(text, p1, p2); however, subclasses might
2192 2195 use a different hashing method (and override checkhash() in that case)
2193 2196 flags - the known flags to set on the revision
2194 2197 deltacomputer - an optional deltacomputer instance shared between
2195 2198 multiple calls
2196 2199 """
2197 2200 if link == nullrev:
2198 2201 raise error.RevlogError(
2199 2202 _(b"attempted to add linkrev -1 to %s") % self.display_id
2200 2203 )
2201 2204
2202 2205 if sidedata is None:
2203 2206 sidedata = {}
2204 2207 elif sidedata and not self.hassidedata:
2205 2208 raise error.ProgrammingError(
2206 2209 _(b"trying to add sidedata to a revlog who don't support them")
2207 2210 )
2208 2211
2209 2212 if flags:
2210 2213 node = node or self.hash(text, p1, p2)
2211 2214
2212 2215 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2213 2216
2214 2217 # If the flag processor modifies the revision data, ignore any provided
2215 2218 # cachedelta.
2216 2219 if rawtext != text:
2217 2220 cachedelta = None
2218 2221
2219 2222 if len(rawtext) > _maxentrysize:
2220 2223 raise error.RevlogError(
2221 2224 _(
2222 2225 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2223 2226 )
2224 2227 % (self.display_id, len(rawtext))
2225 2228 )
2226 2229
2227 2230 node = node or self.hash(rawtext, p1, p2)
2228 2231 rev = self.index.get_rev(node)
2229 2232 if rev is not None:
2230 2233 return rev
2231 2234
2232 2235 if validatehash:
2233 2236 self.checkhash(rawtext, node, p1=p1, p2=p2)
2234 2237
2235 2238 return self.addrawrevision(
2236 2239 rawtext,
2237 2240 transaction,
2238 2241 link,
2239 2242 p1,
2240 2243 p2,
2241 2244 node,
2242 2245 flags,
2243 2246 cachedelta=cachedelta,
2244 2247 deltacomputer=deltacomputer,
2245 2248 sidedata=sidedata,
2246 2249 )
2247 2250
2248 2251 def addrawrevision(
2249 2252 self,
2250 2253 rawtext,
2251 2254 transaction,
2252 2255 link,
2253 2256 p1,
2254 2257 p2,
2255 2258 node,
2256 2259 flags,
2257 2260 cachedelta=None,
2258 2261 deltacomputer=None,
2259 2262 sidedata=None,
2260 2263 ):
2261 2264 """add a raw revision with known flags, node and parents
2262 2265 useful when reusing a revision not stored in this revlog (e.g. received
2263 2266 over the wire, or read from an external bundle).
2264 2267 """
2265 2268 with self._writing(transaction):
2266 2269 return self._addrevision(
2267 2270 node,
2268 2271 rawtext,
2269 2272 transaction,
2270 2273 link,
2271 2274 p1,
2272 2275 p2,
2273 2276 flags,
2274 2277 cachedelta,
2275 2278 deltacomputer=deltacomputer,
2276 2279 sidedata=sidedata,
2277 2280 )
2278 2281
2279 2282 def compress(self, data):
2280 2283 """Generate a possibly-compressed representation of data."""
2281 2284 if not data:
2282 2285 return b'', data
2283 2286
2284 2287 compressed = self._compressor.compress(data)
2285 2288
2286 2289 if compressed:
2287 2290 # The revlog compressor added the header in the returned data.
2288 2291 return b'', compressed
2289 2292
2290 2293 if data[0:1] == b'\0':
2291 2294 return b'', data
2292 2295 return b'u', data
2293 2296
2294 2297 def decompress(self, data):
2295 2298 """Decompress a revlog chunk.
2296 2299
2297 2300 The chunk is expected to begin with a header identifying the
2298 2301 format type so it can be routed to an appropriate decompressor.
2299 2302 """
2300 2303 if not data:
2301 2304 return data
2302 2305
2303 2306 # Revlogs are read much more frequently than they are written and many
2304 2307 # chunks only take microseconds to decompress, so performance is
2305 2308 # important here.
2306 2309 #
2307 2310 # We can make a few assumptions about revlogs:
2308 2311 #
2309 2312 # 1) the majority of chunks will be compressed (as opposed to inline
2310 2313 # raw data).
2311 2314 # 2) decompressing *any* data will likely be at least 10x slower than
2312 2315 # returning raw inline data.
2313 2316 # 3) we want to prioritize common and officially supported compression
2314 2317 # engines
2315 2318 #
2316 2319 # It follows that we want to optimize for "decompress compressed data
2317 2320 # when encoded with common and officially supported compression engines"
2318 2321 # case over "raw data" and "data encoded by less common or non-official
2319 2322 # compression engines." That is why we have the inline lookup first
2320 2323 # followed by the compengines lookup.
2321 2324 #
2322 2325 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2323 2326 # compressed chunks. And this matters for changelog and manifest reads.
2324 2327 t = data[0:1]
2325 2328
2326 2329 if t == b'x':
2327 2330 try:
2328 2331 return _zlibdecompress(data)
2329 2332 except zlib.error as e:
2330 2333 raise error.RevlogError(
2331 2334 _(b'revlog decompress error: %s')
2332 2335 % stringutil.forcebytestr(e)
2333 2336 )
2334 2337 # '\0' is more common than 'u' so it goes first.
2335 2338 elif t == b'\0':
2336 2339 return data
2337 2340 elif t == b'u':
2338 2341 return util.buffer(data, 1)
2339 2342
2340 2343 compressor = self._get_decompressor(t)
2341 2344
2342 2345 return compressor.decompress(data)
2343 2346
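To make the header convention used by compress()/decompress() above easier to follow, here is an illustrative (non-authoritative) classifier based purely on the first byte of a stored chunk:

    def classify_chunk_sketch(chunk):
        if not chunk:
            return 'empty chunk, returned as-is'
        t = chunk[0:1]
        if t == b'u':
            return 'uncompressed, leading marker byte to strip'
        if t == b'\0':
            return 'stored as-is (compression gained nothing)'
        if t == b'x':
            return 'zlib-compressed'
        return 'compressed by the engine registered for header %r' % t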
2344 2347 def _addrevision(
2345 2348 self,
2346 2349 node,
2347 2350 rawtext,
2348 2351 transaction,
2349 2352 link,
2350 2353 p1,
2351 2354 p2,
2352 2355 flags,
2353 2356 cachedelta,
2354 2357 alwayscache=False,
2355 2358 deltacomputer=None,
2356 2359 sidedata=None,
2357 2360 ):
2358 2361 """internal function to add revisions to the log
2359 2362
2360 2363 see addrevision for argument descriptions.
2361 2364
2362 2365 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2363 2366
2364 2367 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2365 2368 be used.
2366 2369
2367 2370 invariants:
2368 2371 - rawtext is optional (can be None); if not set, cachedelta must be set.
2369 2372 if both are set, they must correspond to each other.
2370 2373 """
2371 2374 if node == self.nullid:
2372 2375 raise error.RevlogError(
2373 2376 _(b"%s: attempt to add null revision") % self.display_id
2374 2377 )
2375 2378 if (
2376 2379 node == self.nodeconstants.wdirid
2377 2380 or node in self.nodeconstants.wdirfilenodeids
2378 2381 ):
2379 2382 raise error.RevlogError(
2380 2383 _(b"%s: attempt to add wdir revision") % self.display_id
2381 2384 )
2382 2385 if self._writinghandles is None:
2383 2386 msg = b'adding revision outside `revlog._writing` context'
2384 2387 raise error.ProgrammingError(msg)
2385 2388
2386 2389 if self._inline:
2387 2390 fh = self._writinghandles[0]
2388 2391 else:
2389 2392 fh = self._writinghandles[1]
2390 2393
2391 2394 btext = [rawtext]
2392 2395
2393 2396 curr = len(self)
2394 2397 prev = curr - 1
2395 2398
2396 2399 offset = self._get_data_offset(prev)
2397 2400
2398 2401 if self._concurrencychecker:
2399 2402 ifh, dfh, sdfh = self._writinghandles
2400 2403 # XXX no checking for the sidedata file
2401 2404 if self._inline:
2402 2405 # offset is "as if" it were in the .d file, so we need to add on
2403 2406 # the size of the entry metadata.
2404 2407 self._concurrencychecker(
2405 2408 ifh, self._indexfile, offset + curr * self.index.entry_size
2406 2409 )
2407 2410 else:
2408 2411 # Entries in the .i are a consistent size.
2409 2412 self._concurrencychecker(
2410 2413 ifh, self._indexfile, curr * self.index.entry_size
2411 2414 )
2412 2415 self._concurrencychecker(dfh, self._datafile, offset)
2413 2416
2414 2417 p1r, p2r = self.rev(p1), self.rev(p2)
2415 2418
2416 2419 # full versions are inserted when the needed deltas
2417 2420 # become comparable to the uncompressed text
2418 2421 if rawtext is None:
2419 2422 # need the rawtext size, before it is changed by flag processors, which
2420 2423 # is the non-raw size. use revlog explicitly to avoid filelog's extra
2421 2424 # logic that might remove metadata size.
2422 2425 textlen = mdiff.patchedsize(
2423 2426 revlog.size(self, cachedelta[0]), cachedelta[1]
2424 2427 )
2425 2428 else:
2426 2429 textlen = len(rawtext)
2427 2430
2428 2431 if deltacomputer is None:
2429 deltacomputer = deltautil.deltacomputer(self)
2432 write_debug = None
2433 if self._debug_delta:
2434 write_debug = transaction._report
2435 deltacomputer = deltautil.deltacomputer(
2436 self, write_debug=write_debug
2437 )
2430 2438
2431 2439 revinfo = revlogutils.revisioninfo(
2432 2440 node,
2433 2441 p1,
2434 2442 p2,
2435 2443 btext,
2436 2444 textlen,
2437 2445 cachedelta,
2438 2446 flags,
2439 2447 )
2440 2448
2441 2449 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2442 2450
2443 2451 compression_mode = COMP_MODE_INLINE
2444 2452 if self._docket is not None:
2445 2453 default_comp = self._docket.default_compression_header
2446 2454 r = deltautil.delta_compression(default_comp, deltainfo)
2447 2455 compression_mode, deltainfo = r
2448 2456
2449 2457 sidedata_compression_mode = COMP_MODE_INLINE
2450 2458 if sidedata and self.hassidedata:
2451 2459 sidedata_compression_mode = COMP_MODE_PLAIN
2452 2460 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2453 2461 sidedata_offset = self._docket.sidedata_end
2454 2462 h, comp_sidedata = self.compress(serialized_sidedata)
2455 2463 if (
2456 2464 h != b'u'
2457 2465 and comp_sidedata[0:1] != b'\0'
2458 2466 and len(comp_sidedata) < len(serialized_sidedata)
2459 2467 ):
2460 2468 assert not h
2461 2469 if (
2462 2470 comp_sidedata[0:1]
2463 2471 == self._docket.default_compression_header
2464 2472 ):
2465 2473 sidedata_compression_mode = COMP_MODE_DEFAULT
2466 2474 serialized_sidedata = comp_sidedata
2467 2475 else:
2468 2476 sidedata_compression_mode = COMP_MODE_INLINE
2469 2477 serialized_sidedata = comp_sidedata
2470 2478 else:
2471 2479 serialized_sidedata = b""
2472 2480 # Don't store the offset if the sidedata is empty; that way
2473 2481 # we can easily detect empty sidedata, and it will be no different
2474 2482 # from sidedata we add manually.
2475 2483 sidedata_offset = 0
2476 2484
2477 2485 rank = RANK_UNKNOWN
2478 2486 if self._format_version == CHANGELOGV2:
2479 2487 if (p1r, p2r) == (nullrev, nullrev):
2480 2488 rank = 1
2481 2489 elif p1r != nullrev and p2r == nullrev:
2482 2490 rank = 1 + self.fast_rank(p1r)
2483 2491 elif p1r == nullrev and p2r != nullrev:
2484 2492 rank = 1 + self.fast_rank(p2r)
2485 2493 else: # merge node
2486 2494 if rustdagop is not None and self.index.rust_ext_compat:
2487 2495 rank = rustdagop.rank(self.index, p1r, p2r)
2488 2496 else:
2489 2497 pmin, pmax = sorted((p1r, p2r))
2490 2498 rank = 1 + self.fast_rank(pmax)
2491 2499 rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))
2492 2500
2493 2501 e = revlogutils.entry(
2494 2502 flags=flags,
2495 2503 data_offset=offset,
2496 2504 data_compressed_length=deltainfo.deltalen,
2497 2505 data_uncompressed_length=textlen,
2498 2506 data_compression_mode=compression_mode,
2499 2507 data_delta_base=deltainfo.base,
2500 2508 link_rev=link,
2501 2509 parent_rev_1=p1r,
2502 2510 parent_rev_2=p2r,
2503 2511 node_id=node,
2504 2512 sidedata_offset=sidedata_offset,
2505 2513 sidedata_compressed_length=len(serialized_sidedata),
2506 2514 sidedata_compression_mode=sidedata_compression_mode,
2507 2515 rank=rank,
2508 2516 )
2509 2517
2510 2518 self.index.append(e)
2511 2519 entry = self.index.entry_binary(curr)
2512 2520 if curr == 0 and self._docket is None:
2513 2521 header = self._format_flags | self._format_version
2514 2522 header = self.index.pack_header(header)
2515 2523 entry = header + entry
2516 2524 self._writeentry(
2517 2525 transaction,
2518 2526 entry,
2519 2527 deltainfo.data,
2520 2528 link,
2521 2529 offset,
2522 2530 serialized_sidedata,
2523 2531 sidedata_offset,
2524 2532 )
2525 2533
2526 2534 rawtext = btext[0]
2527 2535
2528 2536 if alwayscache and rawtext is None:
2529 2537 rawtext = deltacomputer.buildtext(revinfo, fh)
2530 2538
2531 2539 if type(rawtext) == bytes: # only accept immutable objects
2532 2540 self._revisioncache = (node, curr, rawtext)
2533 2541 self._chainbasecache[curr] = deltainfo.chainbase
2534 2542 return curr
2535 2543
2536 2544 def _get_data_offset(self, prev):
2537 2545 """Returns the current offset in the (in-transaction) data file.
2538 2546 Versions < 2 of the revlog can get this in O(1), revlog v2 needs a docket
2539 2547 file to store that information: since sidedata can be rewritten to the
2540 2548 end of the data file within a transaction, you can have cases where, for
2541 2549 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2542 2550 to `n - 1`'s sidedata being written after `n`'s data.
2543 2551
2544 2552 TODO cache this in a docket file before getting out of experimental."""
2545 2553 if self._docket is None:
2546 2554 return self.end(prev)
2547 2555 else:
2548 2556 return self._docket.data_end
2549 2557
2550 2558 def _writeentry(
2551 2559 self, transaction, entry, data, link, offset, sidedata, sidedata_offset
2552 2560 ):
2553 2561 # Files opened in a+ mode have inconsistent behavior on various
2554 2562 # platforms. Windows requires that a file positioning call be made
2555 2563 # when the file handle transitions between reads and writes. See
2556 2564 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2557 2565 # platforms, Python or the platform itself can be buggy. Some versions
2558 2566 # of Solaris have been observed to not append at the end of the file
2559 2567 # if the file was seeked to before the end. See issue4943 for more.
2560 2568 #
2561 2569 # We work around this issue by inserting a seek() before writing.
2562 2570 # Note: This is likely not necessary on Python 3. However, because
2563 2571 # the file handle is reused for reads and may be seeked there, we need
2564 2572 # to be careful before changing this.
2565 2573 if self._writinghandles is None:
2566 2574 msg = b'adding revision outside `revlog._writing` context'
2567 2575 raise error.ProgrammingError(msg)
2568 2576 ifh, dfh, sdfh = self._writinghandles
2569 2577 if self._docket is None:
2570 2578 ifh.seek(0, os.SEEK_END)
2571 2579 else:
2572 2580 ifh.seek(self._docket.index_end, os.SEEK_SET)
2573 2581 if dfh:
2574 2582 if self._docket is None:
2575 2583 dfh.seek(0, os.SEEK_END)
2576 2584 else:
2577 2585 dfh.seek(self._docket.data_end, os.SEEK_SET)
2578 2586 if sdfh:
2579 2587 sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2580 2588
2581 2589 curr = len(self) - 1
2582 2590 if not self._inline:
2583 2591 transaction.add(self._datafile, offset)
2584 2592 if self._sidedatafile:
2585 2593 transaction.add(self._sidedatafile, sidedata_offset)
2586 2594 transaction.add(self._indexfile, curr * len(entry))
2587 2595 if data[0]:
2588 2596 dfh.write(data[0])
2589 2597 dfh.write(data[1])
2590 2598 if sidedata:
2591 2599 sdfh.write(sidedata)
2592 2600 ifh.write(entry)
2593 2601 else:
2594 2602 offset += curr * self.index.entry_size
2595 2603 transaction.add(self._indexfile, offset)
2596 2604 ifh.write(entry)
2597 2605 ifh.write(data[0])
2598 2606 ifh.write(data[1])
2599 2607 assert not sidedata
2600 2608 self._enforceinlinesize(transaction)
2601 2609 if self._docket is not None:
2602 2610 # revlog-v2 always has 3 writing handles, help Pytype
2603 2611 wh1 = self._writinghandles[0]
2604 2612 wh2 = self._writinghandles[1]
2605 2613 wh3 = self._writinghandles[2]
2606 2614 assert wh1 is not None
2607 2615 assert wh2 is not None
2608 2616 assert wh3 is not None
2609 2617 self._docket.index_end = wh1.tell()
2610 2618 self._docket.data_end = wh2.tell()
2611 2619 self._docket.sidedata_end = wh3.tell()
2612 2620
2613 2621 nodemaputil.setup_persistent_nodemap(transaction, self)
2614 2622
2615 2623 def addgroup(
2616 2624 self,
2617 2625 deltas,
2618 2626 linkmapper,
2619 2627 transaction,
2620 2628 alwayscache=False,
2621 2629 addrevisioncb=None,
2622 2630 duplicaterevisioncb=None,
2623 2631 ):
2624 2632 """
2625 2633 add a delta group
2626 2634
2627 2635 Given a set of deltas, add them to the revision log. The
2628 2636 first delta is against its parent, which should be in our
2629 2637 log; the rest are against the previous delta.
2630 2638
2631 2639 If ``addrevisioncb`` is defined, it will be called with arguments of
2632 2640 this revlog and the node that was added.
2633 2641 """
2634 2642
2635 2643 if self._adding_group:
2636 2644 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2637 2645
2638 2646 self._adding_group = True
2639 2647 empty = True
2640 2648 try:
2641 2649 with self._writing(transaction):
2642 deltacomputer = deltautil.deltacomputer(self)
2650 write_debug = None
2651 if self._debug_delta:
2652 write_debug = transaction._report
2653 deltacomputer = deltautil.deltacomputer(
2654 self,
2655 write_debug=write_debug,
2656 )
2643 2657 # loop through our set of deltas
2644 2658 for data in deltas:
2645 2659 (
2646 2660 node,
2647 2661 p1,
2648 2662 p2,
2649 2663 linknode,
2650 2664 deltabase,
2651 2665 delta,
2652 2666 flags,
2653 2667 sidedata,
2654 2668 ) = data
2655 2669 link = linkmapper(linknode)
2656 2670 flags = flags or REVIDX_DEFAULT_FLAGS
2657 2671
2658 2672 rev = self.index.get_rev(node)
2659 2673 if rev is not None:
2660 2674 # this can happen if two branches make the same change
2661 2675 self._nodeduplicatecallback(transaction, rev)
2662 2676 if duplicaterevisioncb:
2663 2677 duplicaterevisioncb(self, rev)
2664 2678 empty = False
2665 2679 continue
2666 2680
2667 2681 for p in (p1, p2):
2668 2682 if not self.index.has_node(p):
2669 2683 raise error.LookupError(
2670 2684 p, self.radix, _(b'unknown parent')
2671 2685 )
2672 2686
2673 2687 if not self.index.has_node(deltabase):
2674 2688 raise error.LookupError(
2675 2689 deltabase, self.display_id, _(b'unknown delta base')
2676 2690 )
2677 2691
2678 2692 baserev = self.rev(deltabase)
2679 2693
2680 2694 if baserev != nullrev and self.iscensored(baserev):
2681 2695 # if base is censored, delta must be full replacement in a
2682 2696 # single patch operation
2683 2697 hlen = struct.calcsize(b">lll")
2684 2698 oldlen = self.rawsize(baserev)
2685 2699 newlen = len(delta) - hlen
2686 2700 if delta[:hlen] != mdiff.replacediffheader(
2687 2701 oldlen, newlen
2688 2702 ):
2689 2703 raise error.CensoredBaseError(
2690 2704 self.display_id, self.node(baserev)
2691 2705 )
2692 2706
2693 2707 if not flags and self._peek_iscensored(baserev, delta):
2694 2708 flags |= REVIDX_ISCENSORED
2695 2709
2696 2710 # We assume consumers of addrevisioncb will want to retrieve
2697 2711 # the added revision, which will require a call to
2698 2712 # revision(). revision() will fast path if there is a cache
2699 2713 # hit. So, we tell _addrevision() to always cache in this case.
2700 2714 # We're only using addgroup() in the context of changegroup
2701 2715 # generation so the revision data can always be handled as raw
2702 2716 # by the flagprocessor.
2703 2717 rev = self._addrevision(
2704 2718 node,
2705 2719 None,
2706 2720 transaction,
2707 2721 link,
2708 2722 p1,
2709 2723 p2,
2710 2724 flags,
2711 2725 (baserev, delta),
2712 2726 alwayscache=alwayscache,
2713 2727 deltacomputer=deltacomputer,
2714 2728 sidedata=sidedata,
2715 2729 )
2716 2730
2717 2731 if addrevisioncb:
2718 2732 addrevisioncb(self, rev)
2719 2733 empty = False
2720 2734 finally:
2721 2735 self._adding_group = False
2722 2736 return not empty
2723 2737
2724 2738 def iscensored(self, rev):
2725 2739 """Check if a file revision is censored."""
2726 2740 if not self._censorable:
2727 2741 return False
2728 2742
2729 2743 return self.flags(rev) & REVIDX_ISCENSORED
2730 2744
2731 2745 def _peek_iscensored(self, baserev, delta):
2732 2746 """Quickly check if a delta produces a censored revision."""
2733 2747 if not self._censorable:
2734 2748 return False
2735 2749
2736 2750 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2737 2751
2738 2752 def getstrippoint(self, minlink):
2739 2753 """find the minimum rev that must be stripped to strip the linkrev
2740 2754
2741 2755 Returns a tuple containing the minimum rev and a set of all revs that
2742 2756 have linkrevs that will be broken by this strip.
2743 2757 """
2744 2758 return storageutil.resolvestripinfo(
2745 2759 minlink,
2746 2760 len(self) - 1,
2747 2761 self.headrevs(),
2748 2762 self.linkrev,
2749 2763 self.parentrevs,
2750 2764 )
2751 2765
2752 2766 def strip(self, minlink, transaction):
2753 2767 """truncate the revlog on the first revision with a linkrev >= minlink
2754 2768
2755 2769 This function is called when we're stripping revision minlink and
2756 2770 its descendants from the repository.
2757 2771
2758 2772 We have to remove all revisions with linkrev >= minlink, because
2759 2773 the equivalent changelog revisions will be renumbered after the
2760 2774 strip.
2761 2775
2762 2776 So we truncate the revlog on the first of these revisions, and
2763 2777 trust that the caller has saved the revisions that shouldn't be
2764 2778 removed and that it'll re-add them after this truncation.
2765 2779 """
2766 2780 if len(self) == 0:
2767 2781 return
2768 2782
2769 2783 rev, _ = self.getstrippoint(minlink)
2770 2784 if rev == len(self):
2771 2785 return
2772 2786
2773 2787 # first truncate the files on disk
2774 2788 data_end = self.start(rev)
2775 2789 if not self._inline:
2776 2790 transaction.add(self._datafile, data_end)
2777 2791 end = rev * self.index.entry_size
2778 2792 else:
2779 2793 end = data_end + (rev * self.index.entry_size)
2780 2794
2781 2795 if self._sidedatafile:
2782 2796 sidedata_end = self.sidedata_cut_off(rev)
2783 2797 transaction.add(self._sidedatafile, sidedata_end)
2784 2798
2785 2799 transaction.add(self._indexfile, end)
2786 2800 if self._docket is not None:
2787 2801 # XXX we could leverage the docket while stripping. However it is
2788 2802 # not powerful enough at the time of this comment
2789 2803 self._docket.index_end = end
2790 2804 self._docket.data_end = data_end
2791 2805 self._docket.sidedata_end = sidedata_end
2792 2806 self._docket.write(transaction, stripping=True)
2793 2807
2794 2808 # then reset internal state in memory to forget those revisions
2795 2809 self._revisioncache = None
2796 2810 self._chaininfocache = util.lrucachedict(500)
2797 2811 self._segmentfile.clear_cache()
2798 2812 self._segmentfile_sidedata.clear_cache()
2799 2813
2800 2814 del self.index[rev:-1]
2801 2815
2802 2816 def checksize(self):
2803 2817 """Check size of index and data files
2804 2818
2805 2819 return a (dd, di) tuple.
2806 2820 - dd: extra bytes for the "data" file
2807 2821 - di: extra bytes for the "index" file
2808 2822
2809 2823 A healthy revlog will return (0, 0).
2810 2824 """
2811 2825 expected = 0
2812 2826 if len(self):
2813 2827 expected = max(0, self.end(len(self) - 1))
2814 2828
2815 2829 try:
2816 2830 with self._datafp() as f:
2817 2831 f.seek(0, io.SEEK_END)
2818 2832 actual = f.tell()
2819 2833 dd = actual - expected
2820 2834 except IOError as inst:
2821 2835 if inst.errno != errno.ENOENT:
2822 2836 raise
2823 2837 dd = 0
2824 2838
2825 2839 try:
2826 2840 f = self.opener(self._indexfile)
2827 2841 f.seek(0, io.SEEK_END)
2828 2842 actual = f.tell()
2829 2843 f.close()
2830 2844 s = self.index.entry_size
2831 2845 i = max(0, actual // s)
2832 2846 di = actual - (i * s)
2833 2847 if self._inline:
2834 2848 databytes = 0
2835 2849 for r in self:
2836 2850 databytes += max(0, self.length(r))
2837 2851 dd = 0
2838 2852 di = actual - len(self) * s - databytes
2839 2853 except IOError as inst:
2840 2854 if inst.errno != errno.ENOENT:
2841 2855 raise
2842 2856 di = 0
2843 2857
2844 2858 return (dd, di)
2845 2859
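An illustrative use of the returned tuple (with `rl` as a hypothetical revlog instance): non-zero values point at stray bytes left behind by an interrupted write:

    dd, di = rl.checksize()
    if dd or di:
        print('data file has %d stray bytes, index has %d stray bytes' % (dd, di))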
2846 2860 def files(self):
2847 2861 res = [self._indexfile]
2848 2862 if self._docket_file is None:
2849 2863 if not self._inline:
2850 2864 res.append(self._datafile)
2851 2865 else:
2852 2866 res.append(self._docket_file)
2853 2867 res.extend(self._docket.old_index_filepaths(include_empty=False))
2854 2868 if self._docket.data_end:
2855 2869 res.append(self._datafile)
2856 2870 res.extend(self._docket.old_data_filepaths(include_empty=False))
2857 2871 if self._docket.sidedata_end:
2858 2872 res.append(self._sidedatafile)
2859 2873 res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
2860 2874 return res
2861 2875
2862 2876 def emitrevisions(
2863 2877 self,
2864 2878 nodes,
2865 2879 nodesorder=None,
2866 2880 revisiondata=False,
2867 2881 assumehaveparentrevisions=False,
2868 2882 deltamode=repository.CG_DELTAMODE_STD,
2869 2883 sidedata_helpers=None,
2870 2884 ):
2871 2885 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2872 2886 raise error.ProgrammingError(
2873 2887 b'unhandled value for nodesorder: %s' % nodesorder
2874 2888 )
2875 2889
2876 2890 if nodesorder is None and not self._generaldelta:
2877 2891 nodesorder = b'storage'
2878 2892
2879 2893 if (
2880 2894 not self._storedeltachains
2881 2895 and deltamode != repository.CG_DELTAMODE_PREV
2882 2896 ):
2883 2897 deltamode = repository.CG_DELTAMODE_FULL
2884 2898
2885 2899 return storageutil.emitrevisions(
2886 2900 self,
2887 2901 nodes,
2888 2902 nodesorder,
2889 2903 revlogrevisiondelta,
2890 2904 deltaparentfn=self.deltaparent,
2891 2905 candeltafn=self.candelta,
2892 2906 rawsizefn=self.rawsize,
2893 2907 revdifffn=self.revdiff,
2894 2908 flagsfn=self.flags,
2895 2909 deltamode=deltamode,
2896 2910 revisiondata=revisiondata,
2897 2911 assumehaveparentrevisions=assumehaveparentrevisions,
2898 2912 sidedata_helpers=sidedata_helpers,
2899 2913 )
2900 2914
2901 2915 DELTAREUSEALWAYS = b'always'
2902 2916 DELTAREUSESAMEREVS = b'samerevs'
2903 2917 DELTAREUSENEVER = b'never'
2904 2918
2905 2919 DELTAREUSEFULLADD = b'fulladd'
2906 2920
2907 2921 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2908 2922
2909 2923 def clone(
2910 2924 self,
2911 2925 tr,
2912 2926 destrevlog,
2913 2927 addrevisioncb=None,
2914 2928 deltareuse=DELTAREUSESAMEREVS,
2915 2929 forcedeltabothparents=None,
2916 2930 sidedata_helpers=None,
2917 2931 ):
2918 2932 """Copy this revlog to another, possibly with format changes.
2919 2933
2920 2934 The destination revlog will contain the same revisions and nodes.
2921 2935 However, it may not be bit-for-bit identical due to e.g. delta encoding
2922 2936 differences.
2923 2937
2924 2938 The ``deltareuse`` argument controls how deltas from the existing revlog
2925 2939 are preserved in the destination revlog. The argument can have the
2926 2940 following values:
2927 2941
2928 2942 DELTAREUSEALWAYS
2929 2943 Deltas will always be reused (if possible), even if the destination
2930 2944 revlog would not select the same revisions for the delta. This is the
2931 2945 fastest mode of operation.
2932 2946 DELTAREUSESAMEREVS
2933 2947 Deltas will be reused if the destination revlog would pick the same
2934 2948 revisions for the delta. This mode strikes a balance between speed
2935 2949 and optimization.
2936 2950 DELTAREUSENEVER
2937 2951 Deltas will never be reused. This is the slowest mode of execution.
2938 2952 This mode can be used to recompute deltas (e.g. if the diff/delta
2939 2953 algorithm changes).
2940 2954 DELTAREUSEFULLADD
2941 2955 Revisions will be re-added as if they were new content. This is
2942 2956 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
2943 2957 e.g. large file detection and handling.
2944 2958
2945 2959 Delta computation can be slow, so the choice of delta reuse policy can
2946 2960 significantly affect run time.
2947 2961
2948 2962 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2949 2963 two extremes. Deltas will be reused if they are appropriate. But if the
2950 2964 delta could choose a better revision, it will do so. This means if you
2951 2965 are converting a non-generaldelta revlog to a generaldelta revlog,
2952 2966 deltas will be recomputed if the delta's parent isn't a parent of the
2953 2967 revision.
2954 2968
2955 2969 In addition to the delta policy, the ``forcedeltabothparents``
2956 2970 argument controls whether to force computing deltas against both parents
2957 2971 for merges. When unset, the destination revlog's current setting is used.
2958 2972
2959 2973 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2960 2974 `sidedata_helpers`.
2961 2975 """
2962 2976 if deltareuse not in self.DELTAREUSEALL:
2963 2977 raise ValueError(
2964 2978 _(b'value for deltareuse invalid: %s') % deltareuse
2965 2979 )
2966 2980
2967 2981 if len(destrevlog):
2968 2982 raise ValueError(_(b'destination revlog is not empty'))
2969 2983
2970 2984 if getattr(self, 'filteredrevs', None):
2971 2985 raise ValueError(_(b'source revlog has filtered revisions'))
2972 2986 if getattr(destrevlog, 'filteredrevs', None):
2973 2987 raise ValueError(_(b'destination revlog has filtered revisions'))
2974 2988
2975 2989 # lazydelta and lazydeltabase control whether to reuse a cached delta,
2976 2990 # if possible.
2977 2991 oldlazydelta = destrevlog._lazydelta
2978 2992 oldlazydeltabase = destrevlog._lazydeltabase
2979 2993 oldamd = destrevlog._deltabothparents
2980 2994
2981 2995 try:
2982 2996 if deltareuse == self.DELTAREUSEALWAYS:
2983 2997 destrevlog._lazydeltabase = True
2984 2998 destrevlog._lazydelta = True
2985 2999 elif deltareuse == self.DELTAREUSESAMEREVS:
2986 3000 destrevlog._lazydeltabase = False
2987 3001 destrevlog._lazydelta = True
2988 3002 elif deltareuse == self.DELTAREUSENEVER:
2989 3003 destrevlog._lazydeltabase = False
2990 3004 destrevlog._lazydelta = False
2991 3005
2992 3006 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2993 3007
2994 3008 self._clone(
2995 3009 tr,
2996 3010 destrevlog,
2997 3011 addrevisioncb,
2998 3012 deltareuse,
2999 3013 forcedeltabothparents,
3000 3014 sidedata_helpers,
3001 3015 )
3002 3016
3003 3017 finally:
3004 3018 destrevlog._lazydelta = oldlazydelta
3005 3019 destrevlog._lazydeltabase = oldlazydeltabase
3006 3020 destrevlog._deltabothparents = oldamd
3007 3021
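A minimal, hypothetical usage sketch (`repo`, `src_revlog` and `dest_revlog` are placeholders): cloning into an empty destination while recomputing every delta, as one might do during a format upgrade:

    with repo.transaction(b'sketch-clone') as tr:
        src_revlog.clone(
            tr,
            dest_revlog,
            deltareuse=src_revlog.DELTAREUSENEVER,
        )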
3008 3022 def _clone(
3009 3023 self,
3010 3024 tr,
3011 3025 destrevlog,
3012 3026 addrevisioncb,
3013 3027 deltareuse,
3014 3028 forcedeltabothparents,
3015 3029 sidedata_helpers,
3016 3030 ):
3017 3031 """perform the core duty of `revlog.clone` after parameter processing"""
3018 deltacomputer = deltautil.deltacomputer(destrevlog)
3032 write_debug = None
3033 if self._debug_delta:
3034 write_debug = tr._report
3035 deltacomputer = deltautil.deltacomputer(
3036 destrevlog,
3037 write_debug=write_debug,
3038 )
3019 3039 index = self.index
3020 3040 for rev in self:
3021 3041 entry = index[rev]
3022 3042
3023 3043 # Some classes override linkrev to take filtered revs into
3024 3044 # account. Use raw entry from index.
3025 3045 flags = entry[0] & 0xFFFF
3026 3046 linkrev = entry[4]
3027 3047 p1 = index[entry[5]][7]
3028 3048 p2 = index[entry[6]][7]
3029 3049 node = entry[7]
3030 3050
3031 3051 # (Possibly) reuse the delta from the revlog if allowed and
3032 3052 # the revlog chunk is a delta.
3033 3053 cachedelta = None
3034 3054 rawtext = None
3035 3055 if deltareuse == self.DELTAREUSEFULLADD:
3036 3056 text = self._revisiondata(rev)
3037 3057 sidedata = self.sidedata(rev)
3038 3058
3039 3059 if sidedata_helpers is not None:
3040 3060 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3041 3061 self, sidedata_helpers, sidedata, rev
3042 3062 )
3043 3063 flags = flags | new_flags[0] & ~new_flags[1]
3044 3064
3045 3065 destrevlog.addrevision(
3046 3066 text,
3047 3067 tr,
3048 3068 linkrev,
3049 3069 p1,
3050 3070 p2,
3051 3071 cachedelta=cachedelta,
3052 3072 node=node,
3053 3073 flags=flags,
3054 3074 deltacomputer=deltacomputer,
3055 3075 sidedata=sidedata,
3056 3076 )
3057 3077 else:
3058 3078 if destrevlog._lazydelta:
3059 3079 dp = self.deltaparent(rev)
3060 3080 if dp != nullrev:
3061 3081 cachedelta = (dp, bytes(self._chunk(rev)))
3062 3082
3063 3083 sidedata = None
3064 3084 if not cachedelta:
3065 3085 rawtext = self._revisiondata(rev)
3066 3086 sidedata = self.sidedata(rev)
3067 3087 if sidedata is None:
3068 3088 sidedata = self.sidedata(rev)
3069 3089
3070 3090 if sidedata_helpers is not None:
3071 3091 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3072 3092 self, sidedata_helpers, sidedata, rev
3073 3093 )
3074 3094 flags = flags | new_flags[0] & ~new_flags[1]
3075 3095
3076 3096 with destrevlog._writing(tr):
3077 3097 destrevlog._addrevision(
3078 3098 node,
3079 3099 rawtext,
3080 3100 tr,
3081 3101 linkrev,
3082 3102 p1,
3083 3103 p2,
3084 3104 flags,
3085 3105 cachedelta,
3086 3106 deltacomputer=deltacomputer,
3087 3107 sidedata=sidedata,
3088 3108 )
3089 3109
3090 3110 if addrevisioncb:
3091 3111 addrevisioncb(self, rev, node)
3092 3112
3093 3113 def censorrevision(self, tr, censornode, tombstone=b''):
3094 3114 if self._format_version == REVLOGV0:
3095 3115 raise error.RevlogError(
3096 3116 _(b'cannot censor with version %d revlogs')
3097 3117 % self._format_version
3098 3118 )
3099 3119 elif self._format_version == REVLOGV1:
3100 3120 rewrite.v1_censor(self, tr, censornode, tombstone)
3101 3121 else:
3102 3122 rewrite.v2_censor(self, tr, censornode, tombstone)
3103 3123
3104 3124 def verifyintegrity(self, state):
3105 3125 """Verifies the integrity of the revlog.
3106 3126
3107 3127 Yields ``revlogproblem`` instances describing problems that are
3108 3128 found.
3109 3129 """
3110 3130 dd, di = self.checksize()
3111 3131 if dd:
3112 3132 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3113 3133 if di:
3114 3134 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3115 3135
3116 3136 version = self._format_version
3117 3137
3118 3138 # The verifier tells us what version revlog we should be.
3119 3139 if version != state[b'expectedversion']:
3120 3140 yield revlogproblem(
3121 3141 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3122 3142 % (self.display_id, version, state[b'expectedversion'])
3123 3143 )
3124 3144
3125 3145 state[b'skipread'] = set()
3126 3146 state[b'safe_renamed'] = set()
3127 3147
3128 3148 for rev in self:
3129 3149 node = self.node(rev)
3130 3150
3131 3151 # Verify contents. 4 cases to care about:
3132 3152 #
3133 3153 # common: the most common case
3134 3154 # rename: with a rename
3135 3155 # meta: file content starts with b'\1\n', the metadata
3136 3156 # header defined in filelog.py, but without a rename
3137 3157 # ext: content stored externally
3138 3158 #
3139 3159 # More formally, their differences are shown below:
3140 3160 #
3141 3161 # | common | rename | meta | ext
3142 3162 # -------------------------------------------------------
3143 3163 # flags() | 0 | 0 | 0 | not 0
3144 3164 # renamed() | False | True | False | ?
3145 3165 # rawtext[0:2]=='\1\n'| False | True | True | ?
3146 3166 #
3147 3167 # "rawtext" means the raw text stored in revlog data, which
3148 3168 # could be retrieved by "rawdata(rev)". "text"
3149 3169 # mentioned below is "revision(rev)".
3150 3170 #
3151 3171 # There are 3 different lengths stored physically:
3152 3172 # 1. L1: rawsize, stored in revlog index
3153 3173 # 2. L2: len(rawtext), stored in revlog data
3154 3174 # 3. L3: len(text), stored in revlog data if flags==0, or
3155 3175 # possibly somewhere else if flags!=0
3156 3176 #
3157 3177 # L1 should be equal to L2. L3 could be different from them.
3158 3178 # "text" may or may not affect commit hash depending on flag
3159 3179 # processors (see flagutil.addflagprocessor).
3160 3180 #
3161 3181 # | common | rename | meta | ext
3162 3182 # -------------------------------------------------
3163 3183 # rawsize() | L1 | L1 | L1 | L1
3164 3184 # size() | L1 | L2-LM | L1(*) | L1 (?)
3165 3185 # len(rawtext) | L2 | L2 | L2 | L2
3166 3186 # len(text) | L2 | L2 | L2 | L3
3167 3187 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3168 3188 #
3169 3189 # LM: length of metadata, depending on rawtext
3170 3190 # (*): not ideal, see comment in filelog.size
3171 3191 # (?): could be "- len(meta)" if the resolved content has
3172 3192 # rename metadata
3173 3193 #
3174 3194 # Checks that need to be done:
3175 3195 # 1. length check: L1 == L2, in all cases.
3176 3196 # 2. hash check: depending on flag processor, we may need to
3177 3197 # use either "text" (external), or "rawtext" (in revlog).
3178 3198
3179 3199 try:
3180 3200 skipflags = state.get(b'skipflags', 0)
3181 3201 if skipflags:
3182 3202 skipflags &= self.flags(rev)
3183 3203
3184 3204 _verify_revision(self, skipflags, state, node)
3185 3205
3186 3206 l1 = self.rawsize(rev)
3187 3207 l2 = len(self.rawdata(node))
3188 3208
3189 3209 if l1 != l2:
3190 3210 yield revlogproblem(
3191 3211 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3192 3212 node=node,
3193 3213 )
3194 3214
3195 3215 except error.CensoredNodeError:
3196 3216 if state[b'erroroncensored']:
3197 3217 yield revlogproblem(
3198 3218 error=_(b'censored file data'), node=node
3199 3219 )
3200 3220 state[b'skipread'].add(node)
3201 3221 except Exception as e:
3202 3222 yield revlogproblem(
3203 3223 error=_(b'unpacking %s: %s')
3204 3224 % (short(node), stringutil.forcebytestr(e)),
3205 3225 node=node,
3206 3226 )
3207 3227 state[b'skipread'].add(node)
3208 3228
3209 3229 def storageinfo(
3210 3230 self,
3211 3231 exclusivefiles=False,
3212 3232 sharedfiles=False,
3213 3233 revisionscount=False,
3214 3234 trackedsize=False,
3215 3235 storedsize=False,
3216 3236 ):
3217 3237 d = {}
3218 3238
3219 3239 if exclusivefiles:
3220 3240 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3221 3241 if not self._inline:
3222 3242 d[b'exclusivefiles'].append((self.opener, self._datafile))
3223 3243
3224 3244 if sharedfiles:
3225 3245 d[b'sharedfiles'] = []
3226 3246
3227 3247 if revisionscount:
3228 3248 d[b'revisionscount'] = len(self)
3229 3249
3230 3250 if trackedsize:
3231 3251 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3232 3252
3233 3253 if storedsize:
3234 3254 d[b'storedsize'] = sum(
3235 3255 self.opener.stat(path).st_size for path in self.files()
3236 3256 )
3237 3257
3238 3258 return d
3239 3259
3240 3260 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3241 3261 if not self.hassidedata:
3242 3262 return
3243 3263 # revlog formats with sidedata support do not support inline
3244 3264 assert not self._inline
3245 3265 if not helpers[1] and not helpers[2]:
3246 3266 # Nothing to generate or remove
3247 3267 return
3248 3268
3249 3269 new_entries = []
3250 3270 # append the new sidedata
3251 3271 with self._writing(transaction):
3252 3272 ifh, dfh, sdfh = self._writinghandles
3253 3273 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
3254 3274
3255 3275 current_offset = sdfh.tell()
3256 3276 for rev in range(startrev, endrev + 1):
3257 3277 entry = self.index[rev]
3258 3278 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3259 3279 store=self,
3260 3280 sidedata_helpers=helpers,
3261 3281 sidedata={},
3262 3282 rev=rev,
3263 3283 )
3264 3284
3265 3285 serialized_sidedata = sidedatautil.serialize_sidedata(
3266 3286 new_sidedata
3267 3287 )
3268 3288
3269 3289 sidedata_compression_mode = COMP_MODE_INLINE
3270 3290 if serialized_sidedata and self.hassidedata:
3271 3291 sidedata_compression_mode = COMP_MODE_PLAIN
3272 3292 h, comp_sidedata = self.compress(serialized_sidedata)
3273 3293 if (
3274 3294 h != b'u'
3275 3295 and comp_sidedata[0:1] != b'\0'
3276 3296 and len(comp_sidedata) < len(serialized_sidedata)
3277 3297 ):
3278 3298 assert not h
3279 3299 if (
3280 3300 comp_sidedata[0:1]
3281 3301 == self._docket.default_compression_header
3282 3302 ):
3283 3303 sidedata_compression_mode = COMP_MODE_DEFAULT
3284 3304 serialized_sidedata = comp_sidedata
3285 3305 else:
3286 3306 sidedata_compression_mode = COMP_MODE_INLINE
3287 3307 serialized_sidedata = comp_sidedata
3288 3308 if entry[8] != 0 or entry[9] != 0:
3289 3309 # rewriting entries that already have sidedata is not
3290 3310 # supported yet, because it introduces garbage data in the
3291 3311 # revlog.
3292 3312 msg = b"rewriting existing sidedata is not supported yet"
3293 3313 raise error.Abort(msg)
3294 3314
3295 3315 # Apply (potential) flags to add and to remove after running
3296 3316 # the sidedata helpers
3297 3317 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3298 3318 entry_update = (
3299 3319 current_offset,
3300 3320 len(serialized_sidedata),
3301 3321 new_offset_flags,
3302 3322 sidedata_compression_mode,
3303 3323 )
3304 3324
3305 3325 # the sidedata computation might have moved the file cursors around
3306 3326 sdfh.seek(current_offset, os.SEEK_SET)
3307 3327 sdfh.write(serialized_sidedata)
3308 3328 new_entries.append(entry_update)
3309 3329 current_offset += len(serialized_sidedata)
3310 3330 self._docket.sidedata_end = sdfh.tell()
3311 3331
3312 3332 # rewrite the new index entries
3313 3333 ifh.seek(startrev * self.index.entry_size)
3314 3334 for i, e in enumerate(new_entries):
3315 3335 rev = startrev + i
3316 3336 self.index.replace_sidedata_info(rev, *e)
3317 3337 packed = self.index.entry_binary(rev)
3318 3338 if rev == 0 and self._docket is None:
3319 3339 header = self._format_flags | self._format_version
3320 3340 header = self.index.pack_header(header)
3321 3341 packed = header + packed
3322 3342 ifh.write(packed)
@@ -1,1040 +1,1065 b''
1 1 Setting up test
2 2
3 3 $ hg init test
4 4 $ cd test
5 5 $ echo 0 > afile
6 6 $ hg add afile
7 7 $ hg commit -m "0.0"
8 8 $ echo 1 >> afile
9 9 $ hg commit -m "0.1"
10 10 $ echo 2 >> afile
11 11 $ hg commit -m "0.2"
12 12 $ echo 3 >> afile
13 13 $ hg commit -m "0.3"
14 14 $ hg update -C 0
15 15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 16 $ echo 1 >> afile
17 17 $ hg commit -m "1.1"
18 18 created new head
19 19 $ echo 2 >> afile
20 20 $ hg commit -m "1.2"
21 21 $ echo "a line" > fred
22 22 $ echo 3 >> afile
23 23 $ hg add fred
24 24 $ hg commit -m "1.3"
25 25 $ hg mv afile adifferentfile
26 26 $ hg commit -m "1.3m"
27 27 $ hg update -C 3
28 28 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
29 29 $ hg mv afile anotherfile
30 30 $ hg commit -m "0.3m"
31 31 $ hg verify
32 32 checking changesets
33 33 checking manifests
34 34 crosschecking files in changesets and manifests
35 35 checking files
36 36 checked 9 changesets with 7 changes to 4 files
37 37 $ cd ..
38 38 $ hg init empty
39 39
40 40 Bundle and phase
41 41
42 42 $ hg -R test phase --force --secret 0
43 43 $ hg -R test bundle phase.hg empty
44 44 searching for changes
45 45 no changes found (ignored 9 secret changesets)
46 46 [1]
47 47 $ hg -R test phase --draft -r 'head()'
48 48
49 49 Bundle --all
50 50
51 51 $ hg -R test bundle --all all.hg
52 52 9 changesets found
53 53
54 54 Bundle test to full.hg
55 55
56 56 $ hg -R test bundle full.hg empty
57 57 searching for changes
58 58 9 changesets found
59 59
60 60 Unbundle full.hg in test
61 61
62 62 $ hg -R test unbundle full.hg
63 63 adding changesets
64 64 adding manifests
65 65 adding file changes
66 66 added 0 changesets with 0 changes to 4 files
67 67 (run 'hg update' to get a working copy)
68 68
69 69 Verify empty
70 70
71 71 $ hg -R empty heads
72 72 [1]
73 73 $ hg -R empty verify
74 74 checking changesets
75 75 checking manifests
76 76 crosschecking files in changesets and manifests
77 77 checking files
78 78 checked 0 changesets with 0 changes to 0 files
79 79
80 80 #if repobundlerepo
81 81
82 82 Pull full.hg into test (using --cwd)
83 83
84 84 $ hg --cwd test pull ../full.hg
85 85 pulling from ../full.hg
86 86 searching for changes
87 87 no changes found
88 88
89 89 Verify that there are no leaked temporary files after pull (issue2797)
90 90
91 91 $ ls test/.hg | grep .hg10un
92 92 [1]
93 93
94 94 Pull full.hg into empty (using --cwd)
95 95
96 96 $ hg --cwd empty pull ../full.hg
97 97 pulling from ../full.hg
98 98 requesting all changes
99 99 adding changesets
100 100 adding manifests
101 101 adding file changes
102 102 added 9 changesets with 7 changes to 4 files (+1 heads)
103 103 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
104 104 (run 'hg heads' to see heads, 'hg merge' to merge)
105 105
106 106 Rollback empty
107 107
108 108 $ hg -R empty rollback
109 109 repository tip rolled back to revision -1 (undo pull)
110 110
111 111 Pull full.hg into empty again (using --cwd)
112 112
113 113 $ hg --cwd empty pull ../full.hg
114 114 pulling from ../full.hg
115 115 requesting all changes
116 116 adding changesets
117 117 adding manifests
118 118 adding file changes
119 119 added 9 changesets with 7 changes to 4 files (+1 heads)
120 120 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
121 121 (run 'hg heads' to see heads, 'hg merge' to merge)
122 122
123 123 Pull full.hg into test (using -R)
124 124
125 125 $ hg -R test pull full.hg
126 126 pulling from full.hg
127 127 searching for changes
128 128 no changes found
129 129
130 130 Pull full.hg into empty (using -R)
131 131
132 132 $ hg -R empty pull full.hg
133 133 pulling from full.hg
134 134 searching for changes
135 135 no changes found
136 136
137 137 Rollback empty
138 138
139 139 $ hg -R empty rollback
140 140 repository tip rolled back to revision -1 (undo pull)
141 141
142 142 Pull full.hg into empty again (using -R)
143 143
144 144 $ hg -R empty pull full.hg
145 145 pulling from full.hg
146 146 requesting all changes
147 147 adding changesets
148 148 adding manifests
149 149 adding file changes
150 150 added 9 changesets with 7 changes to 4 files (+1 heads)
151 151 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
152 152 (run 'hg heads' to see heads, 'hg merge' to merge)
153 153
154 154 Log -R full.hg in fresh empty
155 155
156 156 $ rm -r empty
157 157 $ hg init empty
158 158 $ cd empty
159 159 $ hg -R bundle://../full.hg log
160 160 changeset: 8:aa35859c02ea
161 161 tag: tip
162 162 parent: 3:eebf5a27f8ca
163 163 user: test
164 164 date: Thu Jan 01 00:00:00 1970 +0000
165 165 summary: 0.3m
166 166
167 167 changeset: 7:a6a34bfa0076
168 168 user: test
169 169 date: Thu Jan 01 00:00:00 1970 +0000
170 170 summary: 1.3m
171 171
172 172 changeset: 6:7373c1169842
173 173 user: test
174 174 date: Thu Jan 01 00:00:00 1970 +0000
175 175 summary: 1.3
176 176
177 177 changeset: 5:1bb50a9436a7
178 178 user: test
179 179 date: Thu Jan 01 00:00:00 1970 +0000
180 180 summary: 1.2
181 181
182 182 changeset: 4:095197eb4973
183 183 parent: 0:f9ee2f85a263
184 184 user: test
185 185 date: Thu Jan 01 00:00:00 1970 +0000
186 186 summary: 1.1
187 187
188 188 changeset: 3:eebf5a27f8ca
189 189 user: test
190 190 date: Thu Jan 01 00:00:00 1970 +0000
191 191 summary: 0.3
192 192
193 193 changeset: 2:e38ba6f5b7e0
194 194 user: test
195 195 date: Thu Jan 01 00:00:00 1970 +0000
196 196 summary: 0.2
197 197
198 198 changeset: 1:34c2bf6b0626
199 199 user: test
200 200 date: Thu Jan 01 00:00:00 1970 +0000
201 201 summary: 0.1
202 202
203 203 changeset: 0:f9ee2f85a263
204 204 user: test
205 205 date: Thu Jan 01 00:00:00 1970 +0000
206 206 summary: 0.0
207 207
208 208 Make sure bundlerepo doesn't leak tempfiles (issue2491)
209 209
210 210 $ ls .hg
211 211 00changelog.i
212 212 cache
213 213 requires
214 214 store
215 215 wcache
216 216
217 217 Pull ../full.hg into empty (with hook)
218 218
219 219 $ cat >> .hg/hgrc <<EOF
220 220 > [hooks]
221 221 > changegroup = sh -c "printenv.py --line changegroup"
222 222 > EOF
223 223
224 224 doesn't work (yet ?)
225 225 NOTE: msys is mangling the URL below
226 226
227 227 hg -R bundle://../full.hg verify
228 228
229 229 $ hg pull bundle://../full.hg
230 230 pulling from bundle:../full.hg
231 231 requesting all changes
232 232 adding changesets
233 233 adding manifests
234 234 adding file changes
235 235 added 9 changesets with 7 changes to 4 files (+1 heads)
236 236 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
237 237 changegroup hook: HG_HOOKNAME=changegroup
238 238 HG_HOOKTYPE=changegroup
239 239 HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735
240 240 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf
241 241 HG_SOURCE=pull
242 242 HG_TXNID=TXN:$ID$
243 243 HG_TXNNAME=pull
244 244 bundle:../full.hg (no-msys !)
245 245 bundle;../full.hg (msys !)
246 246 HG_URL=bundle:../full.hg (no-msys !)
247 247 HG_URL=bundle;../full.hg (msys !)
248 248
249 249 (run 'hg heads' to see heads, 'hg merge' to merge)
250 250
251 251 Rollback empty
252 252
253 253 $ hg rollback
254 254 repository tip rolled back to revision -1 (undo pull)
255 255 $ cd ..
256 256
257 257 Log -R bundle:empty+full.hg
258 258
259 259 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
260 260 8 7 6 5 4 3 2 1 0
261 261
262 262 Pull full.hg into empty again (using -R; with hook)
263 263
264 264 $ hg -R empty pull full.hg
265 265 pulling from full.hg
266 266 requesting all changes
267 267 adding changesets
268 268 adding manifests
269 269 adding file changes
270 270 added 9 changesets with 7 changes to 4 files (+1 heads)
271 271 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
272 272 changegroup hook: HG_HOOKNAME=changegroup
273 273 HG_HOOKTYPE=changegroup
274 274 HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735
275 275 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf
276 276 HG_SOURCE=pull
277 277 HG_TXNID=TXN:$ID$
278 278 HG_TXNNAME=pull
279 279 bundle:empty+full.hg
280 280 HG_URL=bundle:empty+full.hg
281 281
282 282 (run 'hg heads' to see heads, 'hg merge' to merge)
283 283
284 284 #endif
285 285
286 286 Cannot produce streaming clone bundles with "hg bundle"
287 287
288 288 $ hg -R test bundle -t packed1 packed.hg
289 289 abort: packed bundles cannot be produced by "hg bundle"
290 290 (use 'hg debugcreatestreamclonebundle')
291 291 [10]
292 292
293 293 packed1 is produced properly
294 294
295 295
296 296 #if reporevlogstore rust
297 297
298 298 $ hg -R test debugcreatestreamclonebundle packed.hg
299 299 writing 2665 bytes for 6 files
300 300 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
301 301
302 302 $ f -B 64 --size --sha1 --hexdump packed.hg
303 303 packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
304 304 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
305 305 0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
306 306 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
307 307 0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
308 308 $ hg debugbundle --spec packed.hg
309 309 none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
310 310 #endif
311 311
312 312 #if reporevlogstore no-rust zstd
313 313
314 314 $ hg -R test debugcreatestreamclonebundle packed.hg
315 315 writing 2665 bytes for 6 files
316 316 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
317 317
318 318 $ f -B 64 --size --sha1 --hexdump packed.hg
319 319 packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
320 320 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
321 321 0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
322 322 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
323 323 0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
324 324 $ hg debugbundle --spec packed.hg
325 325 none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
326 326 #endif
327 327
328 328 #if reporevlogstore no-rust no-zstd
329 329
330 330 $ hg -R test debugcreatestreamclonebundle packed.hg
331 331 writing 2664 bytes for 6 files
332 332 bundle requirements: generaldelta, revlogv1, sparserevlog
333 333
334 334 $ f -B 64 --size --sha1 --hexdump packed.hg
335 335 packed.hg: size=2840, sha1=12bf3eee3eb8a04c503ce2d29b48f0135c7edff5
336 336 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
337 337 0010: 00 00 00 00 0a 68 00 23 67 65 6e 65 72 61 6c 64 |.....h.#generald|
338 338 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 2c 73 70 |elta,revlogv1,sp|
339 339 0030: 61 72 73 65 72 65 76 6c 6f 67 00 64 61 74 61 2f |arserevlog.data/|
340 340 $ hg debugbundle --spec packed.hg
341 341 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog
342 342 #endif
343 343
344 344 #if reporevlogstore
345 345
346 346 generaldelta requirement is not listed in stream clone bundles unless used
347 347
348 348 $ hg --config format.usegeneraldelta=false init testnongd
349 349 $ cd testnongd
350 350 $ touch foo
351 351 $ hg -q commit -A -m initial
352 352 $ cd ..
353 353
354 354 #endif
355 355
356 356 #if reporevlogstore rust
357 357
358 358 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
359 359 writing 301 bytes for 3 files
360 360 bundle requirements: revlog-compression-zstd, revlogv1
361 361
362 362 $ f -B 64 --size --sha1 --hexdump packednongd.hg
363 363 packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
364 364 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
365 365 0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
366 366 0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
367 367 0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|
368 368
369 369 $ hg debugbundle --spec packednongd.hg
370 370 none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1
371 371
372 372 #endif
373 373
374 374 #if reporevlogstore no-rust zstd
375 375
376 376 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
377 377 writing 301 bytes for 3 files
378 378 bundle requirements: revlog-compression-zstd, revlogv1
379 379
380 380 $ f -B 64 --size --sha1 --hexdump packednongd.hg
381 381 packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
382 382 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
383 383 0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
384 384 0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
385 385 0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|
386 386
387 387 $ hg debugbundle --spec packednongd.hg
388 388 none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1
389 389
390 390
391 391 #endif
392 392
393 393 #if reporevlogstore no-rust no-zstd
394 394
395 395 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
396 396 writing 301 bytes for 3 files
397 397 bundle requirements: revlogv1
398 398
399 399 $ f -B 64 --size --sha1 --hexdump packednongd.hg
400 400 packednongd.hg: size=383, sha1=1d9c230238edd5d38907100b729ba72b1831fe6f
401 401 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
402 402 0010: 00 00 00 00 01 2d 00 09 72 65 76 6c 6f 67 76 31 |.....-..revlogv1|
403 403 0020: 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 |.data/foo.i.64..|
404 404 0030: 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
405 405
406 406 $ hg debugbundle --spec packednongd.hg
407 407 none-packed1;requirements%3Drevlogv1
408 408
409 409
410 410 #endif
411 411
412 412 #if reporevlogstore
413 413
414 414 Warning emitted when packed bundles contain secret changesets
415 415
416 416 $ hg init testsecret
417 417 $ cd testsecret
418 418 $ touch foo
419 419 $ hg -q commit -A -m initial
420 420 $ hg phase --force --secret -r .
421 421 $ cd ..
422 422
423 423 #endif
424 424
425 425 #if reporevlogstore rust
426 426
427 427 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
428 428 (warning: stream clone bundle will contain secret revisions)
429 429 writing 301 bytes for 3 files
430 430 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
431 431
432 432 #endif
433 433
434 434 #if reporevlogstore no-rust zstd
435 435
436 436 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
437 437 (warning: stream clone bundle will contain secret revisions)
438 438 writing 301 bytes for 3 files
439 439 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
440 440
441 441 #endif
442 442
443 443 #if reporevlogstore no-rust no-zstd
444 444
445 445 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
446 446 (warning: stream clone bundle will contain secret revisions)
447 447 writing 301 bytes for 3 files
448 448 bundle requirements: generaldelta, revlogv1, sparserevlog
449 449
450 450 #endif
451 451
452 452 #if reporevlogstore
453 453
454 454 Unpacking packed1 bundles with "hg unbundle" isn't allowed
455 455
456 456 $ hg init packed
457 457 $ hg -R packed unbundle packed.hg
458 458 abort: packed bundles cannot be applied with "hg unbundle"
459 459 (use "hg debugapplystreamclonebundle")
460 460 [10]
461 461
462 462 packed1 can be consumed from debug command
463 463
464 464 (this also confirms that streamclone-ed changes are visible via
465 465 @filecache properties to in-process procedures before closing
466 466 transaction)
467 467
468 468 $ cat > $TESTTMP/showtip.py <<EOF
469 469 >
470 470 > def showtip(ui, repo, hooktype, **kwargs):
471 471 > ui.warn(b'%s: %s\n' % (hooktype, repo[b'tip'].hex()[:12]))
472 472 >
473 473 > def reposetup(ui, repo):
474 474 > # this confirms (and ensures) that (empty) 00changelog.i
475 475 > # before streamclone is already cached as repo.changelog
476 476 > ui.setconfig(b'hooks', b'pretxnopen.showtip', showtip)
477 477 >
478 478 > # this confirms that streamclone-ed changes are visible to
479 479 > # in-process procedures before closing transaction
480 480 > ui.setconfig(b'hooks', b'pretxnclose.showtip', showtip)
481 481 >
482 482 > # this confirms that streamclone-ed changes are still visible
483 483 > # after closing transaction
484 484 > ui.setconfig(b'hooks', b'txnclose.showtip', showtip)
485 485 > EOF
486 486 $ cat >> $HGRCPATH <<EOF
487 487 > [extensions]
488 488 > showtip = $TESTTMP/showtip.py
489 489 > EOF
490 490
491 491 $ hg -R packed debugapplystreamclonebundle packed.hg
492 492 6 files to transfer, 2.60 KB of data
493 493 pretxnopen: 000000000000
494 494 pretxnclose: aa35859c02ea
495 495 transferred 2.60 KB in * seconds (* */sec) (glob)
496 496 txnclose: aa35859c02ea
497 497
498 498 (for safety, confirm visibility of streamclone-ed changes by another
499 499 process, too)
500 500
501 501 $ hg -R packed tip -T "{node|short}\n"
502 502 aa35859c02ea
503 503
504 504 $ cat >> $HGRCPATH <<EOF
505 505 > [extensions]
506 506 > showtip = !
507 507 > EOF
508 508
509 509 Does not work on non-empty repo
510 510
511 511 $ hg -R packed debugapplystreamclonebundle packed.hg
512 512 abort: cannot apply stream clone bundle on non-empty repo
513 513 [255]
514 514
515 515 #endif
516 516
517 517 Create partial clones
518 518
519 519 $ rm -r empty
520 520 $ hg init empty
521 521 $ hg clone -r 3 test partial
522 522 adding changesets
523 523 adding manifests
524 524 adding file changes
525 525 added 4 changesets with 4 changes to 1 files
526 526 new changesets f9ee2f85a263:eebf5a27f8ca
527 527 updating to branch default
528 528 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
529 529 $ hg clone partial partial2
530 530 updating to branch default
531 531 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
532 532 $ cd partial
533 533
534 534 #if repobundlerepo
535 535
536 536 Log -R full.hg in partial
537 537
538 538 $ hg -R bundle://../full.hg log -T phases
539 539 changeset: 8:aa35859c02ea
540 540 tag: tip
541 541 phase: draft
542 542 parent: 3:eebf5a27f8ca
543 543 user: test
544 544 date: Thu Jan 01 00:00:00 1970 +0000
545 545 summary: 0.3m
546 546
547 547 changeset: 7:a6a34bfa0076
548 548 phase: draft
549 549 user: test
550 550 date: Thu Jan 01 00:00:00 1970 +0000
551 551 summary: 1.3m
552 552
553 553 changeset: 6:7373c1169842
554 554 phase: draft
555 555 user: test
556 556 date: Thu Jan 01 00:00:00 1970 +0000
557 557 summary: 1.3
558 558
559 559 changeset: 5:1bb50a9436a7
560 560 phase: draft
561 561 user: test
562 562 date: Thu Jan 01 00:00:00 1970 +0000
563 563 summary: 1.2
564 564
565 565 changeset: 4:095197eb4973
566 566 phase: draft
567 567 parent: 0:f9ee2f85a263
568 568 user: test
569 569 date: Thu Jan 01 00:00:00 1970 +0000
570 570 summary: 1.1
571 571
572 572 changeset: 3:eebf5a27f8ca
573 573 phase: public
574 574 user: test
575 575 date: Thu Jan 01 00:00:00 1970 +0000
576 576 summary: 0.3
577 577
578 578 changeset: 2:e38ba6f5b7e0
579 579 phase: public
580 580 user: test
581 581 date: Thu Jan 01 00:00:00 1970 +0000
582 582 summary: 0.2
583 583
584 584 changeset: 1:34c2bf6b0626
585 585 phase: public
586 586 user: test
587 587 date: Thu Jan 01 00:00:00 1970 +0000
588 588 summary: 0.1
589 589
590 590 changeset: 0:f9ee2f85a263
591 591 phase: public
592 592 user: test
593 593 date: Thu Jan 01 00:00:00 1970 +0000
594 594 summary: 0.0
595 595
596 596
597 597 Incoming full.hg in partial
598 598
599 599 $ hg incoming bundle://../full.hg
600 600 comparing with bundle:../full.hg
601 601 searching for changes
602 602 changeset: 4:095197eb4973
603 603 parent: 0:f9ee2f85a263
604 604 user: test
605 605 date: Thu Jan 01 00:00:00 1970 +0000
606 606 summary: 1.1
607 607
608 608 changeset: 5:1bb50a9436a7
609 609 user: test
610 610 date: Thu Jan 01 00:00:00 1970 +0000
611 611 summary: 1.2
612 612
613 613 changeset: 6:7373c1169842
614 614 user: test
615 615 date: Thu Jan 01 00:00:00 1970 +0000
616 616 summary: 1.3
617 617
618 618 changeset: 7:a6a34bfa0076
619 619 user: test
620 620 date: Thu Jan 01 00:00:00 1970 +0000
621 621 summary: 1.3m
622 622
623 623 changeset: 8:aa35859c02ea
624 624 tag: tip
625 625 parent: 3:eebf5a27f8ca
626 626 user: test
627 627 date: Thu Jan 01 00:00:00 1970 +0000
628 628 summary: 0.3m
629 629
630 630
631 631 Outgoing -R full.hg vs partial2 in partial
632 632
633 633 $ hg -R bundle://../full.hg outgoing ../partial2
634 634 comparing with ../partial2
635 635 searching for changes
636 636 changeset: 4:095197eb4973
637 637 parent: 0:f9ee2f85a263
638 638 user: test
639 639 date: Thu Jan 01 00:00:00 1970 +0000
640 640 summary: 1.1
641 641
642 642 changeset: 5:1bb50a9436a7
643 643 user: test
644 644 date: Thu Jan 01 00:00:00 1970 +0000
645 645 summary: 1.2
646 646
647 647 changeset: 6:7373c1169842
648 648 user: test
649 649 date: Thu Jan 01 00:00:00 1970 +0000
650 650 summary: 1.3
651 651
652 652 changeset: 7:a6a34bfa0076
653 653 user: test
654 654 date: Thu Jan 01 00:00:00 1970 +0000
655 655 summary: 1.3m
656 656
657 657 changeset: 8:aa35859c02ea
658 658 tag: tip
659 659 parent: 3:eebf5a27f8ca
660 660 user: test
661 661 date: Thu Jan 01 00:00:00 1970 +0000
662 662 summary: 0.3m
663 663
664 664
665 665 Outgoing -R does-not-exist.hg vs partial2 in partial
666 666
667 667 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
668 668 abort: *../does-not-exist.hg* (glob)
669 669 [255]
670 670
671 671 #endif
672 672
673 673 $ cd ..
674 674
675 675 hide outer repo
676 676 $ hg init
677 677
678 678 Direct clone from bundle (all-history)
679 679
680 680 #if repobundlerepo
681 681
682 682 $ hg clone full.hg full-clone
683 683 requesting all changes
684 684 adding changesets
685 685 adding manifests
686 686 adding file changes
687 687 added 9 changesets with 7 changes to 4 files (+1 heads)
688 688 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
689 689 updating to branch default
690 690 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
691 691 $ hg -R full-clone heads
692 692 changeset: 8:aa35859c02ea
693 693 tag: tip
694 694 parent: 3:eebf5a27f8ca
695 695 user: test
696 696 date: Thu Jan 01 00:00:00 1970 +0000
697 697 summary: 0.3m
698 698
699 699 changeset: 7:a6a34bfa0076
700 700 user: test
701 701 date: Thu Jan 01 00:00:00 1970 +0000
702 702 summary: 1.3m
703 703
704 704 $ rm -r full-clone
705 705
706 706 When cloning from a non-copiable repository into '', do not
707 707 recurse infinitely (issue2528)
708 708
709 709 $ hg clone full.hg ''
710 710 abort: empty destination path is not valid
711 711 [10]
712 712
713 713 test for https://bz.mercurial-scm.org/216
714 714
715 715 Unbundle incremental bundles into fresh empty in one go
716 716
717 717 $ rm -r empty
718 718 $ hg init empty
719 719 $ hg -R test bundle --base null -r 0 ../0.hg
720 720 1 changesets found
721 721 $ hg -R test bundle --base 0 -r 1 ../1.hg
722 722 1 changesets found
723 723 $ hg -R empty unbundle -u ../0.hg ../1.hg
724 724 adding changesets
725 725 adding manifests
726 726 adding file changes
727 727 added 1 changesets with 1 changes to 1 files
728 728 new changesets f9ee2f85a263 (1 drafts)
729 729 adding changesets
730 730 adding manifests
731 731 adding file changes
732 732 added 1 changesets with 1 changes to 1 files
733 733 new changesets 34c2bf6b0626 (1 drafts)
734 734 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
735 735
736 736 View full contents of the bundle
737 737 $ hg -R test bundle --base null -r 3 ../partial.hg
738 738 4 changesets found
739 739 $ cd test
740 740 $ hg -R ../../partial.hg log -r "bundle()"
741 741 changeset: 0:f9ee2f85a263
742 742 user: test
743 743 date: Thu Jan 01 00:00:00 1970 +0000
744 744 summary: 0.0
745 745
746 746 changeset: 1:34c2bf6b0626
747 747 user: test
748 748 date: Thu Jan 01 00:00:00 1970 +0000
749 749 summary: 0.1
750 750
751 751 changeset: 2:e38ba6f5b7e0
752 752 user: test
753 753 date: Thu Jan 01 00:00:00 1970 +0000
754 754 summary: 0.2
755 755
756 756 changeset: 3:eebf5a27f8ca
757 757 user: test
758 758 date: Thu Jan 01 00:00:00 1970 +0000
759 759 summary: 0.3
760 760
761 761 $ cd ..
762 762
763 763 #endif
764 764
765 765 test for 540d1059c802
766 766
767 767 $ hg init orig
768 768 $ cd orig
769 769 $ echo foo > foo
770 770 $ hg add foo
771 771 $ hg ci -m 'add foo'
772 772
773 773 $ hg clone . ../copy
774 774 updating to branch default
775 775 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
776 776 $ hg tag foo
777 777
778 778 $ cd ../copy
779 779 $ echo >> foo
780 780 $ hg ci -m 'change foo'
781 781 $ hg bundle ../bundle.hg ../orig
782 782 searching for changes
783 783 1 changesets found
784 784
785 785 $ cd ..
786 786
787 787 #if repobundlerepo
788 788 $ cd orig
789 789 $ hg incoming ../bundle.hg
790 790 comparing with ../bundle.hg
791 791 searching for changes
792 792 changeset: 2:ed1b79f46b9a
793 793 tag: tip
794 794 parent: 0:bbd179dfa0a7
795 795 user: test
796 796 date: Thu Jan 01 00:00:00 1970 +0000
797 797 summary: change foo
798 798
799 799 $ cd ..
800 800
801 801 test bundle with # in the filename (issue2154):
802 802
803 803 $ cp bundle.hg 'test#bundle.hg'
804 804 $ cd orig
805 805 $ hg incoming '../test#bundle.hg'
806 806 comparing with ../test
807 807 abort: unknown revision 'bundle.hg'
808 808 [10]
809 809
810 810 note that percent encoding is not handled:
811 811
812 812 $ hg incoming ../test%23bundle.hg
813 813 abort: repository ../test%23bundle.hg not found
814 814 [255]
815 815 $ cd ..
816 816
817 817 #endif
818 818
819 819 test to bundle revisions on the newly created branch (issue3828):
820 820
821 821 $ hg -q clone -U test test-clone
822 822 $ cd test
823 823
824 824 $ hg -q branch foo
825 825 $ hg commit -m "create foo branch"
826 826 $ hg -q outgoing ../test-clone
827 827 9:b4f5acb1ee27
828 828 $ hg -q bundle --branch foo foo.hg ../test-clone
829 829 #if repobundlerepo
830 830 $ hg -R foo.hg -q log -r "bundle()"
831 831 9:b4f5acb1ee27
832 832 #endif
833 833
834 834 $ cd ..
835 835
836 836 test for https://bz.mercurial-scm.org/1144
837 837
838 838 test that verify bundle does not traceback
839 839
840 840 partial history bundle, fails w/ unknown parent
841 841
842 842 $ hg -R bundle.hg verify
843 843 abort: 00changelog@bbd179dfa0a71671c253b3ae0aa1513b60d199fa: unknown parent
844 844 [50]
845 845
846 846 full history bundle, refuses to verify non-local repo
847 847
848 848 #if repobundlerepo
849 849 $ hg -R all.hg verify
850 850 abort: cannot verify bundle or remote repos
851 851 [255]
852 852 #endif
853 853
854 854 but, regular verify must continue to work
855 855
856 856 $ hg -R orig verify
857 857 checking changesets
858 858 checking manifests
859 859 crosschecking files in changesets and manifests
860 860 checking files
861 861 checked 2 changesets with 2 changes to 2 files
862 862
863 863 #if repobundlerepo
864 864 diff against bundle
865 865
866 866 $ hg init b
867 867 $ cd b
868 868 $ hg -R ../all.hg diff -r tip
869 869 diff -r aa35859c02ea anotherfile
870 870 --- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
871 871 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
872 872 @@ -1,4 +0,0 @@
873 873 -0
874 874 -1
875 875 -2
876 876 -3
877 877 $ cd ..
878 878 #endif
879 879
880 880 bundle single branch
881 881
882 882 $ hg init branchy
883 883 $ cd branchy
884 884 $ echo a >a
885 885 $ echo x >x
886 886 $ hg ci -Ama
887 887 adding a
888 888 adding x
889 889 $ echo c >c
890 890 $ echo xx >x
891 891 $ hg ci -Amc
892 892 adding c
893 893 $ echo c1 >c1
894 894 $ hg ci -Amc1
895 895 adding c1
896 896 $ hg up 0
897 897 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
898 898 $ echo b >b
899 899 $ hg ci -Amb
900 900 adding b
901 901 created new head
902 902 $ echo b1 >b1
903 903 $ echo xx >x
904 904 $ hg ci -Amb1
905 905 adding b1
906 906 $ hg clone -q -r2 . part
907 907
908 908 == bundling via incoming
909 909
910 910 $ hg in -R part --bundle incoming.hg --template "{node}\n" .
911 911 comparing with .
912 912 searching for changes
913 913 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
914 914 057f4db07f61970e1c11e83be79e9d08adc4dc31
915 915
916 916 == bundling
917 917
918 918 $ hg bundle bundle.hg part --debug --config progress.debug=true
919 919 query 1; heads
920 920 searching for changes
921 921 all remote heads known locally
922 922 2 changesets found
923 923 list of changesets:
924 924 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
925 925 057f4db07f61970e1c11e83be79e9d08adc4dc31
926 926 bundle2-output-bundle: "HG20", (1 params) 2 parts total
927 927 bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
928 928 changesets: 1/2 chunks (50.00%)
929 929 changesets: 2/2 chunks (100.00%)
930 930 manifests: 1/2 chunks (50.00%)
931 931 manifests: 2/2 chunks (100.00%)
932 932 files: b 1/3 files (33.33%)
933 933 files: b1 2/3 files (66.67%)
934 934 files: x 3/3 files (100.00%)
935 935 bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
936 936
937 937 #if repobundlerepo
938 938 == Test for issue3441
939 939
940 940 $ hg clone -q -r0 . part2
941 941 $ hg -q -R part2 pull bundle.hg
942 942 $ hg -R part2 verify
943 943 checking changesets
944 944 checking manifests
945 945 crosschecking files in changesets and manifests
946 946 checking files
947 947 checked 3 changesets with 5 changes to 4 files
948 948 #endif
949 949
950 950 == Test bundling no commits
951 951
952 952 $ hg bundle -r 'public()' no-output.hg
953 953 abort: no commits to bundle
954 954 [10]
955 955
956 956 $ cd ..
957 957
958 958 When the user merges to a revision that exists only in the bundle,
959 959 it should show a warning that the second parent of the working
960 960 directory does not exist
961 961
962 962 $ hg init update2bundled
963 963 $ cd update2bundled
964 964 $ cat <<EOF >> .hg/hgrc
965 965 > [extensions]
966 966 > strip =
967 967 > EOF
968 968 $ echo "aaa" >> a
969 969 $ hg commit -A -m 0
970 970 adding a
971 971 $ echo "bbb" >> b
972 972 $ hg commit -A -m 1
973 973 adding b
974 974 $ echo "ccc" >> c
975 975 $ hg commit -A -m 2
976 976 adding c
977 977 $ hg update -r 1
978 978 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
979 979 $ echo "ddd" >> d
980 980 $ hg commit -A -m 3
981 981 adding d
982 982 created new head
983 983 $ hg update -r 2
984 984 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
985 985 $ hg log -G
986 986 o changeset: 3:8bd3e1f196af
987 987 | tag: tip
988 988 | parent: 1:a01eca7af26d
989 989 | user: test
990 990 | date: Thu Jan 01 00:00:00 1970 +0000
991 991 | summary: 3
992 992 |
993 993 | @ changeset: 2:4652c276ac4f
994 994 |/ user: test
995 995 | date: Thu Jan 01 00:00:00 1970 +0000
996 996 | summary: 2
997 997 |
998 998 o changeset: 1:a01eca7af26d
999 999 | user: test
1000 1000 | date: Thu Jan 01 00:00:00 1970 +0000
1001 1001 | summary: 1
1002 1002 |
1003 1003 o changeset: 0:4fe08cd4693e
1004 1004 user: test
1005 1005 date: Thu Jan 01 00:00:00 1970 +0000
1006 1006 summary: 0
1007 1007
1008 1008
1009 1009 #if repobundlerepo
1010 1010 $ hg bundle --base 1 -r 3 ../update2bundled.hg
1011 1011 1 changesets found
1012 1012 $ hg strip -r 3
1013 1013 saved backup bundle to $TESTTMP/update2bundled/.hg/strip-backup/8bd3e1f196af-017e56d8-backup.hg
1014 1014 $ hg merge -R ../update2bundled.hg -r 3
1015 1015 setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
1016 1016 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1017 1017 (branch merge, don't forget to commit)
1018 1018
1019 1019 When the user updates to a revision that exists only in the bundle,
1020 1020 it should show a warning
1021 1021
1022 1022 $ hg update -R ../update2bundled.hg --clean -r 3
1023 1023 setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
1024 1024 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
1025 1025
1026 1026 When the user updates to a revision that exists in the local repository,
1027 1027 the warning shouldn't be emitted
1028 1028
1029 1029 $ hg update -R ../update2bundled.hg -r 0
1030 1030 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1031 1031 #endif
1032 1032
1033 1033 Test the option that creates a slim bundle
1034 1034
1035 1035 $ hg bundle -a --config devel.bundle.delta=p1 ./slim.hg
1036 1036 3 changesets found
1037 1037
1038 1038 Test the option that creates a bundle with full revisions instead of deltas
1039 1039 $ hg bundle -a --config devel.bundle.delta=full ./full.hg
1040 1040 3 changesets found
1041
1042 Test the debug output when applying deltas
1043 -----------------------------------------
1044
1045 $ hg init foo
1046 $ hg -R foo unbundle ./slim.hg \
1047 > --config debug.revlog.debug-delta=yes \
1048 > --config storage.revlog.reuse-external-delta=no \
1049 > --config storage.revlog.reuse-external-delta-parent=no
1050 adding changesets
1051 DBG-DELTAS: CHANGELOG: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1052 DBG-DELTAS: CHANGELOG: rev=1: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1053 DBG-DELTAS: CHANGELOG: rev=2: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1054 adding manifests
1055 DBG-DELTAS: MANIFESTLOG: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1056 DBG-DELTAS: MANIFESTLOG: rev=1: search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1057 DBG-DELTAS: MANIFESTLOG: rev=2: search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=1 p2-chain-length=-1 - duration=* (glob)
1058 adding file changes
1059 DBG-DELTAS: FILELOG:a: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1060 DBG-DELTAS: FILELOG:b: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1061 DBG-DELTAS: FILELOG:c: rev=0: search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1062 added 3 changesets with 3 changes to 3 files
1063 new changesets 4fe08cd4693e:4652c276ac4f (3 drafts)
1064 (run 'hg update' to get a working copy)
1065
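The `DBG-DELTAS` lines above are enabled by the `debug.revlog.debug-delta` flag passed with `--config`; the two `storage.revlog.reuse-external-delta*` options appear to be set so that deltas are recomputed locally rather than reused from the bundle, which gives the search-rounds/try-count figures something to report. As with other Mercurial settings, the same flags can presumably also be set persistently in a configuration file instead of per command. A minimal sketch, reusing the heredoc style already used elsewhere in this test (the section/key split assumes the usual first-dot rule for `--config` paths, i.e. section `debug`, key `revlog.debug-delta`):

  $ cat >> $HGRCPATH <<EOF
  > [debug]
  > revlog.debug-delta = yes
  > [storage]
  > revlog.reuse-external-delta = no
  > revlog.reuse-external-delta-parent = no
  > EOF

With that in place, a plain `hg -R foo unbundle ./slim.hg` should emit the same kind of `DBG-DELTAS:` output without any per-command `--config` flags.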