revlog: store sidedata in their own file...
marmoute - r48181:e6292eb3 default
@@ -1,2719 +1,2719 @@
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
19 19 def loadconfigtable(ui, extname, configtable):
20 20 """update config item known to the ui with the extension ones"""
21 21 for section, items in sorted(configtable.items()):
22 22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 23 knownkeys = set(knownitems)
24 24 newkeys = set(items)
25 25 for key in sorted(knownkeys & newkeys):
26 26 msg = b"extension '%s' overwrites config item '%s.%s'"
27 27 msg %= (extname, section, key)
28 28 ui.develwarn(msg, config=b'warn-config')
29 29
30 30 knownitems.update(items)
31 31
32 32
33 33 class configitem(object):
34 34 """represent a known config item
35 35
36 36 :section: the official config section where to find this item,
37 37 :name: the official name within the section,
38 38 :default: default value for this item,
39 39 :alias: optional list of tuples as alternatives,
40 40 :generic: this is a generic definition, match the name using a regular expression.
41 41 """
42 42
43 43 def __init__(
44 44 self,
45 45 section,
46 46 name,
47 47 default=None,
48 48 alias=(),
49 49 generic=False,
50 50 priority=0,
51 51 experimental=False,
52 52 ):
53 53 self.section = section
54 54 self.name = name
55 55 self.default = default
56 56 self.alias = list(alias)
57 57 self.generic = generic
58 58 self.priority = priority
59 59 self.experimental = experimental
60 60 self._re = None
61 61 if generic:
62 62 self._re = re.compile(self.name)
63 63
64 64
65 65 class itemregister(dict):
66 66 """A specialized dictionary that can handle wild-card selection"""
67 67
68 68 def __init__(self):
69 69 super(itemregister, self).__init__()
70 70 self._generics = set()
71 71
72 72 def update(self, other):
73 73 super(itemregister, self).update(other)
74 74 self._generics.update(other._generics)
75 75
76 76 def __setitem__(self, key, item):
77 77 super(itemregister, self).__setitem__(key, item)
78 78 if item.generic:
79 79 self._generics.add(item)
80 80
81 81 def get(self, key):
82 82 baseitem = super(itemregister, self).get(key)
83 83 if baseitem is not None and not baseitem.generic:
84 84 return baseitem
85 85
86 86 # search for a matching generic item
87 87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 88 for item in generics:
89 89 # we use 'match' instead of 'search' to make the matching simpler
90 90 # for people unfamiliar with regular expressions. Having the match
91 91 # rooted to the start of the string produces less surprising
92 92 # results for users writing simple regexes for sub-attributes.
93 93 #
94 94 # For example, using "color\..*" with match produces an unsurprising
95 95 # result, while using search could suddenly match apparently
96 96 # unrelated configuration that happens to contain "color."
97 97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 98 # some matches to avoid the need to prefix most patterns with "^".
99 99 # The "^" seems more error-prone.
100 100 if item._re.match(key):
101 101 return item
102 102
103 103 return None
104 104
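To make the anchored-match behaviour concrete, here is a minimal sketch (hypothetical, not part of this change) using only the classes above:

    reg = itemregister()
    generic = configitem(b'dummy', br'color\..*', generic=True)
    reg[generic.name] = generic
    # 'match' is anchored at the start of the key, so only keys that
    # actually begin with "color." resolve to the generic item
    assert reg.get(b'color.mode') is generic
    assert reg.get(b'pagercolor.mode') is None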
105 105
106 106 coreitems = {}
107 107
108 108
109 109 def _register(configtable, *args, **kwargs):
110 110 item = configitem(*args, **kwargs)
111 111 section = configtable.setdefault(item.section, itemregister())
112 112 if item.name in section:
113 113 msg = b"duplicated config item registration for '%s.%s'"
114 114 raise error.ProgrammingError(msg % (item.section, item.name))
115 115 section[item.name] = item
116 116
117 117
118 118 # special value for cases where the default is derived from other values
119 119 dynamicdefault = object()
120 120
121 121 # Registering actual config items
122 122
123 123
124 124 def getitemregister(configtable):
125 125 f = functools.partial(_register, configtable)
126 126 # export pseudo enum as configitem.*
127 127 f.dynamicdefault = dynamicdefault
128 128 return f
129 129
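As a usage note (hypothetical, mirroring how `coreconfigitem` is created just below and how an extension would build its own table):

    mytable = {}
    myconfigitem = getitemregister(mytable)
    myconfigitem(b'myext', b'enabled', default=False)
    # the dynamicdefault sentinel is re-exported on the register itself
    myconfigitem(b'myext', b'threshold', default=myconfigitem.dynamicdefault)
    # registering the same item twice raises error.ProgrammingError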
130 130
131 131 coreconfigitem = getitemregister(coreitems)
132 132
133 133
134 134 def _registerdiffopts(section, configprefix=b''):
135 135 coreconfigitem(
136 136 section,
137 137 configprefix + b'nodates',
138 138 default=False,
139 139 )
140 140 coreconfigitem(
141 141 section,
142 142 configprefix + b'showfunc',
143 143 default=False,
144 144 )
145 145 coreconfigitem(
146 146 section,
147 147 configprefix + b'unified',
148 148 default=None,
149 149 )
150 150 coreconfigitem(
151 151 section,
152 152 configprefix + b'git',
153 153 default=False,
154 154 )
155 155 coreconfigitem(
156 156 section,
157 157 configprefix + b'ignorews',
158 158 default=False,
159 159 )
160 160 coreconfigitem(
161 161 section,
162 162 configprefix + b'ignorewsamount',
163 163 default=False,
164 164 )
165 165 coreconfigitem(
166 166 section,
167 167 configprefix + b'ignoreblanklines',
168 168 default=False,
169 169 )
170 170 coreconfigitem(
171 171 section,
172 172 configprefix + b'ignorewseol',
173 173 default=False,
174 174 )
175 175 coreconfigitem(
176 176 section,
177 177 configprefix + b'nobinary',
178 178 default=False,
179 179 )
180 180 coreconfigitem(
181 181 section,
182 182 configprefix + b'noprefix',
183 183 default=False,
184 184 )
185 185 coreconfigitem(
186 186 section,
187 187 configprefix + b'word-diff',
188 188 default=False,
189 189 )
190 190
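For illustration: the helper above fans a single call out into one item per diff option under the given prefix, so `_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')` (called later in this file) registers, among others:

    # commands.commit.interactive.nodates   (default: False)
    # commands.commit.interactive.showfunc  (default: False)
    # commands.commit.interactive.unified   (default: None)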
191 191
192 192 coreconfigitem(
193 193 b'alias',
194 194 b'.*',
195 195 default=dynamicdefault,
196 196 generic=True,
197 197 )
198 198 coreconfigitem(
199 199 b'auth',
200 200 b'cookiefile',
201 201 default=None,
202 202 )
203 203 _registerdiffopts(section=b'annotate')
204 204 # bookmarks.pushing: internal hack for discovery
205 205 coreconfigitem(
206 206 b'bookmarks',
207 207 b'pushing',
208 208 default=list,
209 209 )
210 210 # bundle.mainreporoot: internal hack for bundlerepo
211 211 coreconfigitem(
212 212 b'bundle',
213 213 b'mainreporoot',
214 214 default=b'',
215 215 )
216 216 coreconfigitem(
217 217 b'censor',
218 218 b'policy',
219 219 default=b'abort',
220 220 experimental=True,
221 221 )
222 222 coreconfigitem(
223 223 b'chgserver',
224 224 b'idletimeout',
225 225 default=3600,
226 226 )
227 227 coreconfigitem(
228 228 b'chgserver',
229 229 b'skiphash',
230 230 default=False,
231 231 )
232 232 coreconfigitem(
233 233 b'cmdserver',
234 234 b'log',
235 235 default=None,
236 236 )
237 237 coreconfigitem(
238 238 b'cmdserver',
239 239 b'max-log-files',
240 240 default=7,
241 241 )
242 242 coreconfigitem(
243 243 b'cmdserver',
244 244 b'max-log-size',
245 245 default=b'1 MB',
246 246 )
247 247 coreconfigitem(
248 248 b'cmdserver',
249 249 b'max-repo-cache',
250 250 default=0,
251 251 experimental=True,
252 252 )
253 253 coreconfigitem(
254 254 b'cmdserver',
255 255 b'message-encodings',
256 256 default=list,
257 257 )
258 258 coreconfigitem(
259 259 b'cmdserver',
260 260 b'track-log',
261 261 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
262 262 )
263 263 coreconfigitem(
264 264 b'cmdserver',
265 265 b'shutdown-on-interrupt',
266 266 default=True,
267 267 )
268 268 coreconfigitem(
269 269 b'color',
270 270 b'.*',
271 271 default=None,
272 272 generic=True,
273 273 )
274 274 coreconfigitem(
275 275 b'color',
276 276 b'mode',
277 277 default=b'auto',
278 278 )
279 279 coreconfigitem(
280 280 b'color',
281 281 b'pagermode',
282 282 default=dynamicdefault,
283 283 )
284 284 coreconfigitem(
285 285 b'command-templates',
286 286 b'graphnode',
287 287 default=None,
288 288 alias=[(b'ui', b'graphnodetemplate')],
289 289 )
290 290 coreconfigitem(
291 291 b'command-templates',
292 292 b'log',
293 293 default=None,
294 294 alias=[(b'ui', b'logtemplate')],
295 295 )
296 296 coreconfigitem(
297 297 b'command-templates',
298 298 b'mergemarker',
299 299 default=(
300 300 b'{node|short} '
301 301 b'{ifeq(tags, "tip", "", '
302 302 b'ifeq(tags, "", "", "{tags} "))}'
303 303 b'{if(bookmarks, "{bookmarks} ")}'
304 304 b'{ifeq(branch, "default", "", "{branch} ")}'
305 305 b'- {author|user}: {desc|firstline}'
306 306 ),
307 307 alias=[(b'ui', b'mergemarkertemplate')],
308 308 )
309 309 coreconfigitem(
310 310 b'command-templates',
311 311 b'pre-merge-tool-output',
312 312 default=None,
313 313 alias=[(b'ui', b'pre-merge-tool-output-template')],
314 314 )
315 315 coreconfigitem(
316 316 b'command-templates',
317 317 b'oneline-summary',
318 318 default=None,
319 319 )
320 320 coreconfigitem(
321 321 b'command-templates',
322 322 b'oneline-summary.*',
323 323 default=dynamicdefault,
324 324 generic=True,
325 325 )
326 326 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
327 327 coreconfigitem(
328 328 b'commands',
329 329 b'commit.post-status',
330 330 default=False,
331 331 )
332 332 coreconfigitem(
333 333 b'commands',
334 334 b'grep.all-files',
335 335 default=False,
336 336 experimental=True,
337 337 )
338 338 coreconfigitem(
339 339 b'commands',
340 340 b'merge.require-rev',
341 341 default=False,
342 342 )
343 343 coreconfigitem(
344 344 b'commands',
345 345 b'push.require-revs',
346 346 default=False,
347 347 )
348 348 coreconfigitem(
349 349 b'commands',
350 350 b'resolve.confirm',
351 351 default=False,
352 352 )
353 353 coreconfigitem(
354 354 b'commands',
355 355 b'resolve.explicit-re-merge',
356 356 default=False,
357 357 )
358 358 coreconfigitem(
359 359 b'commands',
360 360 b'resolve.mark-check',
361 361 default=b'none',
362 362 )
363 363 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
364 364 coreconfigitem(
365 365 b'commands',
366 366 b'show.aliasprefix',
367 367 default=list,
368 368 )
369 369 coreconfigitem(
370 370 b'commands',
371 371 b'status.relative',
372 372 default=False,
373 373 )
374 374 coreconfigitem(
375 375 b'commands',
376 376 b'status.skipstates',
377 377 default=[],
378 378 experimental=True,
379 379 )
380 380 coreconfigitem(
381 381 b'commands',
382 382 b'status.terse',
383 383 default=b'',
384 384 )
385 385 coreconfigitem(
386 386 b'commands',
387 387 b'status.verbose',
388 388 default=False,
389 389 )
390 390 coreconfigitem(
391 391 b'commands',
392 392 b'update.check',
393 393 default=None,
394 394 )
395 395 coreconfigitem(
396 396 b'commands',
397 397 b'update.requiredest',
398 398 default=False,
399 399 )
400 400 coreconfigitem(
401 401 b'committemplate',
402 402 b'.*',
403 403 default=None,
404 404 generic=True,
405 405 )
406 406 coreconfigitem(
407 407 b'convert',
408 408 b'bzr.saverev',
409 409 default=True,
410 410 )
411 411 coreconfigitem(
412 412 b'convert',
413 413 b'cvsps.cache',
414 414 default=True,
415 415 )
416 416 coreconfigitem(
417 417 b'convert',
418 418 b'cvsps.fuzz',
419 419 default=60,
420 420 )
421 421 coreconfigitem(
422 422 b'convert',
423 423 b'cvsps.logencoding',
424 424 default=None,
425 425 )
426 426 coreconfigitem(
427 427 b'convert',
428 428 b'cvsps.mergefrom',
429 429 default=None,
430 430 )
431 431 coreconfigitem(
432 432 b'convert',
433 433 b'cvsps.mergeto',
434 434 default=None,
435 435 )
436 436 coreconfigitem(
437 437 b'convert',
438 438 b'git.committeractions',
439 439 default=lambda: [b'messagedifferent'],
440 440 )
441 441 coreconfigitem(
442 442 b'convert',
443 443 b'git.extrakeys',
444 444 default=list,
445 445 )
446 446 coreconfigitem(
447 447 b'convert',
448 448 b'git.findcopiesharder',
449 449 default=False,
450 450 )
451 451 coreconfigitem(
452 452 b'convert',
453 453 b'git.remoteprefix',
454 454 default=b'remote',
455 455 )
456 456 coreconfigitem(
457 457 b'convert',
458 458 b'git.renamelimit',
459 459 default=400,
460 460 )
461 461 coreconfigitem(
462 462 b'convert',
463 463 b'git.saverev',
464 464 default=True,
465 465 )
466 466 coreconfigitem(
467 467 b'convert',
468 468 b'git.similarity',
469 469 default=50,
470 470 )
471 471 coreconfigitem(
472 472 b'convert',
473 473 b'git.skipsubmodules',
474 474 default=False,
475 475 )
476 476 coreconfigitem(
477 477 b'convert',
478 478 b'hg.clonebranches',
479 479 default=False,
480 480 )
481 481 coreconfigitem(
482 482 b'convert',
483 483 b'hg.ignoreerrors',
484 484 default=False,
485 485 )
486 486 coreconfigitem(
487 487 b'convert',
488 488 b'hg.preserve-hash',
489 489 default=False,
490 490 )
491 491 coreconfigitem(
492 492 b'convert',
493 493 b'hg.revs',
494 494 default=None,
495 495 )
496 496 coreconfigitem(
497 497 b'convert',
498 498 b'hg.saverev',
499 499 default=False,
500 500 )
501 501 coreconfigitem(
502 502 b'convert',
503 503 b'hg.sourcename',
504 504 default=None,
505 505 )
506 506 coreconfigitem(
507 507 b'convert',
508 508 b'hg.startrev',
509 509 default=None,
510 510 )
511 511 coreconfigitem(
512 512 b'convert',
513 513 b'hg.tagsbranch',
514 514 default=b'default',
515 515 )
516 516 coreconfigitem(
517 517 b'convert',
518 518 b'hg.usebranchnames',
519 519 default=True,
520 520 )
521 521 coreconfigitem(
522 522 b'convert',
523 523 b'ignoreancestorcheck',
524 524 default=False,
525 525 experimental=True,
526 526 )
527 527 coreconfigitem(
528 528 b'convert',
529 529 b'localtimezone',
530 530 default=False,
531 531 )
532 532 coreconfigitem(
533 533 b'convert',
534 534 b'p4.encoding',
535 535 default=dynamicdefault,
536 536 )
537 537 coreconfigitem(
538 538 b'convert',
539 539 b'p4.startrev',
540 540 default=0,
541 541 )
542 542 coreconfigitem(
543 543 b'convert',
544 544 b'skiptags',
545 545 default=False,
546 546 )
547 547 coreconfigitem(
548 548 b'convert',
549 549 b'svn.debugsvnlog',
550 550 default=True,
551 551 )
552 552 coreconfigitem(
553 553 b'convert',
554 554 b'svn.trunk',
555 555 default=None,
556 556 )
557 557 coreconfigitem(
558 558 b'convert',
559 559 b'svn.tags',
560 560 default=None,
561 561 )
562 562 coreconfigitem(
563 563 b'convert',
564 564 b'svn.branches',
565 565 default=None,
566 566 )
567 567 coreconfigitem(
568 568 b'convert',
569 569 b'svn.startrev',
570 570 default=0,
571 571 )
572 572 coreconfigitem(
573 573 b'convert',
574 574 b'svn.dangerous-set-commit-dates',
575 575 default=False,
576 576 )
577 577 coreconfigitem(
578 578 b'debug',
579 579 b'dirstate.delaywrite',
580 580 default=0,
581 581 )
582 582 coreconfigitem(
583 583 b'debug',
584 584 b'revlog.verifyposition.changelog',
585 585 default=b'',
586 586 )
587 587 coreconfigitem(
588 588 b'defaults',
589 589 b'.*',
590 590 default=None,
591 591 generic=True,
592 592 )
593 593 coreconfigitem(
594 594 b'devel',
595 595 b'all-warnings',
596 596 default=False,
597 597 )
598 598 coreconfigitem(
599 599 b'devel',
600 600 b'bundle2.debug',
601 601 default=False,
602 602 )
603 603 coreconfigitem(
604 604 b'devel',
605 605 b'bundle.delta',
606 606 default=b'',
607 607 )
608 608 coreconfigitem(
609 609 b'devel',
610 610 b'cache-vfs',
611 611 default=None,
612 612 )
613 613 coreconfigitem(
614 614 b'devel',
615 615 b'check-locks',
616 616 default=False,
617 617 )
618 618 coreconfigitem(
619 619 b'devel',
620 620 b'check-relroot',
621 621 default=False,
622 622 )
623 623 # Track copy information for all files, not just "added" ones (very slow)
624 624 coreconfigitem(
625 625 b'devel',
626 626 b'copy-tracing.trace-all-files',
627 627 default=False,
628 628 )
629 629 coreconfigitem(
630 630 b'devel',
631 631 b'default-date',
632 632 default=None,
633 633 )
634 634 coreconfigitem(
635 635 b'devel',
636 636 b'deprec-warn',
637 637 default=False,
638 638 )
639 639 coreconfigitem(
640 640 b'devel',
641 641 b'disableloaddefaultcerts',
642 642 default=False,
643 643 )
644 644 coreconfigitem(
645 645 b'devel',
646 646 b'warn-empty-changegroup',
647 647 default=False,
648 648 )
649 649 coreconfigitem(
650 650 b'devel',
651 651 b'legacy.exchange',
652 652 default=list,
653 653 )
654 654 # When True, revlogs use a special reference version of the nodemap, which is not
655 655 # performant but is "known" to behave properly.
656 656 coreconfigitem(
657 657 b'devel',
658 658 b'persistent-nodemap',
659 659 default=False,
660 660 )
661 661 coreconfigitem(
662 662 b'devel',
663 663 b'servercafile',
664 664 default=b'',
665 665 )
666 666 coreconfigitem(
667 667 b'devel',
668 668 b'serverexactprotocol',
669 669 default=b'',
670 670 )
671 671 coreconfigitem(
672 672 b'devel',
673 673 b'serverrequirecert',
674 674 default=False,
675 675 )
676 676 coreconfigitem(
677 677 b'devel',
678 678 b'strip-obsmarkers',
679 679 default=True,
680 680 )
681 681 coreconfigitem(
682 682 b'devel',
683 683 b'warn-config',
684 684 default=None,
685 685 )
686 686 coreconfigitem(
687 687 b'devel',
688 688 b'warn-config-default',
689 689 default=None,
690 690 )
691 691 coreconfigitem(
692 692 b'devel',
693 693 b'user.obsmarker',
694 694 default=None,
695 695 )
696 696 coreconfigitem(
697 697 b'devel',
698 698 b'warn-config-unknown',
699 699 default=None,
700 700 )
701 701 coreconfigitem(
702 702 b'devel',
703 703 b'debug.copies',
704 704 default=False,
705 705 )
706 706 coreconfigitem(
707 707 b'devel',
708 708 b'copy-tracing.multi-thread',
709 709 default=True,
710 710 )
711 711 coreconfigitem(
712 712 b'devel',
713 713 b'debug.extensions',
714 714 default=False,
715 715 )
716 716 coreconfigitem(
717 717 b'devel',
718 718 b'debug.repo-filters',
719 719 default=False,
720 720 )
721 721 coreconfigitem(
722 722 b'devel',
723 723 b'debug.peer-request',
724 724 default=False,
725 725 )
726 726 # If discovery.exchange-heads is False, the discovery will not start with
727 727 # remote head fetching and local head querying.
728 728 coreconfigitem(
729 729 b'devel',
730 730 b'discovery.exchange-heads',
731 731 default=True,
732 732 )
733 733 # If discovery.grow-sample is False, the sample size used in set discovery will
734 734 # not be increased as the process advances.
735 735 coreconfigitem(
736 736 b'devel',
737 737 b'discovery.grow-sample',
738 738 default=True,
739 739 )
740 740 # When discovery.grow-sample.dynamic is True (the default), the sample size is
741 741 # adapted to the shape of the undecided set (it is set to the max of:
742 742 # <target-size>, len(roots(undecided)), len(heads(undecided))).
743 743 coreconfigitem(
744 744 b'devel',
745 745 b'discovery.grow-sample.dynamic',
746 746 default=True,
747 747 )
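A worked example of the sizing rule above (illustrative numbers only, assuming the default target size of 200 registered below as devel.discovery.sample-size):

    target_size = 200       # devel.discovery.sample-size
    undecided_roots = 350   # hypothetical shape of the undecided set
    undecided_heads = 120
    sample_size = max(target_size, undecided_roots, undecided_heads)
    assert sample_size == 350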
748 748 # discovery.grow-sample.rate controls the rate at which the sample grows
749 749 coreconfigitem(
750 750 b'devel',
751 751 b'discovery.grow-sample.rate',
752 752 default=1.05,
753 753 )
754 754 # If discovery.randomize is False, random sampling during discovery is
755 755 # deterministic. It is meant for integration tests.
756 756 coreconfigitem(
757 757 b'devel',
758 758 b'discovery.randomize',
759 759 default=True,
760 760 )
761 761 # Control the initial size of the discovery sample
762 762 coreconfigitem(
763 763 b'devel',
764 764 b'discovery.sample-size',
765 765 default=200,
766 766 )
767 767 # Control the size of the discovery sample used for an initial exchange
768 768 coreconfigitem(
769 769 b'devel',
770 770 b'discovery.sample-size.initial',
771 771 default=100,
772 772 )
773 773 _registerdiffopts(section=b'diff')
774 774 coreconfigitem(
775 775 b'diff',
776 776 b'merge',
777 777 default=False,
778 778 experimental=True,
779 779 )
780 780 coreconfigitem(
781 781 b'email',
782 782 b'bcc',
783 783 default=None,
784 784 )
785 785 coreconfigitem(
786 786 b'email',
787 787 b'cc',
788 788 default=None,
789 789 )
790 790 coreconfigitem(
791 791 b'email',
792 792 b'charsets',
793 793 default=list,
794 794 )
795 795 coreconfigitem(
796 796 b'email',
797 797 b'from',
798 798 default=None,
799 799 )
800 800 coreconfigitem(
801 801 b'email',
802 802 b'method',
803 803 default=b'smtp',
804 804 )
805 805 coreconfigitem(
806 806 b'email',
807 807 b'reply-to',
808 808 default=None,
809 809 )
810 810 coreconfigitem(
811 811 b'email',
812 812 b'to',
813 813 default=None,
814 814 )
815 815 coreconfigitem(
816 816 b'experimental',
817 817 b'archivemetatemplate',
818 818 default=dynamicdefault,
819 819 )
820 820 coreconfigitem(
821 821 b'experimental',
822 822 b'auto-publish',
823 823 default=b'publish',
824 824 )
825 825 coreconfigitem(
826 826 b'experimental',
827 827 b'bundle-phases',
828 828 default=False,
829 829 )
830 830 coreconfigitem(
831 831 b'experimental',
832 832 b'bundle2-advertise',
833 833 default=True,
834 834 )
835 835 coreconfigitem(
836 836 b'experimental',
837 837 b'bundle2-output-capture',
838 838 default=False,
839 839 )
840 840 coreconfigitem(
841 841 b'experimental',
842 842 b'bundle2.pushback',
843 843 default=False,
844 844 )
845 845 coreconfigitem(
846 846 b'experimental',
847 847 b'bundle2lazylocking',
848 848 default=False,
849 849 )
850 850 coreconfigitem(
851 851 b'experimental',
852 852 b'bundlecomplevel',
853 853 default=None,
854 854 )
855 855 coreconfigitem(
856 856 b'experimental',
857 857 b'bundlecomplevel.bzip2',
858 858 default=None,
859 859 )
860 860 coreconfigitem(
861 861 b'experimental',
862 862 b'bundlecomplevel.gzip',
863 863 default=None,
864 864 )
865 865 coreconfigitem(
866 866 b'experimental',
867 867 b'bundlecomplevel.none',
868 868 default=None,
869 869 )
870 870 coreconfigitem(
871 871 b'experimental',
872 872 b'bundlecomplevel.zstd',
873 873 default=None,
874 874 )
875 875 coreconfigitem(
876 876 b'experimental',
877 877 b'bundlecompthreads',
878 878 default=None,
879 879 )
880 880 coreconfigitem(
881 881 b'experimental',
882 882 b'bundlecompthreads.bzip2',
883 883 default=None,
884 884 )
885 885 coreconfigitem(
886 886 b'experimental',
887 887 b'bundlecompthreads.gzip',
888 888 default=None,
889 889 )
890 890 coreconfigitem(
891 891 b'experimental',
892 892 b'bundlecompthreads.none',
893 893 default=None,
894 894 )
895 895 coreconfigitem(
896 896 b'experimental',
897 897 b'bundlecompthreads.zstd',
898 898 default=None,
899 899 )
900 900 coreconfigitem(
901 901 b'experimental',
902 902 b'changegroup3',
903 903 default=False,
904 904 )
905 905 coreconfigitem(
906 906 b'experimental',
907 907 b'changegroup4',
908 908 default=False,
909 909 )
910 910 coreconfigitem(
911 911 b'experimental',
912 912 b'cleanup-as-archived',
913 913 default=False,
914 914 )
915 915 coreconfigitem(
916 916 b'experimental',
917 917 b'clientcompressionengines',
918 918 default=list,
919 919 )
920 920 coreconfigitem(
921 921 b'experimental',
922 922 b'copytrace',
923 923 default=b'on',
924 924 )
925 925 coreconfigitem(
926 926 b'experimental',
927 927 b'copytrace.movecandidateslimit',
928 928 default=100,
929 929 )
930 930 coreconfigitem(
931 931 b'experimental',
932 932 b'copytrace.sourcecommitlimit',
933 933 default=100,
934 934 )
935 935 coreconfigitem(
936 936 b'experimental',
937 937 b'copies.read-from',
938 938 default=b"filelog-only",
939 939 )
940 940 coreconfigitem(
941 941 b'experimental',
942 942 b'copies.write-to',
943 943 default=b'filelog-only',
944 944 )
945 945 coreconfigitem(
946 946 b'experimental',
947 947 b'crecordtest',
948 948 default=None,
949 949 )
950 950 coreconfigitem(
951 951 b'experimental',
952 952 b'directaccess',
953 953 default=False,
954 954 )
955 955 coreconfigitem(
956 956 b'experimental',
957 957 b'directaccess.revnums',
958 958 default=False,
959 959 )
960 960 coreconfigitem(
961 961 b'experimental',
962 962 b'dirstate-tree.in-memory',
963 963 default=False,
964 964 )
965 965 coreconfigitem(
966 966 b'experimental',
967 967 b'editortmpinhg',
968 968 default=False,
969 969 )
970 970 coreconfigitem(
971 971 b'experimental',
972 972 b'evolution',
973 973 default=list,
974 974 )
975 975 coreconfigitem(
976 976 b'experimental',
977 977 b'evolution.allowdivergence',
978 978 default=False,
979 979 alias=[(b'experimental', b'allowdivergence')],
980 980 )
981 981 coreconfigitem(
982 982 b'experimental',
983 983 b'evolution.allowunstable',
984 984 default=None,
985 985 )
986 986 coreconfigitem(
987 987 b'experimental',
988 988 b'evolution.createmarkers',
989 989 default=None,
990 990 )
991 991 coreconfigitem(
992 992 b'experimental',
993 993 b'evolution.effect-flags',
994 994 default=True,
995 995 alias=[(b'experimental', b'effect-flags')],
996 996 )
997 997 coreconfigitem(
998 998 b'experimental',
999 999 b'evolution.exchange',
1000 1000 default=None,
1001 1001 )
1002 1002 coreconfigitem(
1003 1003 b'experimental',
1004 1004 b'evolution.bundle-obsmarker',
1005 1005 default=False,
1006 1006 )
1007 1007 coreconfigitem(
1008 1008 b'experimental',
1009 1009 b'evolution.bundle-obsmarker:mandatory',
1010 1010 default=True,
1011 1011 )
1012 1012 coreconfigitem(
1013 1013 b'experimental',
1014 1014 b'log.topo',
1015 1015 default=False,
1016 1016 )
1017 1017 coreconfigitem(
1018 1018 b'experimental',
1019 1019 b'evolution.report-instabilities',
1020 1020 default=True,
1021 1021 )
1022 1022 coreconfigitem(
1023 1023 b'experimental',
1024 1024 b'evolution.track-operation',
1025 1025 default=True,
1026 1026 )
1027 1027 # repo-level config to exclude a revset from visibility
1028 1028 #
1029 1029 # The target use case is to use `share` to expose different subsets of the same
1030 1030 # repository, especially server side. See also `server.view`.
1031 1031 coreconfigitem(
1032 1032 b'experimental',
1033 1033 b'extra-filter-revs',
1034 1034 default=None,
1035 1035 )
1036 1036 coreconfigitem(
1037 1037 b'experimental',
1038 1038 b'maxdeltachainspan',
1039 1039 default=-1,
1040 1040 )
1041 1041 # tracks files which were undeleted (merge might delete them but we explicitly
1042 1042 # kept/undeleted them) and creates new filenodes for them
1043 1043 coreconfigitem(
1044 1044 b'experimental',
1045 1045 b'merge-track-salvaged',
1046 1046 default=False,
1047 1047 )
1048 1048 coreconfigitem(
1049 1049 b'experimental',
1050 1050 b'mergetempdirprefix',
1051 1051 default=None,
1052 1052 )
1053 1053 coreconfigitem(
1054 1054 b'experimental',
1055 1055 b'mmapindexthreshold',
1056 1056 default=None,
1057 1057 )
1058 1058 coreconfigitem(
1059 1059 b'experimental',
1060 1060 b'narrow',
1061 1061 default=False,
1062 1062 )
1063 1063 coreconfigitem(
1064 1064 b'experimental',
1065 1065 b'nonnormalparanoidcheck',
1066 1066 default=False,
1067 1067 )
1068 1068 coreconfigitem(
1069 1069 b'experimental',
1070 1070 b'exportableenviron',
1071 1071 default=list,
1072 1072 )
1073 1073 coreconfigitem(
1074 1074 b'experimental',
1075 1075 b'extendedheader.index',
1076 1076 default=None,
1077 1077 )
1078 1078 coreconfigitem(
1079 1079 b'experimental',
1080 1080 b'extendedheader.similarity',
1081 1081 default=False,
1082 1082 )
1083 1083 coreconfigitem(
1084 1084 b'experimental',
1085 1085 b'graphshorten',
1086 1086 default=False,
1087 1087 )
1088 1088 coreconfigitem(
1089 1089 b'experimental',
1090 1090 b'graphstyle.parent',
1091 1091 default=dynamicdefault,
1092 1092 )
1093 1093 coreconfigitem(
1094 1094 b'experimental',
1095 1095 b'graphstyle.missing',
1096 1096 default=dynamicdefault,
1097 1097 )
1098 1098 coreconfigitem(
1099 1099 b'experimental',
1100 1100 b'graphstyle.grandparent',
1101 1101 default=dynamicdefault,
1102 1102 )
1103 1103 coreconfigitem(
1104 1104 b'experimental',
1105 1105 b'hook-track-tags',
1106 1106 default=False,
1107 1107 )
1108 1108 coreconfigitem(
1109 1109 b'experimental',
1110 1110 b'httppeer.advertise-v2',
1111 1111 default=False,
1112 1112 )
1113 1113 coreconfigitem(
1114 1114 b'experimental',
1115 1115 b'httppeer.v2-encoder-order',
1116 1116 default=None,
1117 1117 )
1118 1118 coreconfigitem(
1119 1119 b'experimental',
1120 1120 b'httppostargs',
1121 1121 default=False,
1122 1122 )
1123 1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1124 1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1125 1125
1126 1126 coreconfigitem(
1127 1127 b'experimental',
1128 1128 b'obsmarkers-exchange-debug',
1129 1129 default=False,
1130 1130 )
1131 1131 coreconfigitem(
1132 1132 b'experimental',
1133 1133 b'remotenames',
1134 1134 default=False,
1135 1135 )
1136 1136 coreconfigitem(
1137 1137 b'experimental',
1138 1138 b'removeemptydirs',
1139 1139 default=True,
1140 1140 )
1141 1141 coreconfigitem(
1142 1142 b'experimental',
1143 1143 b'revert.interactive.select-to-keep',
1144 1144 default=False,
1145 1145 )
1146 1146 coreconfigitem(
1147 1147 b'experimental',
1148 1148 b'revisions.prefixhexnode',
1149 1149 default=False,
1150 1150 )
1151 1151 # "out of experimental" todo list.
1152 1152 #
1153 1153 # * include management of a persistent nodemap in the main docket
1154 1154 # * enforce a "no-truncate" policy for mmap safety
1155 1155 # - for censoring operation
1156 1156 # - for stripping operation
1157 1157 # - for rollback operation
1158 1158 # * proper streaming (race-free) of the docket file
1159 1159 # * track garbage data to eventually allow rewriting -existing- sidedata.
1160 1160 # * Exchange-wise, we will also need to do something more efficient than
1161 1161 # keeping references to the affected revlogs, especially memory-wise when
1162 1162 # rewriting sidedata.
1163 1163 # * introduce a proper solution to reduce the number of filelog related files.
1164 1164 # * use caching for reading sidedata (similar to what we do for data).
1165 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1165 1166 # * Improvements to consider
1166 1167 # - avoid compression header in chunk using the default compression?
1167 1168 # - forbid "inline" compression mode entirely?
1168 1169 # - split the data offset and flag field (the 2 bytes saved are mostly trouble)
1169 1170 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1170 1171 # - keep track of chain base or size (probably not that useful anymore)
1171 # - store data and sidedata in different files
1172 1172 coreconfigitem(
1173 1173 b'experimental',
1174 1174 b'revlogv2',
1175 1175 default=None,
1176 1176 )
1177 1177 coreconfigitem(
1178 1178 b'experimental',
1179 1179 b'revisions.disambiguatewithin',
1180 1180 default=None,
1181 1181 )
1182 1182 coreconfigitem(
1183 1183 b'experimental',
1184 1184 b'rust.index',
1185 1185 default=False,
1186 1186 )
1187 1187 coreconfigitem(
1188 1188 b'experimental',
1189 1189 b'server.filesdata.recommended-batch-size',
1190 1190 default=50000,
1191 1191 )
1192 1192 coreconfigitem(
1193 1193 b'experimental',
1194 1194 b'server.manifestdata.recommended-batch-size',
1195 1195 default=100000,
1196 1196 )
1197 1197 coreconfigitem(
1198 1198 b'experimental',
1199 1199 b'server.stream-narrow-clones',
1200 1200 default=False,
1201 1201 )
1202 1202 coreconfigitem(
1203 1203 b'experimental',
1204 1204 b'single-head-per-branch',
1205 1205 default=False,
1206 1206 )
1207 1207 coreconfigitem(
1208 1208 b'experimental',
1209 1209 b'single-head-per-branch:account-closed-heads',
1210 1210 default=False,
1211 1211 )
1212 1212 coreconfigitem(
1213 1213 b'experimental',
1214 1214 b'single-head-per-branch:public-changes-only',
1215 1215 default=False,
1216 1216 )
1217 1217 coreconfigitem(
1218 1218 b'experimental',
1219 1219 b'sshserver.support-v2',
1220 1220 default=False,
1221 1221 )
1222 1222 coreconfigitem(
1223 1223 b'experimental',
1224 1224 b'sparse-read',
1225 1225 default=False,
1226 1226 )
1227 1227 coreconfigitem(
1228 1228 b'experimental',
1229 1229 b'sparse-read.density-threshold',
1230 1230 default=0.50,
1231 1231 )
1232 1232 coreconfigitem(
1233 1233 b'experimental',
1234 1234 b'sparse-read.min-gap-size',
1235 1235 default=b'65K',
1236 1236 )
1237 1237 coreconfigitem(
1238 1238 b'experimental',
1239 1239 b'treemanifest',
1240 1240 default=False,
1241 1241 )
1242 1242 coreconfigitem(
1243 1243 b'experimental',
1244 1244 b'update.atomic-file',
1245 1245 default=False,
1246 1246 )
1247 1247 coreconfigitem(
1248 1248 b'experimental',
1249 1249 b'sshpeer.advertise-v2',
1250 1250 default=False,
1251 1251 )
1252 1252 coreconfigitem(
1253 1253 b'experimental',
1254 1254 b'web.apiserver',
1255 1255 default=False,
1256 1256 )
1257 1257 coreconfigitem(
1258 1258 b'experimental',
1259 1259 b'web.api.http-v2',
1260 1260 default=False,
1261 1261 )
1262 1262 coreconfigitem(
1263 1263 b'experimental',
1264 1264 b'web.api.debugreflect',
1265 1265 default=False,
1266 1266 )
1267 1267 coreconfigitem(
1268 1268 b'experimental',
1269 1269 b'worker.wdir-get-thread-safe',
1270 1270 default=False,
1271 1271 )
1272 1272 coreconfigitem(
1273 1273 b'experimental',
1274 1274 b'worker.repository-upgrade',
1275 1275 default=False,
1276 1276 )
1277 1277 coreconfigitem(
1278 1278 b'experimental',
1279 1279 b'xdiff',
1280 1280 default=False,
1281 1281 )
1282 1282 coreconfigitem(
1283 1283 b'extensions',
1284 1284 b'.*',
1285 1285 default=None,
1286 1286 generic=True,
1287 1287 )
1288 1288 coreconfigitem(
1289 1289 b'extdata',
1290 1290 b'.*',
1291 1291 default=None,
1292 1292 generic=True,
1293 1293 )
1294 1294 coreconfigitem(
1295 1295 b'format',
1296 1296 b'bookmarks-in-store',
1297 1297 default=False,
1298 1298 )
1299 1299 coreconfigitem(
1300 1300 b'format',
1301 1301 b'chunkcachesize',
1302 1302 default=None,
1303 1303 experimental=True,
1304 1304 )
1305 1305 coreconfigitem(
1306 1306 # Enable this dirstate format *when creating a new repository*.
1307 1307 # Which format to use for existing repos is controlled by .hg/requires
1308 1308 b'format',
1309 1309 b'exp-dirstate-v2',
1310 1310 default=False,
1311 1311 experimental=True,
1312 1312 )
1313 1313 coreconfigitem(
1314 1314 b'format',
1315 1315 b'dotencode',
1316 1316 default=True,
1317 1317 )
1318 1318 coreconfigitem(
1319 1319 b'format',
1320 1320 b'generaldelta',
1321 1321 default=False,
1322 1322 experimental=True,
1323 1323 )
1324 1324 coreconfigitem(
1325 1325 b'format',
1326 1326 b'manifestcachesize',
1327 1327 default=None,
1328 1328 experimental=True,
1329 1329 )
1330 1330 coreconfigitem(
1331 1331 b'format',
1332 1332 b'maxchainlen',
1333 1333 default=dynamicdefault,
1334 1334 experimental=True,
1335 1335 )
1336 1336 coreconfigitem(
1337 1337 b'format',
1338 1338 b'obsstore-version',
1339 1339 default=None,
1340 1340 )
1341 1341 coreconfigitem(
1342 1342 b'format',
1343 1343 b'sparse-revlog',
1344 1344 default=True,
1345 1345 )
1346 1346 coreconfigitem(
1347 1347 b'format',
1348 1348 b'revlog-compression',
1349 1349 default=lambda: [b'zstd', b'zlib'],
1350 1350 alias=[(b'experimental', b'format.compression')],
1351 1351 )
1352 1352 # Experimental TODOs:
1353 1353 #
1354 1354 # * Same as for revlogv2 (but for the reduction of the number of files)
1355 1355 # * Improvements to investigate
1356 1356 # - storing .hgtags fnode
1357 1357 # - storing `rank` of changesets
1358 1358 # - storing branch related identifier
1359 1359
1360 1360 coreconfigitem(
1361 1361 b'format',
1362 1362 b'exp-use-changelog-v2',
1363 1363 default=None,
1364 1364 experimental=True,
1365 1365 )
1366 1366 coreconfigitem(
1367 1367 b'format',
1368 1368 b'usefncache',
1369 1369 default=True,
1370 1370 )
1371 1371 coreconfigitem(
1372 1372 b'format',
1373 1373 b'usegeneraldelta',
1374 1374 default=True,
1375 1375 )
1376 1376 coreconfigitem(
1377 1377 b'format',
1378 1378 b'usestore',
1379 1379 default=True,
1380 1380 )
1381 1381
1382 1382
1383 1383 def _persistent_nodemap_default():
1384 1384 """compute `use-persistent-nodemap` default value
1385 1385
1386 1386 The feature is disabled unless a fast implementation is available.
1387 1387 """
1388 1388 from . import policy
1389 1389
1390 1390 return policy.importrust('revlog') is not None
1391 1391
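Note that `default` may be a callable, as with `default=list` elsewhere in this file; a sketch of how a consumer is expected to resolve it (hypothetical, assuming the reading code calls callables to obtain fresh values):

    item = configitem(
        b'format',
        b'use-persistent-nodemap',
        default=_persistent_nodemap_default,
    )
    value = item.default() if callable(item.default) else item.default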
1392 1392
1393 1393 coreconfigitem(
1394 1394 b'format',
1395 1395 b'use-persistent-nodemap',
1396 1396 default=_persistent_nodemap_default,
1397 1397 )
1398 1398 coreconfigitem(
1399 1399 b'format',
1400 1400 b'exp-use-copies-side-data-changeset',
1401 1401 default=False,
1402 1402 experimental=True,
1403 1403 )
1404 1404 coreconfigitem(
1405 1405 b'format',
1406 1406 b'use-share-safe',
1407 1407 default=False,
1408 1408 )
1409 1409 coreconfigitem(
1410 1410 b'format',
1411 1411 b'internal-phase',
1412 1412 default=False,
1413 1413 experimental=True,
1414 1414 )
1415 1415 coreconfigitem(
1416 1416 b'fsmonitor',
1417 1417 b'warn_when_unused',
1418 1418 default=True,
1419 1419 )
1420 1420 coreconfigitem(
1421 1421 b'fsmonitor',
1422 1422 b'warn_update_file_count',
1423 1423 default=50000,
1424 1424 )
1425 1425 coreconfigitem(
1426 1426 b'fsmonitor',
1427 1427 b'warn_update_file_count_rust',
1428 1428 default=400000,
1429 1429 )
1430 1430 coreconfigitem(
1431 1431 b'help',
1432 1432 br'hidden-command\..*',
1433 1433 default=False,
1434 1434 generic=True,
1435 1435 )
1436 1436 coreconfigitem(
1437 1437 b'help',
1438 1438 br'hidden-topic\..*',
1439 1439 default=False,
1440 1440 generic=True,
1441 1441 )
1442 1442 coreconfigitem(
1443 1443 b'hooks',
1444 1444 b'[^:]*',
1445 1445 default=dynamicdefault,
1446 1446 generic=True,
1447 1447 )
1448 1448 coreconfigitem(
1449 1449 b'hooks',
1450 1450 b'.*:run-with-plain',
1451 1451 default=True,
1452 1452 generic=True,
1453 1453 )
1454 1454 coreconfigitem(
1455 1455 b'hgweb-paths',
1456 1456 b'.*',
1457 1457 default=list,
1458 1458 generic=True,
1459 1459 )
1460 1460 coreconfigitem(
1461 1461 b'hostfingerprints',
1462 1462 b'.*',
1463 1463 default=list,
1464 1464 generic=True,
1465 1465 )
1466 1466 coreconfigitem(
1467 1467 b'hostsecurity',
1468 1468 b'ciphers',
1469 1469 default=None,
1470 1470 )
1471 1471 coreconfigitem(
1472 1472 b'hostsecurity',
1473 1473 b'minimumprotocol',
1474 1474 default=dynamicdefault,
1475 1475 )
1476 1476 coreconfigitem(
1477 1477 b'hostsecurity',
1478 1478 b'.*:minimumprotocol$',
1479 1479 default=dynamicdefault,
1480 1480 generic=True,
1481 1481 )
1482 1482 coreconfigitem(
1483 1483 b'hostsecurity',
1484 1484 b'.*:ciphers$',
1485 1485 default=dynamicdefault,
1486 1486 generic=True,
1487 1487 )
1488 1488 coreconfigitem(
1489 1489 b'hostsecurity',
1490 1490 b'.*:fingerprints$',
1491 1491 default=list,
1492 1492 generic=True,
1493 1493 )
1494 1494 coreconfigitem(
1495 1495 b'hostsecurity',
1496 1496 b'.*:verifycertsfile$',
1497 1497 default=None,
1498 1498 generic=True,
1499 1499 )
1500 1500
1501 1501 coreconfigitem(
1502 1502 b'http_proxy',
1503 1503 b'always',
1504 1504 default=False,
1505 1505 )
1506 1506 coreconfigitem(
1507 1507 b'http_proxy',
1508 1508 b'host',
1509 1509 default=None,
1510 1510 )
1511 1511 coreconfigitem(
1512 1512 b'http_proxy',
1513 1513 b'no',
1514 1514 default=list,
1515 1515 )
1516 1516 coreconfigitem(
1517 1517 b'http_proxy',
1518 1518 b'passwd',
1519 1519 default=None,
1520 1520 )
1521 1521 coreconfigitem(
1522 1522 b'http_proxy',
1523 1523 b'user',
1524 1524 default=None,
1525 1525 )
1526 1526
1527 1527 coreconfigitem(
1528 1528 b'http',
1529 1529 b'timeout',
1530 1530 default=None,
1531 1531 )
1532 1532
1533 1533 coreconfigitem(
1534 1534 b'logtoprocess',
1535 1535 b'commandexception',
1536 1536 default=None,
1537 1537 )
1538 1538 coreconfigitem(
1539 1539 b'logtoprocess',
1540 1540 b'commandfinish',
1541 1541 default=None,
1542 1542 )
1543 1543 coreconfigitem(
1544 1544 b'logtoprocess',
1545 1545 b'command',
1546 1546 default=None,
1547 1547 )
1548 1548 coreconfigitem(
1549 1549 b'logtoprocess',
1550 1550 b'develwarn',
1551 1551 default=None,
1552 1552 )
1553 1553 coreconfigitem(
1554 1554 b'logtoprocess',
1555 1555 b'uiblocked',
1556 1556 default=None,
1557 1557 )
1558 1558 coreconfigitem(
1559 1559 b'merge',
1560 1560 b'checkunknown',
1561 1561 default=b'abort',
1562 1562 )
1563 1563 coreconfigitem(
1564 1564 b'merge',
1565 1565 b'checkignored',
1566 1566 default=b'abort',
1567 1567 )
1568 1568 coreconfigitem(
1569 1569 b'experimental',
1570 1570 b'merge.checkpathconflicts',
1571 1571 default=False,
1572 1572 )
1573 1573 coreconfigitem(
1574 1574 b'merge',
1575 1575 b'followcopies',
1576 1576 default=True,
1577 1577 )
1578 1578 coreconfigitem(
1579 1579 b'merge',
1580 1580 b'on-failure',
1581 1581 default=b'continue',
1582 1582 )
1583 1583 coreconfigitem(
1584 1584 b'merge',
1585 1585 b'preferancestor',
1586 1586 default=lambda: [b'*'],
1587 1587 experimental=True,
1588 1588 )
1589 1589 coreconfigitem(
1590 1590 b'merge',
1591 1591 b'strict-capability-check',
1592 1592 default=False,
1593 1593 )
1594 1594 coreconfigitem(
1595 1595 b'merge-tools',
1596 1596 b'.*',
1597 1597 default=None,
1598 1598 generic=True,
1599 1599 )
1600 1600 coreconfigitem(
1601 1601 b'merge-tools',
1602 1602 br'.*\.args$',
1603 1603 default=b"$local $base $other",
1604 1604 generic=True,
1605 1605 priority=-1,
1606 1606 )
1607 1607 coreconfigitem(
1608 1608 b'merge-tools',
1609 1609 br'.*\.binary$',
1610 1610 default=False,
1611 1611 generic=True,
1612 1612 priority=-1,
1613 1613 )
1614 1614 coreconfigitem(
1615 1615 b'merge-tools',
1616 1616 br'.*\.check$',
1617 1617 default=list,
1618 1618 generic=True,
1619 1619 priority=-1,
1620 1620 )
1621 1621 coreconfigitem(
1622 1622 b'merge-tools',
1623 1623 br'.*\.checkchanged$',
1624 1624 default=False,
1625 1625 generic=True,
1626 1626 priority=-1,
1627 1627 )
1628 1628 coreconfigitem(
1629 1629 b'merge-tools',
1630 1630 br'.*\.executable$',
1631 1631 default=dynamicdefault,
1632 1632 generic=True,
1633 1633 priority=-1,
1634 1634 )
1635 1635 coreconfigitem(
1636 1636 b'merge-tools',
1637 1637 br'.*\.fixeol$',
1638 1638 default=False,
1639 1639 generic=True,
1640 1640 priority=-1,
1641 1641 )
1642 1642 coreconfigitem(
1643 1643 b'merge-tools',
1644 1644 br'.*\.gui$',
1645 1645 default=False,
1646 1646 generic=True,
1647 1647 priority=-1,
1648 1648 )
1649 1649 coreconfigitem(
1650 1650 b'merge-tools',
1651 1651 br'.*\.mergemarkers$',
1652 1652 default=b'basic',
1653 1653 generic=True,
1654 1654 priority=-1,
1655 1655 )
1656 1656 coreconfigitem(
1657 1657 b'merge-tools',
1658 1658 br'.*\.mergemarkertemplate$',
1659 1659 default=dynamicdefault, # take from command-templates.mergemarker
1660 1660 generic=True,
1661 1661 priority=-1,
1662 1662 )
1663 1663 coreconfigitem(
1664 1664 b'merge-tools',
1665 1665 br'.*\.priority$',
1666 1666 default=0,
1667 1667 generic=True,
1668 1668 priority=-1,
1669 1669 )
1670 1670 coreconfigitem(
1671 1671 b'merge-tools',
1672 1672 br'.*\.premerge$',
1673 1673 default=dynamicdefault,
1674 1674 generic=True,
1675 1675 priority=-1,
1676 1676 )
1677 1677 coreconfigitem(
1678 1678 b'merge-tools',
1679 1679 br'.*\.symlink$',
1680 1680 default=False,
1681 1681 generic=True,
1682 1682 priority=-1,
1683 1683 )
1684 1684 coreconfigitem(
1685 1685 b'pager',
1686 1686 b'attend-.*',
1687 1687 default=dynamicdefault,
1688 1688 generic=True,
1689 1689 )
1690 1690 coreconfigitem(
1691 1691 b'pager',
1692 1692 b'ignore',
1693 1693 default=list,
1694 1694 )
1695 1695 coreconfigitem(
1696 1696 b'pager',
1697 1697 b'pager',
1698 1698 default=dynamicdefault,
1699 1699 )
1700 1700 coreconfigitem(
1701 1701 b'patch',
1702 1702 b'eol',
1703 1703 default=b'strict',
1704 1704 )
1705 1705 coreconfigitem(
1706 1706 b'patch',
1707 1707 b'fuzz',
1708 1708 default=2,
1709 1709 )
1710 1710 coreconfigitem(
1711 1711 b'paths',
1712 1712 b'default',
1713 1713 default=None,
1714 1714 )
1715 1715 coreconfigitem(
1716 1716 b'paths',
1717 1717 b'default-push',
1718 1718 default=None,
1719 1719 )
1720 1720 coreconfigitem(
1721 1721 b'paths',
1722 1722 b'.*',
1723 1723 default=None,
1724 1724 generic=True,
1725 1725 )
1726 1726 coreconfigitem(
1727 1727 b'phases',
1728 1728 b'checksubrepos',
1729 1729 default=b'follow',
1730 1730 )
1731 1731 coreconfigitem(
1732 1732 b'phases',
1733 1733 b'new-commit',
1734 1734 default=b'draft',
1735 1735 )
1736 1736 coreconfigitem(
1737 1737 b'phases',
1738 1738 b'publish',
1739 1739 default=True,
1740 1740 )
1741 1741 coreconfigitem(
1742 1742 b'profiling',
1743 1743 b'enabled',
1744 1744 default=False,
1745 1745 )
1746 1746 coreconfigitem(
1747 1747 b'profiling',
1748 1748 b'format',
1749 1749 default=b'text',
1750 1750 )
1751 1751 coreconfigitem(
1752 1752 b'profiling',
1753 1753 b'freq',
1754 1754 default=1000,
1755 1755 )
1756 1756 coreconfigitem(
1757 1757 b'profiling',
1758 1758 b'limit',
1759 1759 default=30,
1760 1760 )
1761 1761 coreconfigitem(
1762 1762 b'profiling',
1763 1763 b'nested',
1764 1764 default=0,
1765 1765 )
1766 1766 coreconfigitem(
1767 1767 b'profiling',
1768 1768 b'output',
1769 1769 default=None,
1770 1770 )
1771 1771 coreconfigitem(
1772 1772 b'profiling',
1773 1773 b'showmax',
1774 1774 default=0.999,
1775 1775 )
1776 1776 coreconfigitem(
1777 1777 b'profiling',
1778 1778 b'showmin',
1779 1779 default=dynamicdefault,
1780 1780 )
1781 1781 coreconfigitem(
1782 1782 b'profiling',
1783 1783 b'showtime',
1784 1784 default=True,
1785 1785 )
1786 1786 coreconfigitem(
1787 1787 b'profiling',
1788 1788 b'sort',
1789 1789 default=b'inlinetime',
1790 1790 )
1791 1791 coreconfigitem(
1792 1792 b'profiling',
1793 1793 b'statformat',
1794 1794 default=b'hotpath',
1795 1795 )
1796 1796 coreconfigitem(
1797 1797 b'profiling',
1798 1798 b'time-track',
1799 1799 default=dynamicdefault,
1800 1800 )
1801 1801 coreconfigitem(
1802 1802 b'profiling',
1803 1803 b'type',
1804 1804 default=b'stat',
1805 1805 )
1806 1806 coreconfigitem(
1807 1807 b'progress',
1808 1808 b'assume-tty',
1809 1809 default=False,
1810 1810 )
1811 1811 coreconfigitem(
1812 1812 b'progress',
1813 1813 b'changedelay',
1814 1814 default=1,
1815 1815 )
1816 1816 coreconfigitem(
1817 1817 b'progress',
1818 1818 b'clear-complete',
1819 1819 default=True,
1820 1820 )
1821 1821 coreconfigitem(
1822 1822 b'progress',
1823 1823 b'debug',
1824 1824 default=False,
1825 1825 )
1826 1826 coreconfigitem(
1827 1827 b'progress',
1828 1828 b'delay',
1829 1829 default=3,
1830 1830 )
1831 1831 coreconfigitem(
1832 1832 b'progress',
1833 1833 b'disable',
1834 1834 default=False,
1835 1835 )
1836 1836 coreconfigitem(
1837 1837 b'progress',
1838 1838 b'estimateinterval',
1839 1839 default=60.0,
1840 1840 )
1841 1841 coreconfigitem(
1842 1842 b'progress',
1843 1843 b'format',
1844 1844 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1845 1845 )
1846 1846 coreconfigitem(
1847 1847 b'progress',
1848 1848 b'refresh',
1849 1849 default=0.1,
1850 1850 )
1851 1851 coreconfigitem(
1852 1852 b'progress',
1853 1853 b'width',
1854 1854 default=dynamicdefault,
1855 1855 )
1856 1856 coreconfigitem(
1857 1857 b'pull',
1858 1858 b'confirm',
1859 1859 default=False,
1860 1860 )
1861 1861 coreconfigitem(
1862 1862 b'push',
1863 1863 b'pushvars.server',
1864 1864 default=False,
1865 1865 )
1866 1866 coreconfigitem(
1867 1867 b'rewrite',
1868 1868 b'backup-bundle',
1869 1869 default=True,
1870 1870 alias=[(b'ui', b'history-editing-backup')],
1871 1871 )
1872 1872 coreconfigitem(
1873 1873 b'rewrite',
1874 1874 b'update-timestamp',
1875 1875 default=False,
1876 1876 )
1877 1877 coreconfigitem(
1878 1878 b'rewrite',
1879 1879 b'empty-successor',
1880 1880 default=b'skip',
1881 1881 experimental=True,
1882 1882 )
1883 1883 coreconfigitem(
1884 1884 b'storage',
1885 1885 b'new-repo-backend',
1886 1886 default=b'revlogv1',
1887 1887 experimental=True,
1888 1888 )
1889 1889 coreconfigitem(
1890 1890 b'storage',
1891 1891 b'revlog.optimize-delta-parent-choice',
1892 1892 default=True,
1893 1893 alias=[(b'format', b'aggressivemergedeltas')],
1894 1894 )
1895 1895 # experimental as long as rust is experimental (or until a C version is implemented)
1896 1896 coreconfigitem(
1897 1897 b'storage',
1898 1898 b'revlog.persistent-nodemap.mmap',
1899 1899 default=True,
1900 1900 )
1901 1901 # experimental as long as format.use-persistent-nodemap is.
1902 1902 coreconfigitem(
1903 1903 b'storage',
1904 1904 b'revlog.persistent-nodemap.slow-path',
1905 1905 default=b"abort",
1906 1906 )
1907 1907
1908 1908 coreconfigitem(
1909 1909 b'storage',
1910 1910 b'revlog.reuse-external-delta',
1911 1911 default=True,
1912 1912 )
1913 1913 coreconfigitem(
1914 1914 b'storage',
1915 1915 b'revlog.reuse-external-delta-parent',
1916 1916 default=None,
1917 1917 )
1918 1918 coreconfigitem(
1919 1919 b'storage',
1920 1920 b'revlog.zlib.level',
1921 1921 default=None,
1922 1922 )
1923 1923 coreconfigitem(
1924 1924 b'storage',
1925 1925 b'revlog.zstd.level',
1926 1926 default=None,
1927 1927 )
1928 1928 coreconfigitem(
1929 1929 b'server',
1930 1930 b'bookmarks-pushkey-compat',
1931 1931 default=True,
1932 1932 )
1933 1933 coreconfigitem(
1934 1934 b'server',
1935 1935 b'bundle1',
1936 1936 default=True,
1937 1937 )
1938 1938 coreconfigitem(
1939 1939 b'server',
1940 1940 b'bundle1gd',
1941 1941 default=None,
1942 1942 )
1943 1943 coreconfigitem(
1944 1944 b'server',
1945 1945 b'bundle1.pull',
1946 1946 default=None,
1947 1947 )
1948 1948 coreconfigitem(
1949 1949 b'server',
1950 1950 b'bundle1gd.pull',
1951 1951 default=None,
1952 1952 )
1953 1953 coreconfigitem(
1954 1954 b'server',
1955 1955 b'bundle1.push',
1956 1956 default=None,
1957 1957 )
1958 1958 coreconfigitem(
1959 1959 b'server',
1960 1960 b'bundle1gd.push',
1961 1961 default=None,
1962 1962 )
1963 1963 coreconfigitem(
1964 1964 b'server',
1965 1965 b'bundle2.stream',
1966 1966 default=True,
1967 1967 alias=[(b'experimental', b'bundle2.stream')],
1968 1968 )
1969 1969 coreconfigitem(
1970 1970 b'server',
1971 1971 b'compressionengines',
1972 1972 default=list,
1973 1973 )
1974 1974 coreconfigitem(
1975 1975 b'server',
1976 1976 b'concurrent-push-mode',
1977 1977 default=b'check-related',
1978 1978 )
1979 1979 coreconfigitem(
1980 1980 b'server',
1981 1981 b'disablefullbundle',
1982 1982 default=False,
1983 1983 )
1984 1984 coreconfigitem(
1985 1985 b'server',
1986 1986 b'maxhttpheaderlen',
1987 1987 default=1024,
1988 1988 )
1989 1989 coreconfigitem(
1990 1990 b'server',
1991 1991 b'pullbundle',
1992 1992 default=False,
1993 1993 )
1994 1994 coreconfigitem(
1995 1995 b'server',
1996 1996 b'preferuncompressed',
1997 1997 default=False,
1998 1998 )
1999 1999 coreconfigitem(
2000 2000 b'server',
2001 2001 b'streamunbundle',
2002 2002 default=False,
2003 2003 )
2004 2004 coreconfigitem(
2005 2005 b'server',
2006 2006 b'uncompressed',
2007 2007 default=True,
2008 2008 )
2009 2009 coreconfigitem(
2010 2010 b'server',
2011 2011 b'uncompressedallowsecret',
2012 2012 default=False,
2013 2013 )
2014 2014 coreconfigitem(
2015 2015 b'server',
2016 2016 b'view',
2017 2017 default=b'served',
2018 2018 )
2019 2019 coreconfigitem(
2020 2020 b'server',
2021 2021 b'validate',
2022 2022 default=False,
2023 2023 )
2024 2024 coreconfigitem(
2025 2025 b'server',
2026 2026 b'zliblevel',
2027 2027 default=-1,
2028 2028 )
2029 2029 coreconfigitem(
2030 2030 b'server',
2031 2031 b'zstdlevel',
2032 2032 default=3,
2033 2033 )
2034 2034 coreconfigitem(
2035 2035 b'share',
2036 2036 b'pool',
2037 2037 default=None,
2038 2038 )
2039 2039 coreconfigitem(
2040 2040 b'share',
2041 2041 b'poolnaming',
2042 2042 default=b'identity',
2043 2043 )
2044 2044 coreconfigitem(
2045 2045 b'share',
2046 2046 b'safe-mismatch.source-not-safe',
2047 2047 default=b'abort',
2048 2048 )
2049 2049 coreconfigitem(
2050 2050 b'share',
2051 2051 b'safe-mismatch.source-safe',
2052 2052 default=b'abort',
2053 2053 )
2054 2054 coreconfigitem(
2055 2055 b'share',
2056 2056 b'safe-mismatch.source-not-safe.warn',
2057 2057 default=True,
2058 2058 )
2059 2059 coreconfigitem(
2060 2060 b'share',
2061 2061 b'safe-mismatch.source-safe.warn',
2062 2062 default=True,
2063 2063 )
2064 2064 coreconfigitem(
2065 2065 b'shelve',
2066 2066 b'maxbackups',
2067 2067 default=10,
2068 2068 )
2069 2069 coreconfigitem(
2070 2070 b'smtp',
2071 2071 b'host',
2072 2072 default=None,
2073 2073 )
2074 2074 coreconfigitem(
2075 2075 b'smtp',
2076 2076 b'local_hostname',
2077 2077 default=None,
2078 2078 )
2079 2079 coreconfigitem(
2080 2080 b'smtp',
2081 2081 b'password',
2082 2082 default=None,
2083 2083 )
2084 2084 coreconfigitem(
2085 2085 b'smtp',
2086 2086 b'port',
2087 2087 default=dynamicdefault,
2088 2088 )
2089 2089 coreconfigitem(
2090 2090 b'smtp',
2091 2091 b'tls',
2092 2092 default=b'none',
2093 2093 )
2094 2094 coreconfigitem(
2095 2095 b'smtp',
2096 2096 b'username',
2097 2097 default=None,
2098 2098 )
2099 2099 coreconfigitem(
2100 2100 b'sparse',
2101 2101 b'missingwarning',
2102 2102 default=True,
2103 2103 experimental=True,
2104 2104 )
2105 2105 coreconfigitem(
2106 2106 b'subrepos',
2107 2107 b'allowed',
2108 2108 default=dynamicdefault, # to make backporting simpler
2109 2109 )
2110 2110 coreconfigitem(
2111 2111 b'subrepos',
2112 2112 b'hg:allowed',
2113 2113 default=dynamicdefault,
2114 2114 )
2115 2115 coreconfigitem(
2116 2116 b'subrepos',
2117 2117 b'git:allowed',
2118 2118 default=dynamicdefault,
2119 2119 )
2120 2120 coreconfigitem(
2121 2121 b'subrepos',
2122 2122 b'svn:allowed',
2123 2123 default=dynamicdefault,
2124 2124 )
2125 2125 coreconfigitem(
2126 2126 b'templates',
2127 2127 b'.*',
2128 2128 default=None,
2129 2129 generic=True,
2130 2130 )
2131 2131 coreconfigitem(
2132 2132 b'templateconfig',
2133 2133 b'.*',
2134 2134 default=dynamicdefault,
2135 2135 generic=True,
2136 2136 )
2137 2137 coreconfigitem(
2138 2138 b'trusted',
2139 2139 b'groups',
2140 2140 default=list,
2141 2141 )
2142 2142 coreconfigitem(
2143 2143 b'trusted',
2144 2144 b'users',
2145 2145 default=list,
2146 2146 )
2147 2147 coreconfigitem(
2148 2148 b'ui',
2149 2149 b'_usedassubrepo',
2150 2150 default=False,
2151 2151 )
2152 2152 coreconfigitem(
2153 2153 b'ui',
2154 2154 b'allowemptycommit',
2155 2155 default=False,
2156 2156 )
2157 2157 coreconfigitem(
2158 2158 b'ui',
2159 2159 b'archivemeta',
2160 2160 default=True,
2161 2161 )
2162 2162 coreconfigitem(
2163 2163 b'ui',
2164 2164 b'askusername',
2165 2165 default=False,
2166 2166 )
2167 2167 coreconfigitem(
2168 2168 b'ui',
2169 2169 b'available-memory',
2170 2170 default=None,
2171 2171 )
2172 2172
2173 2173 coreconfigitem(
2174 2174 b'ui',
2175 2175 b'clonebundlefallback',
2176 2176 default=False,
2177 2177 )
2178 2178 coreconfigitem(
2179 2179 b'ui',
2180 2180 b'clonebundleprefers',
2181 2181 default=list,
2182 2182 )
2183 2183 coreconfigitem(
2184 2184 b'ui',
2185 2185 b'clonebundles',
2186 2186 default=True,
2187 2187 )
2188 2188 coreconfigitem(
2189 2189 b'ui',
2190 2190 b'color',
2191 2191 default=b'auto',
2192 2192 )
2193 2193 coreconfigitem(
2194 2194 b'ui',
2195 2195 b'commitsubrepos',
2196 2196 default=False,
2197 2197 )
2198 2198 coreconfigitem(
2199 2199 b'ui',
2200 2200 b'debug',
2201 2201 default=False,
2202 2202 )
2203 2203 coreconfigitem(
2204 2204 b'ui',
2205 2205 b'debugger',
2206 2206 default=None,
2207 2207 )
2208 2208 coreconfigitem(
2209 2209 b'ui',
2210 2210 b'editor',
2211 2211 default=dynamicdefault,
2212 2212 )
2213 2213 coreconfigitem(
2214 2214 b'ui',
2215 2215 b'detailed-exit-code',
2216 2216 default=False,
2217 2217 experimental=True,
2218 2218 )
2219 2219 coreconfigitem(
2220 2220 b'ui',
2221 2221 b'fallbackencoding',
2222 2222 default=None,
2223 2223 )
2224 2224 coreconfigitem(
2225 2225 b'ui',
2226 2226 b'forcecwd',
2227 2227 default=None,
2228 2228 )
2229 2229 coreconfigitem(
2230 2230 b'ui',
2231 2231 b'forcemerge',
2232 2232 default=None,
2233 2233 )
2234 2234 coreconfigitem(
2235 2235 b'ui',
2236 2236 b'formatdebug',
2237 2237 default=False,
2238 2238 )
2239 2239 coreconfigitem(
2240 2240 b'ui',
2241 2241 b'formatjson',
2242 2242 default=False,
2243 2243 )
2244 2244 coreconfigitem(
2245 2245 b'ui',
2246 2246 b'formatted',
2247 2247 default=None,
2248 2248 )
2249 2249 coreconfigitem(
2250 2250 b'ui',
2251 2251 b'interactive',
2252 2252 default=None,
2253 2253 )
2254 2254 coreconfigitem(
2255 2255 b'ui',
2256 2256 b'interface',
2257 2257 default=None,
2258 2258 )
2259 2259 coreconfigitem(
2260 2260 b'ui',
2261 2261 b'interface.chunkselector',
2262 2262 default=None,
2263 2263 )
2264 2264 coreconfigitem(
2265 2265 b'ui',
2266 2266 b'large-file-limit',
2267 2267 default=10000000,
2268 2268 )
2269 2269 coreconfigitem(
2270 2270 b'ui',
2271 2271 b'logblockedtimes',
2272 2272 default=False,
2273 2273 )
2274 2274 coreconfigitem(
2275 2275 b'ui',
2276 2276 b'merge',
2277 2277 default=None,
2278 2278 )
2279 2279 coreconfigitem(
2280 2280 b'ui',
2281 2281 b'mergemarkers',
2282 2282 default=b'basic',
2283 2283 )
2284 2284 coreconfigitem(
2285 2285 b'ui',
2286 2286 b'message-output',
2287 2287 default=b'stdio',
2288 2288 )
2289 2289 coreconfigitem(
2290 2290 b'ui',
2291 2291 b'nontty',
2292 2292 default=False,
2293 2293 )
2294 2294 coreconfigitem(
2295 2295 b'ui',
2296 2296 b'origbackuppath',
2297 2297 default=None,
2298 2298 )
2299 2299 coreconfigitem(
2300 2300 b'ui',
2301 2301 b'paginate',
2302 2302 default=True,
2303 2303 )
2304 2304 coreconfigitem(
2305 2305 b'ui',
2306 2306 b'patch',
2307 2307 default=None,
2308 2308 )
2309 2309 coreconfigitem(
2310 2310 b'ui',
2311 2311 b'portablefilenames',
2312 2312 default=b'warn',
2313 2313 )
2314 2314 coreconfigitem(
2315 2315 b'ui',
2316 2316 b'promptecho',
2317 2317 default=False,
2318 2318 )
2319 2319 coreconfigitem(
2320 2320 b'ui',
2321 2321 b'quiet',
2322 2322 default=False,
2323 2323 )
2324 2324 coreconfigitem(
2325 2325 b'ui',
2326 2326 b'quietbookmarkmove',
2327 2327 default=False,
2328 2328 )
2329 2329 coreconfigitem(
2330 2330 b'ui',
2331 2331 b'relative-paths',
2332 2332 default=b'legacy',
2333 2333 )
2334 2334 coreconfigitem(
2335 2335 b'ui',
2336 2336 b'remotecmd',
2337 2337 default=b'hg',
2338 2338 )
2339 2339 coreconfigitem(
2340 2340 b'ui',
2341 2341 b'report_untrusted',
2342 2342 default=True,
2343 2343 )
2344 2344 coreconfigitem(
2345 2345 b'ui',
2346 2346 b'rollback',
2347 2347 default=True,
2348 2348 )
2349 2349 coreconfigitem(
2350 2350 b'ui',
2351 2351 b'signal-safe-lock',
2352 2352 default=True,
2353 2353 )
2354 2354 coreconfigitem(
2355 2355 b'ui',
2356 2356 b'slash',
2357 2357 default=False,
2358 2358 )
2359 2359 coreconfigitem(
2360 2360 b'ui',
2361 2361 b'ssh',
2362 2362 default=b'ssh',
2363 2363 )
2364 2364 coreconfigitem(
2365 2365 b'ui',
2366 2366 b'ssherrorhint',
2367 2367 default=None,
2368 2368 )
2369 2369 coreconfigitem(
2370 2370 b'ui',
2371 2371 b'statuscopies',
2372 2372 default=False,
2373 2373 )
2374 2374 coreconfigitem(
2375 2375 b'ui',
2376 2376 b'strict',
2377 2377 default=False,
2378 2378 )
2379 2379 coreconfigitem(
2380 2380 b'ui',
2381 2381 b'style',
2382 2382 default=b'',
2383 2383 )
2384 2384 coreconfigitem(
2385 2385 b'ui',
2386 2386 b'supportcontact',
2387 2387 default=None,
2388 2388 )
2389 2389 coreconfigitem(
2390 2390 b'ui',
2391 2391 b'textwidth',
2392 2392 default=78,
2393 2393 )
2394 2394 coreconfigitem(
2395 2395 b'ui',
2396 2396 b'timeout',
2397 2397 default=b'600',
2398 2398 )
2399 2399 coreconfigitem(
2400 2400 b'ui',
2401 2401 b'timeout.warn',
2402 2402 default=0,
2403 2403 )
2404 2404 coreconfigitem(
2405 2405 b'ui',
2406 2406 b'timestamp-output',
2407 2407 default=False,
2408 2408 )
2409 2409 coreconfigitem(
2410 2410 b'ui',
2411 2411 b'traceback',
2412 2412 default=False,
2413 2413 )
2414 2414 coreconfigitem(
2415 2415 b'ui',
2416 2416 b'tweakdefaults',
2417 2417 default=False,
2418 2418 )
2419 2419 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2420 2420 coreconfigitem(
2421 2421 b'ui',
2422 2422 b'verbose',
2423 2423 default=False,
2424 2424 )
2425 2425 coreconfigitem(
2426 2426 b'verify',
2427 2427 b'skipflags',
2428 2428 default=None,
2429 2429 )
2430 2430 coreconfigitem(
2431 2431 b'web',
2432 2432 b'allowbz2',
2433 2433 default=False,
2434 2434 )
2435 2435 coreconfigitem(
2436 2436 b'web',
2437 2437 b'allowgz',
2438 2438 default=False,
2439 2439 )
2440 2440 coreconfigitem(
2441 2441 b'web',
2442 2442 b'allow-pull',
2443 2443 alias=[(b'web', b'allowpull')],
2444 2444 default=True,
2445 2445 )
2446 2446 coreconfigitem(
2447 2447 b'web',
2448 2448 b'allow-push',
2449 2449 alias=[(b'web', b'allow_push')],
2450 2450 default=list,
2451 2451 )
2452 2452 coreconfigitem(
2453 2453 b'web',
2454 2454 b'allowzip',
2455 2455 default=False,
2456 2456 )
2457 2457 coreconfigitem(
2458 2458 b'web',
2459 2459 b'archivesubrepos',
2460 2460 default=False,
2461 2461 )
2462 2462 coreconfigitem(
2463 2463 b'web',
2464 2464 b'cache',
2465 2465 default=True,
2466 2466 )
2467 2467 coreconfigitem(
2468 2468 b'web',
2469 2469 b'comparisoncontext',
2470 2470 default=5,
2471 2471 )
2472 2472 coreconfigitem(
2473 2473 b'web',
2474 2474 b'contact',
2475 2475 default=None,
2476 2476 )
2477 2477 coreconfigitem(
2478 2478 b'web',
2479 2479 b'deny_push',
2480 2480 default=list,
2481 2481 )
2482 2482 coreconfigitem(
2483 2483 b'web',
2484 2484 b'guessmime',
2485 2485 default=False,
2486 2486 )
2487 2487 coreconfigitem(
2488 2488 b'web',
2489 2489 b'hidden',
2490 2490 default=False,
2491 2491 )
2492 2492 coreconfigitem(
2493 2493 b'web',
2494 2494 b'labels',
2495 2495 default=list,
2496 2496 )
2497 2497 coreconfigitem(
2498 2498 b'web',
2499 2499 b'logoimg',
2500 2500 default=b'hglogo.png',
2501 2501 )
2502 2502 coreconfigitem(
2503 2503 b'web',
2504 2504 b'logourl',
2505 2505 default=b'https://mercurial-scm.org/',
2506 2506 )
2507 2507 coreconfigitem(
2508 2508 b'web',
2509 2509 b'accesslog',
2510 2510 default=b'-',
2511 2511 )
2512 2512 coreconfigitem(
2513 2513 b'web',
2514 2514 b'address',
2515 2515 default=b'',
2516 2516 )
2517 2517 coreconfigitem(
2518 2518 b'web',
2519 2519 b'allow-archive',
2520 2520 alias=[(b'web', b'allow_archive')],
2521 2521 default=list,
2522 2522 )
2523 2523 coreconfigitem(
2524 2524 b'web',
2525 2525 b'allow_read',
2526 2526 default=list,
2527 2527 )
2528 2528 coreconfigitem(
2529 2529 b'web',
2530 2530 b'baseurl',
2531 2531 default=None,
2532 2532 )
2533 2533 coreconfigitem(
2534 2534 b'web',
2535 2535 b'cacerts',
2536 2536 default=None,
2537 2537 )
2538 2538 coreconfigitem(
2539 2539 b'web',
2540 2540 b'certificate',
2541 2541 default=None,
2542 2542 )
2543 2543 coreconfigitem(
2544 2544 b'web',
2545 2545 b'collapse',
2546 2546 default=False,
2547 2547 )
2548 2548 coreconfigitem(
2549 2549 b'web',
2550 2550 b'csp',
2551 2551 default=None,
2552 2552 )
2553 2553 coreconfigitem(
2554 2554 b'web',
2555 2555 b'deny_read',
2556 2556 default=list,
2557 2557 )
2558 2558 coreconfigitem(
2559 2559 b'web',
2560 2560 b'descend',
2561 2561 default=True,
2562 2562 )
2563 2563 coreconfigitem(
2564 2564 b'web',
2565 2565 b'description',
2566 2566 default=b"",
2567 2567 )
2568 2568 coreconfigitem(
2569 2569 b'web',
2570 2570 b'encoding',
2571 2571 default=lambda: encoding.encoding,
2572 2572 )
2573 2573 coreconfigitem(
2574 2574 b'web',
2575 2575 b'errorlog',
2576 2576 default=b'-',
2577 2577 )
2578 2578 coreconfigitem(
2579 2579 b'web',
2580 2580 b'ipv6',
2581 2581 default=False,
2582 2582 )
2583 2583 coreconfigitem(
2584 2584 b'web',
2585 2585 b'maxchanges',
2586 2586 default=10,
2587 2587 )
2588 2588 coreconfigitem(
2589 2589 b'web',
2590 2590 b'maxfiles',
2591 2591 default=10,
2592 2592 )
2593 2593 coreconfigitem(
2594 2594 b'web',
2595 2595 b'maxshortchanges',
2596 2596 default=60,
2597 2597 )
2598 2598 coreconfigitem(
2599 2599 b'web',
2600 2600 b'motd',
2601 2601 default=b'',
2602 2602 )
2603 2603 coreconfigitem(
2604 2604 b'web',
2605 2605 b'name',
2606 2606 default=dynamicdefault,
2607 2607 )
2608 2608 coreconfigitem(
2609 2609 b'web',
2610 2610 b'port',
2611 2611 default=8000,
2612 2612 )
2613 2613 coreconfigitem(
2614 2614 b'web',
2615 2615 b'prefix',
2616 2616 default=b'',
2617 2617 )
2618 2618 coreconfigitem(
2619 2619 b'web',
2620 2620 b'push_ssl',
2621 2621 default=True,
2622 2622 )
2623 2623 coreconfigitem(
2624 2624 b'web',
2625 2625 b'refreshinterval',
2626 2626 default=20,
2627 2627 )
2628 2628 coreconfigitem(
2629 2629 b'web',
2630 2630 b'server-header',
2631 2631 default=None,
2632 2632 )
2633 2633 coreconfigitem(
2634 2634 b'web',
2635 2635 b'static',
2636 2636 default=None,
2637 2637 )
2638 2638 coreconfigitem(
2639 2639 b'web',
2640 2640 b'staticurl',
2641 2641 default=None,
2642 2642 )
2643 2643 coreconfigitem(
2644 2644 b'web',
2645 2645 b'stripes',
2646 2646 default=1,
2647 2647 )
2648 2648 coreconfigitem(
2649 2649 b'web',
2650 2650 b'style',
2651 2651 default=b'paper',
2652 2652 )
2653 2653 coreconfigitem(
2654 2654 b'web',
2655 2655 b'templates',
2656 2656 default=None,
2657 2657 )
2658 2658 coreconfigitem(
2659 2659 b'web',
2660 2660 b'view',
2661 2661 default=b'served',
2662 2662 experimental=True,
2663 2663 )
2664 2664 coreconfigitem(
2665 2665 b'worker',
2666 2666 b'backgroundclose',
2667 2667 default=dynamicdefault,
2668 2668 )
2669 2669 # Windows defaults to a limit of 512 open files. A buffer of 128
2670 2670 # should give us enough headway.
2671 2671 coreconfigitem(
2672 2672 b'worker',
2673 2673 b'backgroundclosemaxqueue',
2674 2674 default=384,
2675 2675 )
2676 2676 coreconfigitem(
2677 2677 b'worker',
2678 2678 b'backgroundcloseminfilecount',
2679 2679 default=2048,
2680 2680 )
2681 2681 coreconfigitem(
2682 2682 b'worker',
2683 2683 b'backgroundclosethreadcount',
2684 2684 default=4,
2685 2685 )
2686 2686 coreconfigitem(
2687 2687 b'worker',
2688 2688 b'enabled',
2689 2689 default=True,
2690 2690 )
2691 2691 coreconfigitem(
2692 2692 b'worker',
2693 2693 b'numcpus',
2694 2694 default=None,
2695 2695 )
2696 2696
2697 2697 # Rebase related configuration moved to core because other extensions are doing
2698 2698 # strange things. For example, shelve imports the extension to reuse some bits
2699 2699 # without formally loading it.
2700 2700 coreconfigitem(
2701 2701 b'commands',
2702 2702 b'rebase.requiredest',
2703 2703 default=False,
2704 2704 )
2705 2705 coreconfigitem(
2706 2706 b'experimental',
2707 2707 b'rebaseskipobsolete',
2708 2708 default=True,
2709 2709 )
2710 2710 coreconfigitem(
2711 2711 b'rebase',
2712 2712 b'singletransaction',
2713 2713 default=False,
2714 2714 )
2715 2715 coreconfigitem(
2716 2716 b'rebase',
2717 2717 b'experimental.inmemory',
2718 2718 default=False,
2719 2719 )
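# A hedged aside on the alias pattern that recurs in the declarations
# above (e.g. b'web.allow-pull' aliased to the legacy b'web.allowpull'):
# the alias names are consulted as alternatives when an item is looked
# up, so values written under the old spelling keep working. A minimal
# sketch of the idea, simplified from the real resolution done in the
# ui config layer:
#
#   def lookup(values, item):
#       # try the canonical (section, name) first, then each alias
#       for section, name in [(item.section, item.name)] + item.alias:
#           if (section, name) in values:
#               return values[(section, name)]
#       return item.default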
@@ -1,3475 +1,3535 b''
1 1 # revlog.py - storage back-end for mercurial
2 # coding: utf8
2 3 #
3 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 5 #
5 6 # This software may be used and distributed according to the terms of the
6 7 # GNU General Public License version 2 or any later version.
7 8
8 9 """Storage back-end for Mercurial.
9 10
10 11 This provides efficient delta storage with O(1) retrieve and append
11 12 and O(changes) merge between branches.
12 13 """
13 14
14 15 from __future__ import absolute_import
15 16
16 17 import binascii
17 18 import collections
18 19 import contextlib
19 20 import errno
20 21 import io
21 22 import os
22 23 import struct
23 24 import zlib
24 25
25 26 # import stuff from node for others to import from revlog
26 27 from .node import (
27 28 bin,
28 29 hex,
29 30 nullrev,
30 31 sha1nodeconstants,
31 32 short,
32 33 wdirrev,
33 34 )
34 35 from .i18n import _
35 36 from .pycompat import getattr
36 37 from .revlogutils.constants import (
37 38 ALL_KINDS,
38 39 CHANGELOGV2,
39 40 COMP_MODE_DEFAULT,
40 41 COMP_MODE_INLINE,
41 42 COMP_MODE_PLAIN,
42 43 FEATURES_BY_VERSION,
43 44 FLAG_GENERALDELTA,
44 45 FLAG_INLINE_DATA,
45 46 INDEX_HEADER,
46 47 KIND_CHANGELOG,
47 48 REVLOGV0,
48 49 REVLOGV1,
49 50 REVLOGV1_FLAGS,
50 51 REVLOGV2,
51 52 REVLOGV2_FLAGS,
52 53 REVLOG_DEFAULT_FLAGS,
53 54 REVLOG_DEFAULT_FORMAT,
54 55 REVLOG_DEFAULT_VERSION,
55 56 SUPPORTED_FLAGS,
56 57 )
57 58 from .revlogutils.flagutil import (
58 59 REVIDX_DEFAULT_FLAGS,
59 60 REVIDX_ELLIPSIS,
60 61 REVIDX_EXTSTORED,
61 62 REVIDX_FLAGS_ORDER,
62 63 REVIDX_HASCOPIESINFO,
63 64 REVIDX_ISCENSORED,
64 65 REVIDX_RAWTEXT_CHANGING_FLAGS,
65 66 )
66 67 from .thirdparty import attr
67 68 from . import (
68 69 ancestor,
69 70 dagop,
70 71 error,
71 72 mdiff,
72 73 policy,
73 74 pycompat,
74 75 templatefilters,
75 76 util,
76 77 )
77 78 from .interfaces import (
78 79 repository,
79 80 util as interfaceutil,
80 81 )
81 82 from .revlogutils import (
82 83 deltas as deltautil,
83 84 docket as docketutil,
84 85 flagutil,
85 86 nodemap as nodemaputil,
86 87 revlogv0,
87 88 sidedata as sidedatautil,
88 89 )
89 90 from .utils import (
90 91 storageutil,
91 92 stringutil,
92 93 )
93 94
94 95 # blanked usage of all the names to prevent pyflakes constraints
95 96 # We need these names available in the module for extensions.
96 97
97 98 REVLOGV0
98 99 REVLOGV1
99 100 REVLOGV2
100 101 FLAG_INLINE_DATA
101 102 FLAG_GENERALDELTA
102 103 REVLOG_DEFAULT_FLAGS
103 104 REVLOG_DEFAULT_FORMAT
104 105 REVLOG_DEFAULT_VERSION
105 106 REVLOGV1_FLAGS
106 107 REVLOGV2_FLAGS
107 108 REVIDX_ISCENSORED
108 109 REVIDX_ELLIPSIS
109 110 REVIDX_HASCOPIESINFO
110 111 REVIDX_EXTSTORED
111 112 REVIDX_DEFAULT_FLAGS
112 113 REVIDX_FLAGS_ORDER
113 114 REVIDX_RAWTEXT_CHANGING_FLAGS
114 115
115 116 parsers = policy.importmod('parsers')
116 117 rustancestor = policy.importrust('ancestor')
117 118 rustdagop = policy.importrust('dagop')
118 119 rustrevlog = policy.importrust('revlog')
119 120
120 121 # Aliased for performance.
121 122 _zlibdecompress = zlib.decompress
122 123
123 124 # max size of revlog with inline data
124 125 _maxinline = 131072
125 126 _chunksize = 1048576
126 127
127 128 # Flag processors for REVIDX_ELLIPSIS.
128 129 def ellipsisreadprocessor(rl, text):
129 130 return text, False
130 131
131 132
132 133 def ellipsiswriteprocessor(rl, text):
133 134 return text, False
134 135
135 136
136 137 def ellipsisrawprocessor(rl, text):
137 138 return False
138 139
139 140
140 141 ellipsisprocessor = (
141 142 ellipsisreadprocessor,
142 143 ellipsiswriteprocessor,
143 144 ellipsisrawprocessor,
144 145 )
145 146
146 147
147 148 def offset_type(offset, type):
148 149 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
149 150 raise ValueError(b'unknown revlog index flags')
150 151 return int(int(offset) << 16 | type)
151 152
152 153
153 154 def _verify_revision(rl, skipflags, state, node):
154 155 """Verify the integrity of the given revlog ``node`` while providing a hook
155 156 point for extensions to influence the operation."""
156 157 if skipflags:
157 158 state[b'skipread'].add(node)
158 159 else:
159 160 # Side-effect: read content and verify hash.
160 161 rl.revision(node)
161 162
162 163
163 164 # True if a fast implementation for persistent-nodemap is available
164 165 #
165 166 # We also consider we have a "fast" implementation in "pure" python because
166 167 # people using pure don't really have performance considerations (and a
167 168 # wheelbarrow of other slowness sources)
168 169 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
169 170 parsers, 'BaseIndexObject'
170 171 )
171 172
172 173
173 174 @attr.s(slots=True, frozen=True)
174 175 class _revisioninfo(object):
175 176 """Information about a revision that allows building its fulltext
176 177 node: expected hash of the revision
177 178 p1, p2: parent revs of the revision
178 179 btext: built text cache consisting of a one-element list
179 180 cachedelta: (baserev, uncompressed_delta) or None
180 181 flags: flags associated to the revision storage
181 182
182 183 One of btext[0] or cachedelta must be set.
183 184 """
184 185
185 186 node = attr.ib()
186 187 p1 = attr.ib()
187 188 p2 = attr.ib()
188 189 btext = attr.ib()
189 190 textlen = attr.ib()
190 191 cachedelta = attr.ib()
191 192 flags = attr.ib()
192 193
193 194
194 195 @interfaceutil.implementer(repository.irevisiondelta)
195 196 @attr.s(slots=True)
196 197 class revlogrevisiondelta(object):
197 198 node = attr.ib()
198 199 p1node = attr.ib()
199 200 p2node = attr.ib()
200 201 basenode = attr.ib()
201 202 flags = attr.ib()
202 203 baserevisionsize = attr.ib()
203 204 revision = attr.ib()
204 205 delta = attr.ib()
205 206 sidedata = attr.ib()
206 207 protocol_flags = attr.ib()
207 208 linknode = attr.ib(default=None)
208 209
209 210
210 211 @interfaceutil.implementer(repository.iverifyproblem)
211 212 @attr.s(frozen=True)
212 213 class revlogproblem(object):
213 214 warning = attr.ib(default=None)
214 215 error = attr.ib(default=None)
215 216 node = attr.ib(default=None)
216 217
217 218
218 219 def parse_index_v1(data, inline):
219 220 # call the C implementation to parse the index data
220 221 index, cache = parsers.parse_index2(data, inline)
221 222 return index, cache
222 223
223 224
224 225 def parse_index_v2(data, inline):
225 226 # call the C implementation to parse the index data
226 227 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
227 228 return index, cache
228 229
229 230
230 231 def parse_index_cl_v2(data, inline):
231 232 # call the C implementation to parse the index data
232 233 assert not inline
233 234 from .pure.parsers import parse_index_cl_v2
234 235
235 236 index, cache = parse_index_cl_v2(data)
236 237 return index, cache
237 238
238 239
239 240 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
240 241
241 242 def parse_index_v1_nodemap(data, inline):
242 243 index, cache = parsers.parse_index_devel_nodemap(data, inline)
243 244 return index, cache
244 245
245 246
246 247 else:
247 248 parse_index_v1_nodemap = None
248 249
249 250
250 251 def parse_index_v1_mixed(data, inline):
251 252 index, cache = parse_index_v1(data, inline)
252 253 return rustrevlog.MixedIndex(index), cache
253 254
254 255
255 256 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
256 257 # signed integer)
257 258 _maxentrysize = 0x7FFFFFFF
258 259
259 260 PARTIAL_READ_MSG = _(
260 261 b'partial read of revlog %s; expected %d bytes from offset %d, got %d'
261 262 )
262 263
264 FILE_TOO_SHORT_MSG = _(
265 b'cannot read from revlog %s;'
266 b' expected %d bytes from offset %d, data size is %d'
267 )
268
263 269
264 270 class revlog(object):
265 271 """
266 272 the underlying revision storage object
267 273
268 274 A revlog consists of two parts, an index and the revision data.
269 275
270 276 The index is a file with a fixed record size containing
271 277 information on each revision, including its nodeid (hash), the
272 278 nodeids of its parents, the position and offset of its data within
273 279 the data file, and the revision it's based on. Finally, each entry
274 280 contains a linkrev entry that can serve as a pointer to external
275 281 data.
276 282
277 283 The revision data itself is a linear collection of data chunks.
278 284 Each chunk represents a revision and is usually represented as a
279 285 delta against the previous chunk. To bound lookup time, runs of
280 286 deltas are limited to about 2 times the length of the original
281 287 version data. This makes retrieval of a version proportional to
282 288 its size, or O(1) relative to the number of revisions.
283 289
284 290 Both pieces of the revlog are written to in an append-only
285 291 fashion, which means we never need to rewrite a file to insert or
286 292 remove data, and can use some simple techniques to avoid the need
287 293 for locking while reading.
288 294
289 295 If checkambig, indexfile is opened with checkambig=True at
290 296 writing, to avoid file stat ambiguity.
291 297
292 298 If mmaplargeindex is True, and an mmapindexthreshold is set, the
293 299 index will be mmapped rather than read if it is larger than the
294 300 configured threshold.
295 301
296 302 If censorable is True, the revlog can have censored revisions.
297 303
298 304 If `upperboundcomp` is not None, this is the expected maximal gain from
299 305 compression for the data content.
300 306
301 307 `concurrencychecker` is an optional function that receives 3 arguments: a
302 308 file handle, a filename, and an expected position. It should check whether
303 309 the current position in the file handle is valid, and log/warn/fail (by
304 310 raising).
305 311
306 312
307 313 Internal details
308 314 ----------------
309 315
310 316 A large part of the revlog logic deals with revisions' "index entries", tuple
311 317 objects that contain the same "items" whatever the revlog version.
312 318 Different versions will have different ways of storing these items (sometimes
313 319 not having them at all), but the tuple will always be the same. New fields
314 320 are usually added at the end to avoid breaking existing code that relies
315 321 on the existing order. The fields are defined as follows:
316 322
317 323 [0] offset:
318 324 The byte index of the start of revision data chunk.
319 325 That value is shifted up by 16 bits. Use "offset = field >> 16" to
320 326 retrieve it.
321 327
322 328 flags:
323 329 A flag field that carries special information or changes the behavior
324 330 of the revision. (see `REVIDX_*` constants for details)
325 331 The flag field only occupies the first 16 bits of this field,
326 332 use "flags = field & 0xFFFF" to retrieve the value.
327 333
328 334 [1] compressed length:
329 335 The size, in bytes, of the chunk on disk
330 336
331 337 [2] uncompressed length:
332 338 The size, in bytes, of the full revision once reconstructed.
333 339
334 340 [3] base rev:
335 341 Either the base of the revision delta chain (without general
336 342 delta), or the base of the delta (stored in the data chunk)
337 343 with general delta.
338 344
339 345 [4] link rev:
340 346 Changelog revision number of the changeset introducing this
341 347 revision.
342 348
343 349 [5] parent 1 rev:
344 350 Revision number of the first parent
345 351
346 352 [6] parent 2 rev:
347 353 Revision number of the second parent
348 354
349 355 [7] node id:
350 356 The node id of the current revision
351 357
352 358 [8] sidedata offset:
353 359 The byte index of the start of the revision's side-data chunk.
354 360
355 361 [9] sidedata chunk length:
356 362 The size, in bytes, of the revision's side-data chunk.
357 363
358 364 [10] data compression mode:
359 365 two bits that detail the way the data chunk is compressed on disk.
360 366 (see "COMP_MODE_*" constants for details). For revlog version 0 and
361 367 1 this will always be COMP_MODE_INLINE.
362 368
363 369 [11] side-data compression mode:
364 370 two bits that detail the way the sidedata chunk is compressed on disk.
365 371 (see "COMP_MODE_*" constants for details)
366 372 """
367 373
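    # An illustrative reading of the entry layout documented above; the
    # field indexes refer to the numbered list in the docstring and `e`
    # stands for one index entry tuple:
    #
    #   offset = e[0] >> 16              # [0] start of the data chunk
    #   flags = e[0] & 0xFFFF            # [0] REVIDX_* flags
    #   comp_len, raw_len = e[1], e[2]   # [1]/[2] chunk and text sizes
    #   base_rev, link_rev = e[3], e[4]  # [3]/[4] delta base and linkrev
    #   p1_rev, p2_rev, node = e[5], e[6], e[7]
    #   sd_offset, sd_len = e[8], e[9]   # [8]/[9] side-data chunk location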
368 374 _flagserrorclass = error.RevlogError
369 375
370 376 def __init__(
371 377 self,
372 378 opener,
373 379 target,
374 380 radix,
375 381 postfix=None, # only exists for `tmpcensored` now
376 382 checkambig=False,
377 383 mmaplargeindex=False,
378 384 censorable=False,
379 385 upperboundcomp=None,
380 386 persistentnodemap=False,
381 387 concurrencychecker=None,
382 388 trypending=False,
383 389 ):
384 390 """
385 391 create a revlog object
386 392
387 393 opener is a function that abstracts the file opening operation
388 394 and can be used to implement COW semantics or the like.
389 395
390 396 `target`: a (KIND, ID) tuple that identifies the content stored in
391 397 this revlog. It helps the rest of the code understand what the revlog
392 398 is about without having to resort to heuristics and index filename
393 399 analysis. Note that this must reliably be set by normal code, but
394 400 test, debug, or performance measurement code might not set it to an
395 401 accurate value.
396 402 """
397 403 self.upperboundcomp = upperboundcomp
398 404
399 405 self.radix = radix
400 406
401 407 self._docket_file = None
402 408 self._indexfile = None
403 409 self._datafile = None
410 self._sidedatafile = None
404 411 self._nodemap_file = None
405 412 self.postfix = postfix
406 413 self._trypending = trypending
407 414 self.opener = opener
408 415 if persistentnodemap:
409 416 self._nodemap_file = nodemaputil.get_nodemap_file(self)
410 417
411 418 assert target[0] in ALL_KINDS
412 419 assert len(target) == 2
413 420 self.target = target
414 421 # When True, indexfile is opened with checkambig=True at writing, to
415 422 # avoid file stat ambiguity.
416 423 self._checkambig = checkambig
417 424 self._mmaplargeindex = mmaplargeindex
418 425 self._censorable = censorable
419 426 # 3-tuple of (node, rev, text) for a raw revision.
420 427 self._revisioncache = None
421 428 # Maps rev to chain base rev.
422 429 self._chainbasecache = util.lrucachedict(100)
423 430 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
424 431 self._chunkcache = (0, b'')
425 432 # How much data to read and cache into the raw revlog data cache.
426 433 self._chunkcachesize = 65536
427 434 self._maxchainlen = None
428 435 self._deltabothparents = True
429 436 self.index = None
430 437 self._docket = None
431 438 self._nodemap_docket = None
432 439 # Mapping of partial identifiers to full nodes.
433 440 self._pcache = {}
434 441 # Mapping of revision integer to full node.
435 442 self._compengine = b'zlib'
436 443 self._compengineopts = {}
437 444 self._maxdeltachainspan = -1
438 445 self._withsparseread = False
439 446 self._sparserevlog = False
440 447 self.hassidedata = False
441 448 self._srdensitythreshold = 0.50
442 449 self._srmingapsize = 262144
443 450
444 451 # Make copy of flag processors so each revlog instance can support
445 452 # custom flags.
446 453 self._flagprocessors = dict(flagutil.flagprocessors)
447 454
448 # 2-tuple of file handles being used for active writing.
455 # 3-tuple of file handles being used for active writing.
449 456 self._writinghandles = None
450 457 # prevent nesting of addgroup
451 458 self._adding_group = None
452 459
453 460 self._loadindex()
454 461
455 462 self._concurrencychecker = concurrencychecker
456 463
457 464 def _init_opts(self):
458 465 """process options (from above/config) to setup associated default revlog mode
459 466
460 467 These values might be affected when actually reading on disk information.
461 468
462 469 The relevant values are returned for use in _loadindex().
463 470
464 471 * newversionflags:
465 472 version header to use if we need to create a new revlog
466 473
467 474 * mmapindexthreshold:
468 475 minimal index size at which to start using mmap
469 476
470 477 * force_nodemap:
471 478 force the usage of a "development" version of the nodemap code
472 479 """
473 480 mmapindexthreshold = None
474 481 opts = self.opener.options
475 482
476 483 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
477 484 new_header = CHANGELOGV2
478 485 elif b'revlogv2' in opts:
479 486 new_header = REVLOGV2
480 487 elif b'revlogv1' in opts:
481 488 new_header = REVLOGV1 | FLAG_INLINE_DATA
482 489 if b'generaldelta' in opts:
483 490 new_header |= FLAG_GENERALDELTA
484 491 elif b'revlogv0' in self.opener.options:
485 492 new_header = REVLOGV0
486 493 else:
487 494 new_header = REVLOG_DEFAULT_VERSION
488 495
489 496 if b'chunkcachesize' in opts:
490 497 self._chunkcachesize = opts[b'chunkcachesize']
491 498 if b'maxchainlen' in opts:
492 499 self._maxchainlen = opts[b'maxchainlen']
493 500 if b'deltabothparents' in opts:
494 501 self._deltabothparents = opts[b'deltabothparents']
495 502 self._lazydelta = bool(opts.get(b'lazydelta', True))
496 503 self._lazydeltabase = False
497 504 if self._lazydelta:
498 505 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
499 506 if b'compengine' in opts:
500 507 self._compengine = opts[b'compengine']
501 508 if b'zlib.level' in opts:
502 509 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
503 510 if b'zstd.level' in opts:
504 511 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
505 512 if b'maxdeltachainspan' in opts:
506 513 self._maxdeltachainspan = opts[b'maxdeltachainspan']
507 514 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
508 515 mmapindexthreshold = opts[b'mmapindexthreshold']
509 516 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
510 517 withsparseread = bool(opts.get(b'with-sparse-read', False))
511 518 # sparse-revlog forces sparse-read
512 519 self._withsparseread = self._sparserevlog or withsparseread
513 520 if b'sparse-read-density-threshold' in opts:
514 521 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
515 522 if b'sparse-read-min-gap-size' in opts:
516 523 self._srmingapsize = opts[b'sparse-read-min-gap-size']
517 524 if opts.get(b'enableellipsis'):
518 525 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
519 526
520 527 # revlog v0 doesn't have flag processors
521 528 for flag, processor in pycompat.iteritems(
522 529 opts.get(b'flagprocessors', {})
523 530 ):
524 531 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
525 532
526 533 if self._chunkcachesize <= 0:
527 534 raise error.RevlogError(
528 535 _(b'revlog chunk cache size %r is not greater than 0')
529 536 % self._chunkcachesize
530 537 )
531 538 elif self._chunkcachesize & (self._chunkcachesize - 1):
532 539 raise error.RevlogError(
533 540 _(b'revlog chunk cache size %r is not a power of 2')
534 541 % self._chunkcachesize
535 542 )
536 543 force_nodemap = opts.get(b'devel-force-nodemap', False)
537 544 return new_header, mmapindexthreshold, force_nodemap
538 545
539 546 def _get_data(self, filepath, mmap_threshold, size=None):
540 547 """return a file content with or without mmap
541 548
542 549 If the file is missing return the empty string"""
543 550 try:
544 551 with self.opener(filepath) as fp:
545 552 if mmap_threshold is not None:
546 553 file_size = self.opener.fstat(fp).st_size
547 554 if file_size >= mmap_threshold:
548 555 if size is not None:
549 556 # avoid potential mmap crash
550 557 size = min(file_size, size)
551 558 # TODO: should call .close() to release resources without
552 559 # relying on Python GC
553 560 if size is None:
554 561 return util.buffer(util.mmapread(fp))
555 562 else:
556 563 return util.buffer(util.mmapread(fp, size))
557 564 if size is None:
558 565 return fp.read()
559 566 else:
560 567 return fp.read(size)
561 568 except IOError as inst:
562 569 if inst.errno != errno.ENOENT:
563 570 raise
564 571 return b''
565 572
566 573 def _loadindex(self):
567 574
568 575 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
569 576
570 577 if self.postfix is not None:
571 578 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
572 579 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
573 580 entry_point = b'%s.i.a' % self.radix
574 581 else:
575 582 entry_point = b'%s.i' % self.radix
576 583
577 584 entry_data = b''
578 585 self._initempty = True
579 586 entry_data = self._get_data(entry_point, mmapindexthreshold)
580 587 if len(entry_data) > 0:
581 588 header = INDEX_HEADER.unpack(entry_data[:4])[0]
582 589 self._initempty = False
583 590 else:
584 591 header = new_header
585 592
586 593 self._format_flags = header & ~0xFFFF
587 594 self._format_version = header & 0xFFFF
588 595
589 596 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
590 597 if supported_flags is None:
591 598 msg = _(b'unknown version (%d) in revlog %s')
592 599 msg %= (self._format_version, self.display_id)
593 600 raise error.RevlogError(msg)
594 601 elif self._format_flags & ~supported_flags:
595 602 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
596 603 display_flag = self._format_flags >> 16
597 604 msg %= (display_flag, self._format_version, self.display_id)
598 605 raise error.RevlogError(msg)
599 606
600 607 features = FEATURES_BY_VERSION[self._format_version]
601 608 self._inline = features[b'inline'](self._format_flags)
602 609 self._generaldelta = features[b'generaldelta'](self._format_flags)
603 610 self.hassidedata = features[b'sidedata']
604 611
605 612 if not features[b'docket']:
606 613 self._indexfile = entry_point
607 614 index_data = entry_data
608 615 else:
609 616 self._docket_file = entry_point
610 617 if self._initempty:
611 618 self._docket = docketutil.default_docket(self, header)
612 619 else:
613 620 self._docket = docketutil.parse_docket(
614 621 self, entry_data, use_pending=self._trypending
615 622 )
616 623 self._indexfile = self._docket.index_filepath()
617 624 index_data = b''
618 625 index_size = self._docket.index_end
619 626 if index_size > 0:
620 627 index_data = self._get_data(
621 628 self._indexfile, mmapindexthreshold, size=index_size
622 629 )
623 630 if len(index_data) < index_size:
624 631 msg = _(b'too few index data for %s: got %d, expected %d')
625 632 msg %= (self.display_id, len(index_data), index_size)
626 633 raise error.RevlogError(msg)
627 634
628 635 self._inline = False
629 636 # generaldelta implied by version 2 revlogs.
630 637 self._generaldelta = True
631 638 # the logic for persistent nodemap will be dealt with within the
632 639 # main docket, so disable it for now.
633 640 self._nodemap_file = None
634 641
635 642 if self._docket is not None:
636 643 self._datafile = self._docket.data_filepath()
644 self._sidedatafile = self._docket.sidedata_filepath()
637 645 elif self.postfix is None:
638 646 self._datafile = b'%s.d' % self.radix
639 647 else:
640 648 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
641 649
642 650 self.nodeconstants = sha1nodeconstants
643 651 self.nullid = self.nodeconstants.nullid
644 652
645 653 # sparse-revlog can't be on without general-delta (issue6056)
646 654 if not self._generaldelta:
647 655 self._sparserevlog = False
648 656
649 657 self._storedeltachains = True
650 658
651 659 devel_nodemap = (
652 660 self._nodemap_file
653 661 and force_nodemap
654 662 and parse_index_v1_nodemap is not None
655 663 )
656 664
657 665 use_rust_index = False
658 666 if rustrevlog is not None:
659 667 if self._nodemap_file is not None:
660 668 use_rust_index = True
661 669 else:
662 670 use_rust_index = self.opener.options.get(b'rust.index')
663 671
664 672 self._parse_index = parse_index_v1
665 673 if self._format_version == REVLOGV0:
666 674 self._parse_index = revlogv0.parse_index_v0
667 675 elif self._format_version == REVLOGV2:
668 676 self._parse_index = parse_index_v2
669 677 elif self._format_version == CHANGELOGV2:
670 678 self._parse_index = parse_index_cl_v2
671 679 elif devel_nodemap:
672 680 self._parse_index = parse_index_v1_nodemap
673 681 elif use_rust_index:
674 682 self._parse_index = parse_index_v1_mixed
675 683 try:
676 684 d = self._parse_index(index_data, self._inline)
677 685 index, _chunkcache = d
678 686 use_nodemap = (
679 687 not self._inline
680 688 and self._nodemap_file is not None
681 689 and util.safehasattr(index, 'update_nodemap_data')
682 690 )
683 691 if use_nodemap:
684 692 nodemap_data = nodemaputil.persisted_data(self)
685 693 if nodemap_data is not None:
686 694 docket = nodemap_data[0]
687 695 if (
688 696 len(d[0]) > docket.tip_rev
689 697 and d[0][docket.tip_rev][7] == docket.tip_node
690 698 ):
691 699 # no changelog tampering
692 700 self._nodemap_docket = docket
693 701 index.update_nodemap_data(*nodemap_data)
694 702 except (ValueError, IndexError):
695 703 raise error.RevlogError(
696 704 _(b"index %s is corrupted") % self.display_id
697 705 )
698 706 self.index, self._chunkcache = d
699 707 if not self._chunkcache:
700 708 self._chunkclear()
701 709 # revnum -> (chain-length, sum-delta-length)
702 710 self._chaininfocache = util.lrucachedict(500)
703 711 # revlog header -> revlog compressor
704 712 self._decompressors = {}
705 713
706 714 @util.propertycache
707 715 def revlog_kind(self):
708 716 return self.target[0]
709 717
710 718 @util.propertycache
711 719 def display_id(self):
712 720 """The public facing "ID" of the revlog that we use in message"""
713 721 # Maybe we should build a user facing representation of
714 722 # revlog.target instead of using `self.radix`
715 723 return self.radix
716 724
717 725 def _get_decompressor(self, t):
718 726 try:
719 727 compressor = self._decompressors[t]
720 728 except KeyError:
721 729 try:
722 730 engine = util.compengines.forrevlogheader(t)
723 731 compressor = engine.revlogcompressor(self._compengineopts)
724 732 self._decompressors[t] = compressor
725 733 except KeyError:
726 734 raise error.RevlogError(
727 735 _(b'unknown compression type %s') % binascii.hexlify(t)
728 736 )
729 737 return compressor
730 738
731 739 @util.propertycache
732 740 def _compressor(self):
733 741 engine = util.compengines[self._compengine]
734 742 return engine.revlogcompressor(self._compengineopts)
735 743
736 744 @util.propertycache
737 745 def _decompressor(self):
738 746 """the default decompressor"""
739 747 if self._docket is None:
740 748 return None
741 749 t = self._docket.default_compression_header
742 750 c = self._get_decompressor(t)
743 751 return c.decompress
744 752
745 753 def _indexfp(self):
746 754 """file object for the revlog's index file"""
747 755 return self.opener(self._indexfile, mode=b"r")
748 756
749 757 def __index_write_fp(self):
750 758 # You should not use this directly; use `_writing` instead
751 759 try:
752 760 f = self.opener(
753 761 self._indexfile, mode=b"r+", checkambig=self._checkambig
754 762 )
755 763 if self._docket is None:
756 764 f.seek(0, os.SEEK_END)
757 765 else:
758 766 f.seek(self._docket.index_end, os.SEEK_SET)
759 767 return f
760 768 except IOError as inst:
761 769 if inst.errno != errno.ENOENT:
762 770 raise
763 771 return self.opener(
764 772 self._indexfile, mode=b"w+", checkambig=self._checkambig
765 773 )
766 774
767 775 def __index_new_fp(self):
768 776 # You should not use this unless you are upgrading from inline revlog
769 777 return self.opener(
770 778 self._indexfile,
771 779 mode=b"w",
772 780 checkambig=self._checkambig,
773 781 atomictemp=True,
774 782 )
775 783
776 784 def _datafp(self, mode=b'r'):
777 785 """file object for the revlog's data file"""
778 786 return self.opener(self._datafile, mode=mode)
779 787
780 788 @contextlib.contextmanager
781 789 def _datareadfp(self, existingfp=None):
782 790 """file object suitable to read data"""
783 791 # Use explicit file handle, if given.
784 792 if existingfp is not None:
785 793 yield existingfp
786 794
787 795 # Use a file handle being actively used for writes, if available.
788 796 # There is some danger to doing this because reads will seek the
789 797 # file. However, _writeentry() performs a SEEK_END before all writes,
790 798 # so we should be safe.
791 799 elif self._writinghandles:
792 800 if self._inline:
793 801 yield self._writinghandles[0]
794 802 else:
795 803 yield self._writinghandles[1]
796 804
797 805 # Otherwise open a new file handle.
798 806 else:
799 807 if self._inline:
800 808 func = self._indexfp
801 809 else:
802 810 func = self._datafp
803 811 with func() as fp:
804 812 yield fp
805 813
814 @contextlib.contextmanager
806 815 def _sidedatareadfp(self):
807 816 """file object suitable to read sidedata"""
808 return self._datareadfp()
817 if self._writinghandles:
818 yield self._writinghandles[2]
819 else:
820 with self.opener(self._sidedatafile) as fp:
821 yield fp
809 822
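# A hedged sketch of how a reader can use the new side-data handle above:
# seek to the side-data offset recorded in the index entry and read the
# chunk. `read_one_sidedata` is an illustrative name, not part of this
# change, and the sketch ignores the side-data compression mode stored in
# entry field [11]; e[8]/e[9] are the documented offset and length.
#
#   def read_one_sidedata(rl, rev):
#       offset = rl.index[rev][8]
#       length = rl.index[rev][9]
#       with rl._sidedatareadfp() as fp:
#           fp.seek(offset)
#           return fp.read(length)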
810 823 def tiprev(self):
811 824 return len(self.index) - 1
812 825
813 826 def tip(self):
814 827 return self.node(self.tiprev())
815 828
816 829 def __contains__(self, rev):
817 830 return 0 <= rev < len(self)
818 831
819 832 def __len__(self):
820 833 return len(self.index)
821 834
822 835 def __iter__(self):
823 836 return iter(pycompat.xrange(len(self)))
824 837
825 838 def revs(self, start=0, stop=None):
826 839 """iterate over all rev in this revlog (from start to stop)"""
827 840 return storageutil.iterrevs(len(self), start=start, stop=stop)
828 841
829 842 @property
830 843 def nodemap(self):
831 844 msg = (
832 845 b"revlog.nodemap is deprecated, "
833 846 b"use revlog.index.[has_node|rev|get_rev]"
834 847 )
835 848 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
836 849 return self.index.nodemap
837 850
838 851 @property
839 852 def _nodecache(self):
840 853 msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
841 854 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
842 855 return self.index.nodemap
843 856
844 857 def hasnode(self, node):
845 858 try:
846 859 self.rev(node)
847 860 return True
848 861 except KeyError:
849 862 return False
850 863
851 864 def candelta(self, baserev, rev):
852 865 """whether two revisions (baserev, rev) can be delta-ed or not"""
853 866 # Disable delta if either rev requires a content-changing flag
854 867 # processor (ex. LFS). This is because such a flag processor can alter
855 868 # the rawtext content that the delta will be based on, and two clients
856 869 # could have the same revlog node with different flags (i.e. different
857 870 # rawtext contents) and the delta could be incompatible.
858 871 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
859 872 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
860 873 ):
861 874 return False
862 875 return True
863 876
864 877 def update_caches(self, transaction):
865 878 if self._nodemap_file is not None:
866 879 if transaction is None:
867 880 nodemaputil.update_persistent_nodemap(self)
868 881 else:
869 882 nodemaputil.setup_persistent_nodemap(transaction, self)
870 883
871 884 def clearcaches(self):
872 885 self._revisioncache = None
873 886 self._chainbasecache.clear()
874 887 self._chunkcache = (0, b'')
875 888 self._pcache = {}
876 889 self._nodemap_docket = None
877 890 self.index.clearcaches()
878 891 # The python code is the one responsible for validating the docket, so we
879 892 # end up having to refresh it here.
880 893 use_nodemap = (
881 894 not self._inline
882 895 and self._nodemap_file is not None
883 896 and util.safehasattr(self.index, 'update_nodemap_data')
884 897 )
885 898 if use_nodemap:
886 899 nodemap_data = nodemaputil.persisted_data(self)
887 900 if nodemap_data is not None:
888 901 self._nodemap_docket = nodemap_data[0]
889 902 self.index.update_nodemap_data(*nodemap_data)
890 903
891 904 def rev(self, node):
892 905 try:
893 906 return self.index.rev(node)
894 907 except TypeError:
895 908 raise
896 909 except error.RevlogError:
897 910 # parsers.c radix tree lookup failed
898 911 if (
899 912 node == self.nodeconstants.wdirid
900 913 or node in self.nodeconstants.wdirfilenodeids
901 914 ):
902 915 raise error.WdirUnsupported
903 916 raise error.LookupError(node, self.display_id, _(b'no node'))
904 917
905 918 # Accessors for index entries.
906 919
907 920 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
908 921 # are flags.
909 922 def start(self, rev):
910 923 return int(self.index[rev][0] >> 16)
911 924
925 def sidedata_cut_off(self, rev):
926 sd_cut_off = self.index[rev][8]
927 if sd_cut_off != 0:
928 return sd_cut_off
929 # This requires an annoying dance, because entries without sidedata
930 # currently use 0 as their offset (instead of previous-offset +
931 # previous-size).
932 #
933 # We should reconsider this sidedata -> 0 sidedata_offset policy.
934 # In the meantime, we need this.
935 while 0 <= rev:
936 e = self.index[rev]
937 if e[9] != 0:
938 return e[8] + e[9]
939 rev -= 1
940 return 0
941
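# A worked illustration of the fallback above (values assumed): if rev 5
# stores (e[8], e[9]) == (0, 0) and rev 4 stores (120, 40), then the
# cut-off for rev 5 is 120 + 40 = 160, i.e. the end of the closest
# earlier entry that actually carries side-data.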
912 942 def flags(self, rev):
913 943 return self.index[rev][0] & 0xFFFF
914 944
915 945 def length(self, rev):
916 946 return self.index[rev][1]
917 947
918 948 def sidedata_length(self, rev):
919 949 if not self.hassidedata:
920 950 return 0
921 951 return self.index[rev][9]
922 952
923 953 def rawsize(self, rev):
924 954 """return the length of the uncompressed text for a given revision"""
925 955 l = self.index[rev][2]
926 956 if l >= 0:
927 957 return l
928 958
929 959 t = self.rawdata(rev)
930 960 return len(t)
931 961
932 962 def size(self, rev):
933 963 """length of non-raw text (processed by a "read" flag processor)"""
934 964 # fast path: if no "read" flag processor could change the content,
935 965 # size is rawsize. note: ELLIPSIS is known to not change the content.
936 966 flags = self.flags(rev)
937 967 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
938 968 return self.rawsize(rev)
939 969
940 970 return len(self.revision(rev, raw=False))
941 971
942 972 def chainbase(self, rev):
943 973 base = self._chainbasecache.get(rev)
944 974 if base is not None:
945 975 return base
946 976
947 977 index = self.index
948 978 iterrev = rev
949 979 base = index[iterrev][3]
950 980 while base != iterrev:
951 981 iterrev = base
952 982 base = index[iterrev][3]
953 983
954 984 self._chainbasecache[rev] = base
955 985 return base
956 986
957 987 def linkrev(self, rev):
958 988 return self.index[rev][4]
959 989
960 990 def parentrevs(self, rev):
961 991 try:
962 992 entry = self.index[rev]
963 993 except IndexError:
964 994 if rev == wdirrev:
965 995 raise error.WdirUnsupported
966 996 raise
967 997 if entry[5] == nullrev:
968 998 return entry[6], entry[5]
969 999 else:
970 1000 return entry[5], entry[6]
971 1001
972 1002 # fast parentrevs(rev) where rev isn't filtered
973 1003 _uncheckedparentrevs = parentrevs
974 1004
975 1005 def node(self, rev):
976 1006 try:
977 1007 return self.index[rev][7]
978 1008 except IndexError:
979 1009 if rev == wdirrev:
980 1010 raise error.WdirUnsupported
981 1011 raise
982 1012
983 1013 # Derived from index values.
984 1014
985 1015 def end(self, rev):
986 1016 return self.start(rev) + self.length(rev)
987 1017
988 1018 def parents(self, node):
989 1019 i = self.index
990 1020 d = i[self.rev(node)]
991 1021 # inline node() to avoid function call overhead
992 1022 if d[5] == self.nullid:
993 1023 return i[d[6]][7], i[d[5]][7]
994 1024 else:
995 1025 return i[d[5]][7], i[d[6]][7]
996 1026
997 1027 def chainlen(self, rev):
998 1028 return self._chaininfo(rev)[0]
999 1029
1000 1030 def _chaininfo(self, rev):
1001 1031 chaininfocache = self._chaininfocache
1002 1032 if rev in chaininfocache:
1003 1033 return chaininfocache[rev]
1004 1034 index = self.index
1005 1035 generaldelta = self._generaldelta
1006 1036 iterrev = rev
1007 1037 e = index[iterrev]
1008 1038 clen = 0
1009 1039 compresseddeltalen = 0
1010 1040 while iterrev != e[3]:
1011 1041 clen += 1
1012 1042 compresseddeltalen += e[1]
1013 1043 if generaldelta:
1014 1044 iterrev = e[3]
1015 1045 else:
1016 1046 iterrev -= 1
1017 1047 if iterrev in chaininfocache:
1018 1048 t = chaininfocache[iterrev]
1019 1049 clen += t[0]
1020 1050 compresseddeltalen += t[1]
1021 1051 break
1022 1052 e = index[iterrev]
1023 1053 else:
1024 1054 # Add text length of base since decompressing that also takes
1025 1055 # work. For cache hits the length is already included.
1026 1056 compresseddeltalen += e[1]
1027 1057 r = (clen, compresseddeltalen)
1028 1058 chaininfocache[rev] = r
1029 1059 return r
1030 1060
1031 1061 def _deltachain(self, rev, stoprev=None):
1032 1062 """Obtain the delta chain for a revision.
1033 1063
1034 1064 ``stoprev`` specifies a revision to stop at. If not specified, we
1035 1065 stop at the base of the chain.
1036 1066
1037 1067 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
1038 1068 revs in ascending order and ``stopped`` is a bool indicating whether
1039 1069 ``stoprev`` was hit.
1040 1070 """
1041 1071 # Try C implementation.
1042 1072 try:
1043 1073 return self.index.deltachain(rev, stoprev, self._generaldelta)
1044 1074 except AttributeError:
1045 1075 pass
1046 1076
1047 1077 chain = []
1048 1078
1049 1079 # Alias to prevent attribute lookup in tight loop.
1050 1080 index = self.index
1051 1081 generaldelta = self._generaldelta
1052 1082
1053 1083 iterrev = rev
1054 1084 e = index[iterrev]
1055 1085 while iterrev != e[3] and iterrev != stoprev:
1056 1086 chain.append(iterrev)
1057 1087 if generaldelta:
1058 1088 iterrev = e[3]
1059 1089 else:
1060 1090 iterrev -= 1
1061 1091 e = index[iterrev]
1062 1092
1063 1093 if iterrev == stoprev:
1064 1094 stopped = True
1065 1095 else:
1066 1096 chain.append(iterrev)
1067 1097 stopped = False
1068 1098
1069 1099 chain.reverse()
1070 1100 return chain, stopped
1071 1101
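# A small worked example of the pure-python fallback above (values
# assumed, general delta on): if rev 4 has delta base 2 and rev 2 is its
# own base, _deltachain(4) yields ([2, 4], False); with stoprev=2 the
# walk stops early and yields ([4], True).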
1072 1102 def ancestors(self, revs, stoprev=0, inclusive=False):
1073 1103 """Generate the ancestors of 'revs' in reverse revision order.
1074 1104 Does not generate revs lower than stoprev.
1075 1105
1076 1106 See the documentation for ancestor.lazyancestors for more details."""
1077 1107
1078 1108 # first, make sure start revisions aren't filtered
1079 1109 revs = list(revs)
1080 1110 checkrev = self.node
1081 1111 for r in revs:
1082 1112 checkrev(r)
1083 1113 # and we're sure ancestors aren't filtered as well
1084 1114
1085 1115 if rustancestor is not None and self.index.rust_ext_compat:
1086 1116 lazyancestors = rustancestor.LazyAncestors
1087 1117 arg = self.index
1088 1118 else:
1089 1119 lazyancestors = ancestor.lazyancestors
1090 1120 arg = self._uncheckedparentrevs
1091 1121 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1092 1122
1093 1123 def descendants(self, revs):
1094 1124 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1095 1125
1096 1126 def findcommonmissing(self, common=None, heads=None):
1097 1127 """Return a tuple of the ancestors of common and the ancestors of heads
1098 1128 that are not ancestors of common. In revset terminology, we return the
1099 1129 tuple:
1100 1130
1101 1131 ::common, (::heads) - (::common)
1102 1132
1103 1133 The list is sorted by revision number, meaning it is
1104 1134 topologically sorted.
1105 1135
1106 1136 'heads' and 'common' are both lists of node IDs. If heads is
1107 1137 not supplied, uses all of the revlog's heads. If common is not
1108 1138 supplied, uses nullid."""
1109 1139 if common is None:
1110 1140 common = [self.nullid]
1111 1141 if heads is None:
1112 1142 heads = self.heads()
1113 1143
1114 1144 common = [self.rev(n) for n in common]
1115 1145 heads = [self.rev(n) for n in heads]
1116 1146
1117 1147 # we want the ancestors, but inclusive
1118 1148 class lazyset(object):
1119 1149 def __init__(self, lazyvalues):
1120 1150 self.addedvalues = set()
1121 1151 self.lazyvalues = lazyvalues
1122 1152
1123 1153 def __contains__(self, value):
1124 1154 return value in self.addedvalues or value in self.lazyvalues
1125 1155
1126 1156 def __iter__(self):
1127 1157 added = self.addedvalues
1128 1158 for r in added:
1129 1159 yield r
1130 1160 for r in self.lazyvalues:
1131 1161 if r not in added:
1132 1162 yield r
1133 1163
1134 1164 def add(self, value):
1135 1165 self.addedvalues.add(value)
1136 1166
1137 1167 def update(self, values):
1138 1168 self.addedvalues.update(values)
1139 1169
1140 1170 has = lazyset(self.ancestors(common))
1141 1171 has.add(nullrev)
1142 1172 has.update(common)
1143 1173
1144 1174 # take all ancestors from heads that aren't in has
1145 1175 missing = set()
1146 1176 visit = collections.deque(r for r in heads if r not in has)
1147 1177 while visit:
1148 1178 r = visit.popleft()
1149 1179 if r in missing:
1150 1180 continue
1151 1181 else:
1152 1182 missing.add(r)
1153 1183 for p in self.parentrevs(r):
1154 1184 if p not in has:
1155 1185 visit.append(p)
1156 1186 missing = list(missing)
1157 1187 missing.sort()
1158 1188 return has, [self.node(miss) for miss in missing]
1159 1189
1160 1190 def incrementalmissingrevs(self, common=None):
1161 1191 """Return an object that can be used to incrementally compute the
1162 1192 revision numbers of the ancestors of arbitrary sets that are not
1163 1193 ancestors of common. This is an ancestor.incrementalmissingancestors
1164 1194 object.
1165 1195
1166 1196 'common' is a list of revision numbers. If common is not supplied, uses
1167 1197 nullrev.
1168 1198 """
1169 1199 if common is None:
1170 1200 common = [nullrev]
1171 1201
1172 1202 if rustancestor is not None and self.index.rust_ext_compat:
1173 1203 return rustancestor.MissingAncestors(self.index, common)
1174 1204 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1175 1205
1176 1206 def findmissingrevs(self, common=None, heads=None):
1177 1207 """Return the revision numbers of the ancestors of heads that
1178 1208 are not ancestors of common.
1179 1209
1180 1210 More specifically, return a list of revision numbers corresponding to
1181 1211 nodes N such that every N satisfies the following constraints:
1182 1212
1183 1213 1. N is an ancestor of some node in 'heads'
1184 1214 2. N is not an ancestor of any node in 'common'
1185 1215
1186 1216 The list is sorted by revision number, meaning it is
1187 1217 topologically sorted.
1188 1218
1189 1219 'heads' and 'common' are both lists of revision numbers. If heads is
1190 1220 not supplied, uses all of the revlog's heads. If common is not
1191 1221 supplied, uses nullid."""
1192 1222 if common is None:
1193 1223 common = [nullrev]
1194 1224 if heads is None:
1195 1225 heads = self.headrevs()
1196 1226
1197 1227 inc = self.incrementalmissingrevs(common=common)
1198 1228 return inc.missingancestors(heads)
1199 1229
1200 1230 def findmissing(self, common=None, heads=None):
1201 1231 """Return the ancestors of heads that are not ancestors of common.
1202 1232
1203 1233 More specifically, return a list of nodes N such that every N
1204 1234 satisfies the following constraints:
1205 1235
1206 1236 1. N is an ancestor of some node in 'heads'
1207 1237 2. N is not an ancestor of any node in 'common'
1208 1238
1209 1239 The list is sorted by revision number, meaning it is
1210 1240 topologically sorted.
1211 1241
1212 1242 'heads' and 'common' are both lists of node IDs. If heads is
1213 1243 not supplied, uses all of the revlog's heads. If common is not
1214 1244 supplied, uses nullid."""
1215 1245 if common is None:
1216 1246 common = [self.nullid]
1217 1247 if heads is None:
1218 1248 heads = self.heads()
1219 1249
1220 1250 common = [self.rev(n) for n in common]
1221 1251 heads = [self.rev(n) for n in heads]
1222 1252
1223 1253 inc = self.incrementalmissingrevs(common=common)
1224 1254 return [self.node(r) for r in inc.missingancestors(heads)]
1225 1255
1226 1256 def nodesbetween(self, roots=None, heads=None):
1227 1257 """Return a topological path from 'roots' to 'heads'.
1228 1258
1229 1259 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1230 1260 topologically sorted list of all nodes N that satisfy both of
1231 1261 these constraints:
1232 1262
1233 1263 1. N is a descendant of some node in 'roots'
1234 1264 2. N is an ancestor of some node in 'heads'
1235 1265
1236 1266 Every node is considered to be both a descendant and an ancestor
1237 1267 of itself, so every reachable node in 'roots' and 'heads' will be
1238 1268 included in 'nodes'.
1239 1269
1240 1270 'outroots' is the list of reachable nodes in 'roots', i.e., the
1241 1271 subset of 'roots' that is returned in 'nodes'. Likewise,
1242 1272 'outheads' is the subset of 'heads' that is also in 'nodes'.
1243 1273
1244 1274 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1245 1275 unspecified, uses nullid as the only root. If 'heads' is
1246 1276 unspecified, uses list of all of the revlog's heads."""
1247 1277 nonodes = ([], [], [])
1248 1278 if roots is not None:
1249 1279 roots = list(roots)
1250 1280 if not roots:
1251 1281 return nonodes
1252 1282 lowestrev = min([self.rev(n) for n in roots])
1253 1283 else:
1254 1284 roots = [self.nullid] # Everybody's a descendant of nullid
1255 1285 lowestrev = nullrev
1256 1286 if (lowestrev == nullrev) and (heads is None):
1257 1287 # We want _all_ the nodes!
1258 1288 return (
1259 1289 [self.node(r) for r in self],
1260 1290 [self.nullid],
1261 1291 list(self.heads()),
1262 1292 )
1263 1293 if heads is None:
1264 1294 # All nodes are ancestors, so the latest ancestor is the last
1265 1295 # node.
1266 1296 highestrev = len(self) - 1
1267 1297 # Set ancestors to None to signal that every node is an ancestor.
1268 1298 ancestors = None
1269 1299 # Set heads to an empty dictionary for later discovery of heads
1270 1300 heads = {}
1271 1301 else:
1272 1302 heads = list(heads)
1273 1303 if not heads:
1274 1304 return nonodes
1275 1305 ancestors = set()
1276 1306 # Turn heads into a dictionary so we can remove 'fake' heads.
1277 1307 # Also, later we will be using it to filter out the heads we can't
1278 1308 # find from roots.
1279 1309 heads = dict.fromkeys(heads, False)
1280 1310 # Start at the top and keep marking parents until we're done.
1281 1311 nodestotag = set(heads)
1282 1312 # Remember where the top was so we can use it as a limit later.
1283 1313 highestrev = max([self.rev(n) for n in nodestotag])
1284 1314 while nodestotag:
1285 1315 # grab a node to tag
1286 1316 n = nodestotag.pop()
1287 1317 # Never tag nullid
1288 1318 if n == self.nullid:
1289 1319 continue
1290 1320 # A node's revision number represents its place in a
1291 1321 # topologically sorted list of nodes.
1292 1322 r = self.rev(n)
1293 1323 if r >= lowestrev:
1294 1324 if n not in ancestors:
1295 1325 # If we are possibly a descendant of one of the roots
1296 1326 # and we haven't already been marked as an ancestor
1297 1327 ancestors.add(n) # Mark as ancestor
1298 1328 # Add non-nullid parents to list of nodes to tag.
1299 1329 nodestotag.update(
1300 1330 [p for p in self.parents(n) if p != self.nullid]
1301 1331 )
1302 1332 elif n in heads: # We've seen it before, is it a fake head?
1303 1333 # So it is, real heads should not be the ancestors of
1304 1334 # any other heads.
1305 1335 heads.pop(n)
1306 1336 if not ancestors:
1307 1337 return nonodes
1308 1338 # Now that we have our set of ancestors, we want to remove any
1309 1339 # roots that are not ancestors.
1310 1340
1311 1341 # If one of the roots was nullid, everything is included anyway.
1312 1342 if lowestrev > nullrev:
1313 1343 # But, since we weren't, let's recompute the lowest rev to not
1314 1344 # include roots that aren't ancestors.
1315 1345
1316 1346 # Filter out roots that aren't ancestors of heads
1317 1347 roots = [root for root in roots if root in ancestors]
1318 1348 # Recompute the lowest revision
1319 1349 if roots:
1320 1350 lowestrev = min([self.rev(root) for root in roots])
1321 1351 else:
1322 1352 # No more roots? Return empty list
1323 1353 return nonodes
1324 1354 else:
1325 1355 # We are descending from nullid, and don't need to care about
1326 1356 # any other roots.
1327 1357 lowestrev = nullrev
1328 1358 roots = [self.nullid]
1329 1359 # Transform our roots list into a set.
1330 1360 descendants = set(roots)
1331 1361 # Also, keep the original roots so we can filter out roots that aren't
1332 1362 # 'real' roots (i.e. are descended from other roots).
1333 1363 roots = descendants.copy()
1334 1364 # Our topologically sorted list of output nodes.
1335 1365 orderedout = []
1336 1366 # Don't start at nullid since we don't want nullid in our output list,
1337 1367 # and if nullid shows up in descendants, empty parents will look like
1338 1368 # they're descendants.
1339 1369 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1340 1370 n = self.node(r)
1341 1371 isdescendant = False
1342 1372 if lowestrev == nullrev: # Everybody is a descendant of nullid
1343 1373 isdescendant = True
1344 1374 elif n in descendants:
1345 1375 # n is already a descendant
1346 1376 isdescendant = True
1347 1377 # This check only needs to be done here because all the roots
1348 1378 # will start being marked as descendants before the loop.
1349 1379 if n in roots:
1350 1380 # If n was a root, check if it's a 'real' root.
1351 1381 p = tuple(self.parents(n))
1352 1382 # If any of its parents are descendants, it's not a root.
1353 1383 if (p[0] in descendants) or (p[1] in descendants):
1354 1384 roots.remove(n)
1355 1385 else:
1356 1386 p = tuple(self.parents(n))
1357 1387 # A node is a descendant if either of its parents are
1358 1388 # descendants. (We seeded the descendants set with the roots
1359 1389 # up there, remember?)
1360 1390 if (p[0] in descendants) or (p[1] in descendants):
1361 1391 descendants.add(n)
1362 1392 isdescendant = True
1363 1393 if isdescendant and ((ancestors is None) or (n in ancestors)):
1364 1394 # Only include nodes that are both descendants and ancestors.
1365 1395 orderedout.append(n)
1366 1396 if (ancestors is not None) and (n in heads):
1367 1397 # We're trying to figure out which heads are reachable
1368 1398 # from roots.
1369 1399 # Mark this head as having been reached
1370 1400 heads[n] = True
1371 1401 elif ancestors is None:
1372 1402 # Otherwise, we're trying to discover the heads.
1373 1403 # Assume this is a head because if it isn't, the next step
1374 1404 # will eventually remove it.
1375 1405 heads[n] = True
1376 1406 # But, obviously its parents aren't.
1377 1407 for p in self.parents(n):
1378 1408 heads.pop(p, None)
1379 1409 heads = [head for head, flag in pycompat.iteritems(heads) if flag]
1380 1410 roots = list(roots)
1381 1411 assert orderedout
1382 1412 assert roots
1383 1413 assert heads
1384 1414 return (orderedout, roots, heads)
1385 1415
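# A hedged, pure-function summary (not part of revlog.py) of what the
# method above computes: the revisions that are both descendants of
# `roots` and ancestors of `heads`, in topological order. `parentrevs(r)`
# is assumed to return a pair of parent revision numbers, -1 meaning none.
def _between_sketch(revs, parentrevs, roots, heads):
    descendants = set(roots)
    for r in sorted(revs):  # parents always precede children
        if any(p in descendants for p in parentrevs(r)):
            descendants.add(r)
    ancestors = set(heads)
    for r in sorted(revs, reverse=True):  # walk down from the heads
        if r in ancestors:
            ancestors.update(p for p in parentrevs(r) if p != -1)
    return [r for r in sorted(revs) if r in descendants and r in ancestors]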
1386 1416 def headrevs(self, revs=None):
1387 1417 if revs is None:
1388 1418 try:
1389 1419 return self.index.headrevs()
1390 1420 except AttributeError:
1391 1421 return self._headrevs()
1392 1422 if rustdagop is not None and self.index.rust_ext_compat:
1393 1423 return rustdagop.headrevs(self.index, revs)
1394 1424 return dagop.headrevs(revs, self._uncheckedparentrevs)
1395 1425
1396 1426 def computephases(self, roots):
1397 1427 return self.index.computephasesmapsets(roots)
1398 1428
1399 1429 def _headrevs(self):
1400 1430 count = len(self)
1401 1431 if not count:
1402 1432 return [nullrev]
1403 1433 # we won't iterate over filtered revs, so nobody is a head at the start
1404 1434 ishead = [0] * (count + 1)
1405 1435 index = self.index
1406 1436 for r in self:
1407 1437 ishead[r] = 1 # I may be a head
1408 1438 e = index[r]
1409 1439 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1410 1440 return [r for r, val in enumerate(ishead) if val]
1411 1441
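# A minimal, self-contained sketch of the head-finding idea used by
# _headrevs above (a hedged illustration, not part of revlog.py): mark
# every revision as a candidate head, then clear the mark on each
# revision's parents. `parentrevs(r)` is assumed to return a pair of
# parent revision numbers, with -1 (nullrev) meaning "no parent".
def _headrevs_sketch(rev_count, parentrevs):
    ishead = [True] * rev_count
    for r in range(rev_count):
        for p in parentrevs(r):
            if p != -1:
                ishead[p] = False  # a revision with a child is not a head
    return [r for r, flag in enumerate(ishead) if flag]

# Example: history 0 <- 1 plus a branch 0 <- 2 has two heads:
# _headrevs_sketch(3, {0: (-1, -1), 1: (0, -1), 2: (0, -1)}.get) == [1, 2]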
1412 1442 def heads(self, start=None, stop=None):
1413 1443 """return the list of all nodes that have no children
1414 1444
1415 1445 if start is specified, only heads that are descendants of
1416 1446 start will be returned
1417 1447 if stop is specified, it will consider all the revs from stop
1418 1448 as if they had no children
1419 1449 """
1420 1450 if start is None and stop is None:
1421 1451 if not len(self):
1422 1452 return [self.nullid]
1423 1453 return [self.node(r) for r in self.headrevs()]
1424 1454
1425 1455 if start is None:
1426 1456 start = nullrev
1427 1457 else:
1428 1458 start = self.rev(start)
1429 1459
1430 1460 stoprevs = {self.rev(n) for n in stop or []}
1431 1461
1432 1462 revs = dagop.headrevssubset(
1433 1463 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1434 1464 )
1435 1465
1436 1466 return [self.node(rev) for rev in revs]
1437 1467
1438 1468 def children(self, node):
1439 1469 """find the children of a given node"""
1440 1470 c = []
1441 1471 p = self.rev(node)
1442 1472 for r in self.revs(start=p + 1):
1443 1473 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1444 1474 if prevs:
1445 1475 for pr in prevs:
1446 1476 if pr == p:
1447 1477 c.append(self.node(r))
1448 1478 elif p == nullrev:
1449 1479 c.append(self.node(r))
1450 1480 return c
1451 1481
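# children() above leans on the revlog's topological ordering: a child
# always has a larger revision number than its parents, so only revisions
# after p need to be scanned. A hedged pure-function equivalent (the
# nullrev special case is omitted for brevity):
def _children_sketch(p, rev_count, parentrevs):
    return [r for r in range(p + 1, rev_count) if p in parentrevs(r)]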
1452 1482 def commonancestorsheads(self, a, b):
1453 1483 """calculate all the heads of the common ancestors of nodes a and b"""
1454 1484 a, b = self.rev(a), self.rev(b)
1455 1485 ancs = self._commonancestorsheads(a, b)
1456 1486 return pycompat.maplist(self.node, ancs)
1457 1487
1458 1488 def _commonancestorsheads(self, *revs):
1459 1489 """calculate all the heads of the common ancestors of revs"""
1460 1490 try:
1461 1491 ancs = self.index.commonancestorsheads(*revs)
1462 1492 except (AttributeError, OverflowError): # C implementation failed
1463 1493 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1464 1494 return ancs
1465 1495
1466 1496 def isancestor(self, a, b):
1467 1497 """return True if node a is an ancestor of node b
1468 1498
1469 1499 A revision is considered an ancestor of itself."""
1470 1500 a, b = self.rev(a), self.rev(b)
1471 1501 return self.isancestorrev(a, b)
1472 1502
1473 1503 def isancestorrev(self, a, b):
1474 1504 """return True if revision a is an ancestor of revision b
1475 1505
1476 1506 A revision is considered an ancestor of itself.
1477 1507
1478 1508 The implementation of this is trivial but the use of
1479 1509 reachableroots is not."""
1480 1510 if a == nullrev:
1481 1511 return True
1482 1512 elif a == b:
1483 1513 return True
1484 1514 elif a > b:
1485 1515 return False
1486 1516 return bool(self.reachableroots(a, [b], [a], includepath=False))
1487 1517
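# The fast paths above mean the expensive reachableroots call only happens
# when a < b. A hedged pure-Python equivalent that walks b's ancestry and
# prunes revisions below a (valid because parents always have smaller
# revision numbers than their children):
def _isancestorrev_sketch(a, b, parentrevs, nullrev=-1):
    if a == nullrev or a == b:
        return True
    if a > b:
        return False  # a cannot precede b in topological order
    seen = {b}
    stack = [b]
    while stack:
        for p in parentrevs(stack.pop()):
            if p == a:
                return True
            if p > a and p not in seen:  # revs below a can never reach a
                seen.add(p)
                stack.append(p)
    return False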
1488 1518 def reachableroots(self, minroot, heads, roots, includepath=False):
1489 1519 """return (heads(::(<roots> and <roots>::<heads>)))
1490 1520
1491 1521 If includepath is True, return (<roots>::<heads>)."""
1492 1522 try:
1493 1523 return self.index.reachableroots2(
1494 1524 minroot, heads, roots, includepath
1495 1525 )
1496 1526 except AttributeError:
1497 1527 return dagop._reachablerootspure(
1498 1528 self.parentrevs, minroot, roots, heads, includepath
1499 1529 )
1500 1530
1501 1531 def ancestor(self, a, b):
1502 1532 """calculate the "best" common ancestor of nodes a and b"""
1503 1533
1504 1534 a, b = self.rev(a), self.rev(b)
1505 1535 try:
1506 1536 ancs = self.index.ancestors(a, b)
1507 1537 except (AttributeError, OverflowError):
1508 1538 ancs = ancestor.ancestors(self.parentrevs, a, b)
1509 1539 if ancs:
1510 1540 # choose a consistent winner when there's a tie
1511 1541 return min(map(self.node, ancs))
1512 1542 return self.nullid
1513 1543
1514 1544 def _match(self, id):
1515 1545 if isinstance(id, int):
1516 1546 # rev
1517 1547 return self.node(id)
1518 1548 if len(id) == self.nodeconstants.nodelen:
1519 1549 # possibly a binary node
1520 1550 # odds of a binary node being all hex in ASCII are 1 in 10**25
1521 1551 try:
1522 1552 node = id
1523 1553 self.rev(node) # quick search the index
1524 1554 return node
1525 1555 except error.LookupError:
1526 1556 pass # may be partial hex id
1527 1557 try:
1528 1558 # str(rev)
1529 1559 rev = int(id)
1530 1560 if b"%d" % rev != id:
1531 1561 raise ValueError
1532 1562 if rev < 0:
1533 1563 rev = len(self) + rev
1534 1564 if rev < 0 or rev >= len(self):
1535 1565 raise ValueError
1536 1566 return self.node(rev)
1537 1567 except (ValueError, OverflowError):
1538 1568 pass
1539 1569 if len(id) == 2 * self.nodeconstants.nodelen:
1540 1570 try:
1541 1571 # a full hex nodeid?
1542 1572 node = bin(id)
1543 1573 self.rev(node)
1544 1574 return node
1545 1575 except (TypeError, error.LookupError):
1546 1576 pass
1547 1577
1548 1578 def _partialmatch(self, id):
1549 1579 # we don't care about wdirfilenodeids as they should always be full hashes
1550 1580 maybewdir = self.nodeconstants.wdirhex.startswith(id)
1551 1581 ambiguous = False
1552 1582 try:
1553 1583 partial = self.index.partialmatch(id)
1554 1584 if partial and self.hasnode(partial):
1555 1585 if maybewdir:
1556 1586 # single 'ff...' match in radix tree, ambiguous with wdir
1557 1587 ambiguous = True
1558 1588 else:
1559 1589 return partial
1560 1590 elif maybewdir:
1561 1591 # no 'ff...' match in radix tree, wdir identified
1562 1592 raise error.WdirUnsupported
1563 1593 else:
1564 1594 return None
1565 1595 except error.RevlogError:
1566 1596 # parsers.c radix tree lookup gave multiple matches
1567 1597 # fast path: for unfiltered changelog, radix tree is accurate
1568 1598 if not getattr(self, 'filteredrevs', None):
1569 1599 ambiguous = True
1570 1600 # fall through to slow path that filters hidden revisions
1571 1601 except (AttributeError, ValueError):
1572 1602 # we are pure python, or key was too short to search radix tree
1573 1603 pass
1574 1604 if ambiguous:
1575 1605 raise error.AmbiguousPrefixLookupError(
1576 1606 id, self.display_id, _(b'ambiguous identifier')
1577 1607 )
1578 1608
1579 1609 if id in self._pcache:
1580 1610 return self._pcache[id]
1581 1611
1582 1612 if len(id) <= 40:
1583 1613 try:
1584 1614 # hex(node)[:...]
1585 1615 l = len(id) // 2 # grab an even number of digits
1586 1616 prefix = bin(id[: l * 2])
1587 1617 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1588 1618 nl = [
1589 1619 n for n in nl if hex(n).startswith(id) and self.hasnode(n)
1590 1620 ]
1591 1621 if self.nodeconstants.nullhex.startswith(id):
1592 1622 nl.append(self.nullid)
1593 1623 if len(nl) > 0:
1594 1624 if len(nl) == 1 and not maybewdir:
1595 1625 self._pcache[id] = nl[0]
1596 1626 return nl[0]
1597 1627 raise error.AmbiguousPrefixLookupError(
1598 1628 id, self.display_id, _(b'ambiguous identifier')
1599 1629 )
1600 1630 if maybewdir:
1601 1631 raise error.WdirUnsupported
1602 1632 return None
1603 1633 except TypeError:
1604 1634 pass
1605 1635
1606 1636 def lookup(self, id):
1607 1637 """locate a node based on:
1608 1638 - revision number or str(revision number)
1609 1639 - nodeid or subset of hex nodeid
1610 1640 """
1611 1641 n = self._match(id)
1612 1642 if n is not None:
1613 1643 return n
1614 1644 n = self._partialmatch(id)
1615 1645 if n:
1616 1646 return n
1617 1647
1618 1648 raise error.LookupError(id, self.display_id, _(b'no match found'))
1619 1649
1620 1650 def shortest(self, node, minlength=1):
1621 1651 """Find the shortest unambiguous prefix that matches node."""
1622 1652
1623 1653 def isvalid(prefix):
1624 1654 try:
1625 1655 matchednode = self._partialmatch(prefix)
1626 1656 except error.AmbiguousPrefixLookupError:
1627 1657 return False
1628 1658 except error.WdirUnsupported:
1629 1659 # single 'ff...' match
1630 1660 return True
1631 1661 if matchednode is None:
1632 1662 raise error.LookupError(node, self.display_id, _(b'no node'))
1633 1663 return True
1634 1664
1635 1665 def maybewdir(prefix):
1636 1666 return all(c == b'f' for c in pycompat.iterbytestr(prefix))
1637 1667
1638 1668 hexnode = hex(node)
1639 1669
1640 1670 def disambiguate(hexnode, minlength):
1641 1671 """Disambiguate against wdirid."""
1642 1672 for length in range(minlength, len(hexnode) + 1):
1643 1673 prefix = hexnode[:length]
1644 1674 if not maybewdir(prefix):
1645 1675 return prefix
1646 1676
1647 1677 if not getattr(self, 'filteredrevs', None):
1648 1678 try:
1649 1679 length = max(self.index.shortest(node), minlength)
1650 1680 return disambiguate(hexnode, length)
1651 1681 except error.RevlogError:
1652 1682 if node != self.nodeconstants.wdirid:
1653 1683 raise error.LookupError(
1654 1684 node, self.display_id, _(b'no node')
1655 1685 )
1656 1686 except AttributeError:
1657 1687 # Fall through to pure code
1658 1688 pass
1659 1689
1660 1690 if node == self.nodeconstants.wdirid:
1661 1691 for length in range(minlength, len(hexnode) + 1):
1662 1692 prefix = hexnode[:length]
1663 1693 if isvalid(prefix):
1664 1694 return prefix
1665 1695
1666 1696 for length in range(minlength, len(hexnode) + 1):
1667 1697 prefix = hexnode[:length]
1668 1698 if isvalid(prefix):
1669 1699 return disambiguate(hexnode, length)
1670 1700
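# The essence of shortest() above, as a hedged stand-alone sketch: grow the
# prefix until it matches no other node, and never return an all-'f' prefix
# since that could be mistaken for the working-directory pseudo-id.
# `othernodes` is assumed to hold the hex forms of every other node.
def _shortest_sketch(hexnode, othernodes, minlength=1):
    for length in range(minlength, len(hexnode) + 1):
        prefix = hexnode[:length]
        ambiguous = any(o.startswith(prefix) for o in othernodes)
        maybewdir = all(c == 'f' for c in prefix)
        if not ambiguous and not maybewdir:
            return prefix
    return hexnode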
1671 1701 def cmp(self, node, text):
1672 1702 """compare text with a given file revision
1673 1703
1674 1704 returns True if text is different from what is stored.
1675 1705 """
1676 1706 p1, p2 = self.parents(node)
1677 1707 return storageutil.hashrevisionsha1(text, p1, p2) != node
1678 1708
1679 1709 def _cachesegment(self, offset, data):
1680 1710 """Add a segment to the revlog cache.
1681 1711
1682 1712 Accepts an absolute offset and the data that is at that location.
1683 1713 """
1684 1714 o, d = self._chunkcache
1685 1715 # try to add to existing cache
1686 1716 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1687 1717 self._chunkcache = o, d + data
1688 1718 else:
1689 1719 self._chunkcache = offset, data
1690 1720
1691 1721 def _readsegment(self, offset, length, df=None):
1692 1722 """Load a segment of raw data from the revlog.
1693 1723
1694 1724 Accepts an absolute offset, length to read, and an optional existing
1695 1725 file handle to read from.
1696 1726
1697 1727 If an existing file handle is passed, it will be seeked and the
1698 1728 original seek position will NOT be restored.
1699 1729
1700 1730 Returns a str or buffer of raw byte data.
1701 1731
1702 1732 Raises if the requested number of bytes could not be read.
1703 1733 """
1704 1734 # Cache data both forward and backward around the requested
1705 1735 # data, in a fixed size window. This helps speed up operations
1706 1736 # involving reading the revlog backwards.
1707 1737 cachesize = self._chunkcachesize
1708 1738 realoffset = offset & ~(cachesize - 1)
1709 1739 reallength = (
1710 1740 (offset + length + cachesize) & ~(cachesize - 1)
1711 1741 ) - realoffset
1712 1742 with self._datareadfp(df) as df:
1713 1743 df.seek(realoffset)
1714 1744 d = df.read(reallength)
1715 1745
1716 1746 self._cachesegment(realoffset, d)
1717 1747 if offset != realoffset or reallength != length:
1718 1748 startoffset = offset - realoffset
1719 1749 if len(d) - startoffset < length:
1720 1750 filename = self._indexfile if self._inline else self._datafile
1721 1751 got = len(d) - startoffset
1722 1752 m = PARTIAL_READ_MSG % (filename, length, offset, got)
1723 1753 raise error.RevlogError(m)
1724 1754 return util.buffer(d, startoffset, length)
1725 1755
1726 1756 if len(d) < length:
1727 1757 filename = self._indexfile if self._inline else self._datafile
1728 1758 got = len(d) # startoffset is 0 on this path
1729 1759 m = PARTIAL_READ_MSG % (filename, length, offset, got)
1730 1760 raise error.RevlogError(m)
1731 1761
1732 1762 return d
1733 1763
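# The window arithmetic in _readsegment rounds the requested byte range out
# to cache-size boundaries (the cache size is assumed to be a power of two,
# so `& ~(cachesize - 1)` clears the low bits). A hedged sketch:
def _aligned_window_sketch(offset, length, cachesize=65536):
    realoffset = offset & ~(cachesize - 1)              # round start down
    realend = (offset + length + cachesize) & ~(cachesize - 1)
    return realoffset, realend - realoffset             # (offset, length)

# Example: _aligned_window_sketch(70000, 100) == (65536, 65536), i.e. the
# 100 requested bytes are served by one aligned 64 KiB read.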
1734 1764 def _getsegment(self, offset, length, df=None):
1735 1765 """Obtain a segment of raw data from the revlog.
1736 1766
1737 1767 Accepts an absolute offset, length of bytes to obtain, and an
1738 1768 optional file handle to the already-opened revlog. If the file
1739 1769 handle is used, its original seek position will not be preserved.
1740 1770
1741 1771 Requests for data may be returned from a cache.
1742 1772
1743 1773 Returns a str or a buffer instance of raw byte data.
1744 1774 """
1745 1775 o, d = self._chunkcache
1746 1776 l = len(d)
1747 1777
1748 1778 # is it in the cache?
1749 1779 cachestart = offset - o
1750 1780 cacheend = cachestart + length
1751 1781 if cachestart >= 0 and cacheend <= l:
1752 1782 if cachestart == 0 and cacheend == l:
1753 1783 return d # avoid a copy
1754 1784 return util.buffer(d, cachestart, cacheend - cachestart)
1755 1785
1756 1786 return self._readsegment(offset, length, df=df)
1757 1787
1758 1788 def _getsegmentforrevs(self, startrev, endrev, df=None):
1759 1789 """Obtain a segment of raw data corresponding to a range of revisions.
1760 1790
1761 1791 Accepts the start and end revisions and an optional already-open
1762 1792 file handle to be used for reading. If the file handle is used, its
1763 1793 seek position will not be preserved.
1764 1794
1765 1795 Requests for data may be satisfied by a cache.
1766 1796
1767 1797 Returns a 2-tuple of (offset, data) for the requested range of
1768 1798 revisions. Offset is the integer offset from the beginning of the
1769 1799 revlog and data is a str or buffer of the raw byte data.
1770 1800
1771 1801 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1772 1802 to determine where each revision's data begins and ends.
1773 1803 """
1774 1804 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1775 1805 # (functions are expensive).
1776 1806 index = self.index
1777 1807 istart = index[startrev]
1778 1808 start = int(istart[0] >> 16)
1779 1809 if startrev == endrev:
1780 1810 end = start + istart[1]
1781 1811 else:
1782 1812 iend = index[endrev]
1783 1813 end = int(iend[0] >> 16) + iend[1]
1784 1814
1785 1815 if self._inline:
1786 1816 start += (startrev + 1) * self.index.entry_size
1787 1817 end += (endrev + 1) * self.index.entry_size
1788 1818 length = end - start
1789 1819
1790 1820 return start, self._getsegment(start, length, df=df)
1791 1821
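# For inline revlogs, each revision's data follows its index entry inside
# the single ".i" file, so a logical data offset must be shifted past
# (rev + 1) index entries, as done above. A hedged worked example with an
# assumed 64-byte entry size:
def _inline_offset_sketch(logical_start, rev, entry_size=64):
    return logical_start + (rev + 1) * entry_size

# Data that logically starts at byte 1000 for rev 5 physically lives at
# _inline_offset_sketch(1000, 5) == 1000 + 6 * 64 == 1384.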
1792 1822 def _chunk(self, rev, df=None):
1793 1823 """Obtain a single decompressed chunk for a revision.
1794 1824
1795 1825 Accepts an integer revision and an optional already-open file handle
1796 1826 to be used for reading. If used, the seek position of the file will not
1797 1827 be preserved.
1798 1828
1799 1829 Returns a str holding uncompressed data for the requested revision.
1800 1830 """
1801 1831 compression_mode = self.index[rev][10]
1802 1832 data = self._getsegmentforrevs(rev, rev, df=df)[1]
1803 1833 if compression_mode == COMP_MODE_PLAIN:
1804 1834 return data
1805 1835 elif compression_mode == COMP_MODE_DEFAULT:
1806 1836 return self._decompressor(data)
1807 1837 elif compression_mode == COMP_MODE_INLINE:
1808 1838 return self.decompress(data)
1809 1839 else:
1810 1840 msg = 'unknown compression mode %d'
1811 1841 msg %= compression_mode
1812 1842 raise error.RevlogError(msg)
1813 1843
1814 1844 def _chunks(self, revs, df=None, targetsize=None):
1815 1845 """Obtain decompressed chunks for the specified revisions.
1816 1846
1817 1847 Accepts an iterable of numeric revisions that are assumed to be in
1818 1848 ascending order. Also accepts an optional already-open file handle
1819 1849 to be used for reading. If used, the seek position of the file will
1820 1850 not be preserved.
1821 1851
1822 1852 This function is similar to calling ``self._chunk()`` multiple times,
1823 1853 but is faster.
1824 1854
1825 1855 Returns a list with decompressed data for each requested revision.
1826 1856 """
1827 1857 if not revs:
1828 1858 return []
1829 1859 start = self.start
1830 1860 length = self.length
1831 1861 inline = self._inline
1832 1862 iosize = self.index.entry_size
1833 1863 buffer = util.buffer
1834 1864
1835 1865 l = []
1836 1866 ladd = l.append
1837 1867
1838 1868 if not self._withsparseread:
1839 1869 slicedchunks = (revs,)
1840 1870 else:
1841 1871 slicedchunks = deltautil.slicechunk(
1842 1872 self, revs, targetsize=targetsize
1843 1873 )
1844 1874
1845 1875 for revschunk in slicedchunks:
1846 1876 firstrev = revschunk[0]
1847 1877 # Skip trailing revisions with empty diff
1848 1878 for lastrev in revschunk[::-1]:
1849 1879 if length(lastrev) != 0:
1850 1880 break
1851 1881
1852 1882 try:
1853 1883 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1854 1884 except OverflowError:
1855 1885 # issue4215 - we can't cache a run of chunks greater than
1856 1886 # 2G on Windows
1857 1887 return [self._chunk(rev, df=df) for rev in revschunk]
1858 1888
1859 1889 decomp = self.decompress
1860 1890 # self._decompressor might be None, but will not be used in that case
1861 1891 def_decomp = self._decompressor
1862 1892 for rev in revschunk:
1863 1893 chunkstart = start(rev)
1864 1894 if inline:
1865 1895 chunkstart += (rev + 1) * iosize
1866 1896 chunklength = length(rev)
1867 1897 comp_mode = self.index[rev][10]
1868 1898 c = buffer(data, chunkstart - offset, chunklength)
1869 1899 if comp_mode == COMP_MODE_PLAIN:
1870 1900 ladd(c)
1871 1901 elif comp_mode == COMP_MODE_INLINE:
1872 1902 ladd(decomp(c))
1873 1903 elif comp_mode == COMP_MODE_DEFAULT:
1874 1904 ladd(def_decomp(c))
1875 1905 else:
1876 1906 msg = 'unknown compression mode %d'
1877 1907 msg %= comp_mode
1878 1908 raise error.RevlogError(msg)
1879 1909
1880 1910 return l
1881 1911
1882 1912 def _chunkclear(self):
1883 1913 """Clear the raw chunk cache."""
1884 1914 self._chunkcache = (0, b'')
1885 1915
1886 1916 def deltaparent(self, rev):
1887 1917 """return deltaparent of the given revision"""
1888 1918 base = self.index[rev][3]
1889 1919 if base == rev:
1890 1920 return nullrev
1891 1921 elif self._generaldelta:
1892 1922 return base
1893 1923 else:
1894 1924 return rev - 1
1895 1925
1896 1926 def issnapshot(self, rev):
1897 1927 """tells whether rev is a snapshot"""
1898 1928 if not self._sparserevlog:
1899 1929 return self.deltaparent(rev) == nullrev
1900 1930 elif util.safehasattr(self.index, b'issnapshot'):
1901 1931 # directly assign the method to cache the testing and access
1902 1932 self.issnapshot = self.index.issnapshot
1903 1933 return self.issnapshot(rev)
1904 1934 if rev == nullrev:
1905 1935 return True
1906 1936 entry = self.index[rev]
1907 1937 base = entry[3]
1908 1938 if base == rev:
1909 1939 return True
1910 1940 if base == nullrev:
1911 1941 return True
1912 1942 p1 = entry[5]
1913 1943 p2 = entry[6]
1914 1944 if base == p1 or base == p2:
1915 1945 return False
1916 1946 return self.issnapshot(base)
1917 1947
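# Sketch of the sparse-revlog rule encoded above: a revision is a snapshot
# when its delta chain bottoms out at null without ever passing through a
# delta against one of its parents. Assumed interface for this hedged
# illustration: entry(rev) returns (deltabase, p1, p2) as revision numbers.
def _issnapshot_sketch(rev, entry, nullrev=-1):
    if rev == nullrev:
        return True
    base, p1, p2 = entry(rev)
    if base == rev or base == nullrev:
        return True                  # stored in full: a level-0 snapshot
    if base == p1 or base == p2:
        return False                 # a plain delta against a parent
    return _issnapshot_sketch(base, entry, nullrev)  # intermediate snapshot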
1918 1948 def snapshotdepth(self, rev):
1919 1949 """number of snapshot in the chain before this one"""
1920 1950 if not self.issnapshot(rev):
1921 1951 raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
1922 1952 return len(self._deltachain(rev)[0]) - 1
1923 1953
1924 1954 def revdiff(self, rev1, rev2):
1925 1955 """return or calculate a delta between two revisions
1926 1956
1927 1957 The delta calculated is in binary form and is intended to be written to
1928 1958 revlog data directly. So this function needs raw revision data.
1929 1959 """
1930 1960 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1931 1961 return bytes(self._chunk(rev2))
1932 1962
1933 1963 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1934 1964
1935 1965 def _processflags(self, text, flags, operation, raw=False):
1936 1966 """deprecated entry point to access flag processors"""
1937 1967 msg = b'_processflag(...) use the specialized variant'
1938 1968 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1939 1969 if raw:
1940 1970 return text, flagutil.processflagsraw(self, text, flags)
1941 1971 elif operation == b'read':
1942 1972 return flagutil.processflagsread(self, text, flags)
1943 1973 else: # write operation
1944 1974 return flagutil.processflagswrite(self, text, flags)
1945 1975
1946 1976 def revision(self, nodeorrev, _df=None, raw=False):
1947 1977 """return an uncompressed revision of a given node or revision
1948 1978 number.
1949 1979
1950 1980 _df - an existing file handle to read from. (internal-only)
1951 1981 raw - an optional argument specifying if the revision data is to be
1952 1982 treated as raw data when applying flag transforms. 'raw' should be set
1953 1983 to True when generating changegroups or in debug commands.
1954 1984 """
1955 1985 if raw:
1956 1986 msg = (
1957 1987 b'revlog.revision(..., raw=True) is deprecated, '
1958 1988 b'use revlog.rawdata(...)'
1959 1989 )
1960 1990 util.nouideprecwarn(msg, b'5.2', stacklevel=2)
1961 1991 return self._revisiondata(nodeorrev, _df, raw=raw)
1962 1992
1963 1993 def sidedata(self, nodeorrev, _df=None):
1964 1994 """a map of extra data related to the changeset but not part of the hash
1965 1995
1966 1996 This function currently returns a dictionary. However, a more
1967 1997 advanced mapping object will likely be used in the future for more
1968 1998 efficient/lazy code.
1969 1999 """
1970 2000 # deal with <nodeorrev> argument type
1971 2001 if isinstance(nodeorrev, int):
1972 2002 rev = nodeorrev
1973 2003 else:
1974 2004 rev = self.rev(nodeorrev)
1975 2005 return self._sidedata(rev)
1976 2006
1977 2007 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1978 2008 # deal with <nodeorrev> argument type
1979 2009 if isinstance(nodeorrev, int):
1980 2010 rev = nodeorrev
1981 2011 node = self.node(rev)
1982 2012 else:
1983 2013 node = nodeorrev
1984 2014 rev = None
1985 2015
1986 2016 # fast path the special `nullid` rev
1987 2017 if node == self.nullid:
1988 2018 return b""
1989 2019
1990 2020 # ``rawtext`` is the text as stored inside the revlog. Might be the
1991 2021 # revision or might need to be processed to retrieve the revision.
1992 2022 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1993 2023
1994 2024 if raw and validated:
1995 2025 # if we don't want to process the raw text and that raw
1996 2026 # text is cached, we can exit early.
1997 2027 return rawtext
1998 2028 if rev is None:
1999 2029 rev = self.rev(node)
2000 2030 # the revlog's flag for this revision
2001 2031 # (usually altering its state or content)
2002 2032 flags = self.flags(rev)
2003 2033
2004 2034 if validated and flags == REVIDX_DEFAULT_FLAGS:
2005 2035 # no extra flags set, no flag processor runs, text = rawtext
2006 2036 return rawtext
2007 2037
2008 2038 if raw:
2009 2039 validatehash = flagutil.processflagsraw(self, rawtext, flags)
2010 2040 text = rawtext
2011 2041 else:
2012 2042 r = flagutil.processflagsread(self, rawtext, flags)
2013 2043 text, validatehash = r
2014 2044 if validatehash:
2015 2045 self.checkhash(text, node, rev=rev)
2016 2046 if not validated:
2017 2047 self._revisioncache = (node, rev, rawtext)
2018 2048
2019 2049 return text
2020 2050
2021 2051 def _rawtext(self, node, rev, _df=None):
2022 2052 """return the possibly unvalidated rawtext for a revision
2023 2053
2024 2054 returns (rev, rawtext, validated)
2025 2055 """
2026 2056
2027 2057 # revision in the cache (could be useful to apply delta)
2028 2058 cachedrev = None
2029 2059 # An intermediate text to apply deltas to
2030 2060 basetext = None
2031 2061
2032 2062 # Check if we have the entry in cache
2033 2063 # The cache entry looks like (node, rev, rawtext)
2034 2064 if self._revisioncache:
2035 2065 if self._revisioncache[0] == node:
2036 2066 return (rev, self._revisioncache[2], True)
2037 2067 cachedrev = self._revisioncache[1]
2038 2068
2039 2069 if rev is None:
2040 2070 rev = self.rev(node)
2041 2071
2042 2072 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
2043 2073 if stopped:
2044 2074 basetext = self._revisioncache[2]
2045 2075
2046 2076 # drop cache to save memory; the caller is expected to
2047 2077 # update self._revisioncache after validating the text
2048 2078 self._revisioncache = None
2049 2079
2050 2080 targetsize = None
2051 2081 rawsize = self.index[rev][2]
2052 2082 if 0 <= rawsize:
2053 2083 targetsize = 4 * rawsize
2054 2084
2055 2085 bins = self._chunks(chain, df=_df, targetsize=targetsize)
2056 2086 if basetext is None:
2057 2087 basetext = bytes(bins[0])
2058 2088 bins = bins[1:]
2059 2089
2060 2090 rawtext = mdiff.patches(basetext, bins)
2061 2091 del basetext # let us have a chance to free memory early
2062 2092 return (rev, rawtext, False)
2063 2093
2064 2094 def _sidedata(self, rev):
2065 2095 """Return the sidedata for a given revision number."""
2066 2096 index_entry = self.index[rev]
2067 2097 sidedata_offset = index_entry[8]
2068 2098 sidedata_size = index_entry[9]
2069 2099
2070 2100 if self._inline:
2071 2101 sidedata_offset += self.index.entry_size * (1 + rev)
2072 2102 if sidedata_size == 0:
2073 2103 return {}
2074 2104
2075 2105 # XXX this needs caching, as we do for data
2076 2106 with self._sidedatareadfp() as sdf:
2077 sdf.seek(sidedata_offset)
2107 if self._docket.sidedata_end < sidedata_offset + sidedata_size:
2108 filename = self._sidedatafile
2109 end = self._docket.sidedata_end
2110 offset = sidedata_offset
2111 length = sidedata_size
2112 m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
2113 raise error.RevlogError(m)
2114
2115 sdf.seek(sidedata_offset, os.SEEK_SET)
2078 2116 comp_segment = sdf.read(sidedata_size)
2079 2117
2080 2118 if len(comp_segment) < sidedata_size:
2081 filename = self._datafile
2119 filename = self._sidedatafile
2082 2120 length = sidedata_size
2083 2121 offset = sidedata_offset
2084 2122 got = len(comp_segment)
2085 2123 m = PARTIAL_READ_MSG % (filename, length, offset, got)
2086 2124 raise error.RevlogError(m)
2087 2125
2088 2126 comp = self.index[rev][11]
2089 2127 if comp == COMP_MODE_PLAIN:
2090 2128 segment = comp_segment
2091 2129 elif comp == COMP_MODE_DEFAULT:
2092 2130 segment = self._decompressor(comp_segment)
2093 2131 elif comp == COMP_MODE_INLINE:
2094 2132 segment = self.decompress(comp_segment)
2095 2133 else:
2096 2134 msg = 'unknown compression mode %d'
2097 2135 msg %= comp
2098 2136 raise error.RevlogError(msg)
2099 2137
2100 2138 sidedata = sidedatautil.deserialize_sidedata(segment)
2101 2139 return sidedata
2102 2140
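# With sidedata now stored in its own file, the read path above boils down
# to: seek to the recorded offset, read the recorded size (validating both
# against the docket), then decompress and deserialize. A hedged sketch
# over a plain file object (`deserialize` stands in for
# sidedatautil.deserialize_sidedata, `decompress` for the mode dispatch):
def _read_sidedata_sketch(fileobj, offset, size, decompress, deserialize):
    if size == 0:
        return {}                        # this revision has no sidedata
    fileobj.seek(offset)
    segment = fileobj.read(size)
    if len(segment) < size:
        raise IOError('truncated sidedata file')
    return deserialize(decompress(segment))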
2103 2141 def rawdata(self, nodeorrev, _df=None):
2104 2142 """return an uncompressed raw data of a given node or revision number.
2105 2143
2106 2144 _df - an existing file handle to read from. (internal-only)
2107 2145 """
2108 2146 return self._revisiondata(nodeorrev, _df, raw=True)
2109 2147
2110 2148 def hash(self, text, p1, p2):
2111 2149 """Compute a node hash.
2112 2150
2113 2151 Available as a function so that subclasses can replace the hash
2114 2152 as needed.
2115 2153 """
2116 2154 return storageutil.hashrevisionsha1(text, p1, p2)
2117 2155
2118 2156 def checkhash(self, text, node, p1=None, p2=None, rev=None):
2119 2157 """Check node hash integrity.
2120 2158
2121 2159 Available as a function so that subclasses can extend hash mismatch
2122 2160 behaviors as needed.
2123 2161 """
2124 2162 try:
2125 2163 if p1 is None and p2 is None:
2126 2164 p1, p2 = self.parents(node)
2127 2165 if node != self.hash(text, p1, p2):
2128 2166 # Clear the revision cache on hash failure. The revision cache
2129 2167 # only stores the raw revision and clearing the cache does have
2130 2168 # the side-effect that we won't have a cache hit when the raw
2131 2169 # revision data is accessed. But this case should be rare and
2132 2170 # it is extra work to teach the cache about the hash
2133 2171 # verification state.
2134 2172 if self._revisioncache and self._revisioncache[0] == node:
2135 2173 self._revisioncache = None
2136 2174
2137 2175 revornode = rev
2138 2176 if revornode is None:
2139 2177 revornode = templatefilters.short(hex(node))
2140 2178 raise error.RevlogError(
2141 2179 _(b"integrity check failed on %s:%s")
2142 2180 % (self.display_id, pycompat.bytestr(revornode))
2143 2181 )
2144 2182 except error.RevlogError:
2145 2183 if self._censorable and storageutil.iscensoredtext(text):
2146 2184 raise error.CensoredNodeError(self.display_id, node, text)
2147 2185 raise
2148 2186
2149 2187 def _enforceinlinesize(self, tr):
2150 2188 """Check if the revlog is too big for inline and convert if so.
2151 2189
2152 2190 This should be called after revisions are added to the revlog. If the
2153 2191 revlog has grown too large to be an inline revlog, it will convert it
2154 2192 to use multiple index and data files.
2155 2193 """
2156 2194 tiprev = len(self) - 1
2157 2195 total_size = self.start(tiprev) + self.length(tiprev)
2158 2196 if not self._inline or total_size < _maxinline:
2159 2197 return
2160 2198
2161 2199 troffset = tr.findoffset(self._indexfile)
2162 2200 if troffset is None:
2163 2201 raise error.RevlogError(
2164 2202 _(b"%s not found in the transaction") % self._indexfile
2165 2203 )
2166 2204 trindex = 0
2167 2205 tr.add(self._datafile, 0)
2168 2206
2169 2207 existing_handles = False
2170 2208 if self._writinghandles is not None:
2171 2209 existing_handles = True
2172 2210 fp = self._writinghandles[0]
2173 2211 fp.flush()
2174 2212 fp.close()
2175 2213 # We can't use the cached file handle after close(). So prevent
2176 2214 # its usage.
2177 2215 self._writinghandles = None
2178 2216
2179 2217 new_dfh = self._datafp(b'w+')
2180 2218 new_dfh.truncate(0) # drop any potentially existing data
2181 2219 try:
2182 2220 with self._indexfp() as read_ifh:
2183 2221 for r in self:
2184 2222 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2185 2223 if troffset <= self.start(r) + r * self.index.entry_size:
2186 2224 trindex = r
2187 2225 new_dfh.flush()
2188 2226
2189 2227 with self.__index_new_fp() as fp:
2190 2228 self._format_flags &= ~FLAG_INLINE_DATA
2191 2229 self._inline = False
2192 2230 for i in self:
2193 2231 e = self.index.entry_binary(i)
2194 2232 if i == 0 and self._docket is None:
2195 2233 header = self._format_flags | self._format_version
2196 2234 header = self.index.pack_header(header)
2197 2235 e = header + e
2198 2236 fp.write(e)
2199 2237 if self._docket is not None:
2200 2238 self._docket.index_end = fp.tell()
2201 2239
2202 2240 # There is a small transactional race here. If the rename of
2203 2241 # the index fails, we should remove the datafile. It is more
2204 2242 # important to ensure that the data file is not truncated
2205 2243 # when the index is replaced as otherwise data is lost.
2206 2244 tr.replace(self._datafile, self.start(trindex))
2207 2245
2208 2246 # the temp file replaces the real index when we exit the context
2209 2247 # manager
2210 2248
2211 2249 tr.replace(self._indexfile, trindex * self.index.entry_size)
2212 2250 nodemaputil.setup_persistent_nodemap(tr, self)
2213 2251 self._chunkclear()
2214 2252
2215 2253 if existing_handles:
2216 2254 # switched from inline to conventional reopen the index
2217 2255 ifh = self.__index_write_fp()
2218 self._writinghandles = (ifh, new_dfh)
2256 self._writinghandles = (ifh, new_dfh, None)
2219 2257 new_dfh = None
2220 2258 finally:
2221 2259 if new_dfh is not None:
2222 2260 new_dfh.close()
2223 2261
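# The conversion above is driven by a simple size test; as a hedged
# stand-alone predicate (the 131072 default is assumed to match the
# module-level _maxinline cap):
def _needs_split_sketch(inline, tip_start, tip_length, maxinline=131072):
    # an inline revlog is split into ".i" + ".d" once its data outgrows
    # the inline cap
    return inline and (tip_start + tip_length) >= maxinline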
2224 2262 def _nodeduplicatecallback(self, transaction, node):
2225 2263 """called when trying to add a node already stored."""
2226 2264
2227 2265 @contextlib.contextmanager
2228 2266 def _writing(self, transaction):
2229 2267 if self._trypending:
2230 2268 msg = b'try to write in a `trypending` revlog: %s'
2231 2269 msg %= self.display_id
2232 2270 raise error.ProgrammingError(msg)
2233 2271 if self._writinghandles is not None:
2234 2272 yield
2235 2273 else:
2236 ifh = dfh = None
2274 ifh = dfh = sdfh = None
2237 2275 try:
2238 2276 r = len(self)
2239 2277 # opening the data file.
2240 2278 dsize = 0
2241 2279 if r:
2242 2280 dsize = self.end(r - 1)
2243 2281 dfh = None
2244 2282 if not self._inline:
2245 2283 try:
2246 2284 dfh = self._datafp(b"r+")
2247 2285 if self._docket is None:
2248 2286 dfh.seek(0, os.SEEK_END)
2249 2287 else:
2250 2288 dfh.seek(self._docket.data_end, os.SEEK_SET)
2251 2289 except IOError as inst:
2252 2290 if inst.errno != errno.ENOENT:
2253 2291 raise
2254 2292 dfh = self._datafp(b"w+")
2255 2293 transaction.add(self._datafile, dsize)
2294 if self._sidedatafile is not None:
2295 try:
2296 sdfh = self.opener(self._sidedatafile, mode=b"r+")
2297 sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2298 except IOError as inst:
2299 if inst.errno != errno.ENOENT:
2300 raise
2301 sdfh = self.opener(self._sidedatafile, mode=b"w+")
2302 transaction.add(
2303 self._sidedatafile, self._docket.sidedata_end
2304 )
2256 2305
2257 2306 # opening the index file.
2258 2307 isize = r * self.index.entry_size
2259 2308 ifh = self.__index_write_fp()
2260 2309 if self._inline:
2261 2310 transaction.add(self._indexfile, dsize + isize)
2262 2311 else:
2263 2312 transaction.add(self._indexfile, isize)
2264 2313 # exposing all file handles for writing.
2265 self._writinghandles = (ifh, dfh)
2314 self._writinghandles = (ifh, dfh, sdfh)
2266 2315 yield
2267 2316 if self._docket is not None:
2268 2317 self._write_docket(transaction)
2269 2318 finally:
2270 2319 self._writinghandles = None
2271 2320 if dfh is not None:
2272 2321 dfh.close()
2322 if sdfh is not None:
2323 sdfh.close()
2273 2324 # closing the index file last to avoid exposing references to
2274 2325 # potentially unflushed data content.
2275 2326 if ifh is not None:
2276 2327 ifh.close()
2277 2328
2278 2329 def _write_docket(self, transaction):
2279 2330 """write the current docket on disk
2280 2331
2281 2332 Exists as a method to help the changelog implement transaction logic
2282 2333
2283 2334 We could also imagine using the same transaction logic for all revlogs
2284 2335 since dockets are cheap."""
2285 2336 self._docket.write(transaction)
2286 2337
2287 2338 def addrevision(
2288 2339 self,
2289 2340 text,
2290 2341 transaction,
2291 2342 link,
2292 2343 p1,
2293 2344 p2,
2294 2345 cachedelta=None,
2295 2346 node=None,
2296 2347 flags=REVIDX_DEFAULT_FLAGS,
2297 2348 deltacomputer=None,
2298 2349 sidedata=None,
2299 2350 ):
2300 2351 """add a revision to the log
2301 2352
2302 2353 text - the revision data to add
2303 2354 transaction - the transaction object used for rollback
2304 2355 link - the linkrev data to add
2305 2356 p1, p2 - the parent nodeids of the revision
2306 2357 cachedelta - an optional precomputed delta
2307 2358 node - nodeid of revision; typically node is not specified, and it is
2308 2359 computed by default as hash(text, p1, p2); however, subclasses might
2309 2360 use a different hashing method (and override checkhash() in that case)
2310 2361 flags - the known flags to set on the revision
2311 2362 deltacomputer - an optional deltacomputer instance shared between
2312 2363 multiple calls
2313 2364 """
2314 2365 if link == nullrev:
2315 2366 raise error.RevlogError(
2316 2367 _(b"attempted to add linkrev -1 to %s") % self.display_id
2317 2368 )
2318 2369
2319 2370 if sidedata is None:
2320 2371 sidedata = {}
2321 2372 elif sidedata and not self.hassidedata:
2322 2373 raise error.ProgrammingError(
2323 2374 _(b"trying to add sidedata to a revlog who don't support them")
2324 2375 )
2325 2376
2326 2377 if flags:
2327 2378 node = node or self.hash(text, p1, p2)
2328 2379
2329 2380 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2330 2381
2331 2382 # If the flag processor modifies the revision data, ignore any provided
2332 2383 # cachedelta.
2333 2384 if rawtext != text:
2334 2385 cachedelta = None
2335 2386
2336 2387 if len(rawtext) > _maxentrysize:
2337 2388 raise error.RevlogError(
2338 2389 _(
2339 2390 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2340 2391 )
2341 2392 % (self.display_id, len(rawtext))
2342 2393 )
2343 2394
2344 2395 node = node or self.hash(rawtext, p1, p2)
2345 2396 rev = self.index.get_rev(node)
2346 2397 if rev is not None:
2347 2398 return rev
2348 2399
2349 2400 if validatehash:
2350 2401 self.checkhash(rawtext, node, p1=p1, p2=p2)
2351 2402
2352 2403 return self.addrawrevision(
2353 2404 rawtext,
2354 2405 transaction,
2355 2406 link,
2356 2407 p1,
2357 2408 p2,
2358 2409 node,
2359 2410 flags,
2360 2411 cachedelta=cachedelta,
2361 2412 deltacomputer=deltacomputer,
2362 2413 sidedata=sidedata,
2363 2414 )
2364 2415
2365 2416 def addrawrevision(
2366 2417 self,
2367 2418 rawtext,
2368 2419 transaction,
2369 2420 link,
2370 2421 p1,
2371 2422 p2,
2372 2423 node,
2373 2424 flags,
2374 2425 cachedelta=None,
2375 2426 deltacomputer=None,
2376 2427 sidedata=None,
2377 2428 ):
2378 2429 """add a raw revision with known flags, node and parents
2379 2430 useful when reusing a revision not stored in this revlog (e.g. received
2380 2431 over the wire, or read from an external bundle).
2381 2432 """
2382 2433 with self._writing(transaction):
2383 2434 return self._addrevision(
2384 2435 node,
2385 2436 rawtext,
2386 2437 transaction,
2387 2438 link,
2388 2439 p1,
2389 2440 p2,
2390 2441 flags,
2391 2442 cachedelta,
2392 2443 deltacomputer=deltacomputer,
2393 2444 sidedata=sidedata,
2394 2445 )
2395 2446
2396 2447 def compress(self, data):
2397 2448 """Generate a possibly-compressed representation of data."""
2398 2449 if not data:
2399 2450 return b'', data
2400 2451
2401 2452 compressed = self._compressor.compress(data)
2402 2453
2403 2454 if compressed:
2404 2455 # The revlog compressor added the header in the returned data.
2405 2456 return b'', compressed
2406 2457
2407 2458 if data[0:1] == b'\0':
2408 2459 return b'', data
2409 2460 return b'u', data
2410 2461
2411 2462 def decompress(self, data):
2412 2463 """Decompress a revlog chunk.
2413 2464
2414 2465 The chunk is expected to begin with a header identifying the
2415 2466 format type so it can be routed to an appropriate decompressor.
2416 2467 """
2417 2468 if not data:
2418 2469 return data
2419 2470
2420 2471 # Revlogs are read much more frequently than they are written and many
2421 2472 # chunks only take microseconds to decompress, so performance is
2422 2473 # important here.
2423 2474 #
2424 2475 # We can make a few assumptions about revlogs:
2425 2476 #
2426 2477 # 1) the majority of chunks will be compressed (as opposed to inline
2427 2478 # raw data).
2428 2479 # 2) decompressing *any* data will likely be at least 10x slower than
2429 2480 # returning raw inline data.
2430 2481 # 3) we want to prioritize common and officially supported compression
2431 2482 # engines
2432 2483 #
2433 2484 # It follows that we want to optimize for "decompress compressed data
2434 2485 # when encoded with common and officially supported compression engines"
2435 2486 # case over "raw data" and "data encoded by less common or non-official
2436 2487 # compression engines." That is why we have the inline lookup first
2437 2488 # followed by the compengines lookup.
2438 2489 #
2439 2490 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2440 2491 # compressed chunks. And this matters for changelog and manifest reads.
2441 2492 t = data[0:1]
2442 2493
2443 2494 if t == b'x':
2444 2495 try:
2445 2496 return _zlibdecompress(data)
2446 2497 except zlib.error as e:
2447 2498 raise error.RevlogError(
2448 2499 _(b'revlog decompress error: %s')
2449 2500 % stringutil.forcebytestr(e)
2450 2501 )
2451 2502 # '\0' is more common than 'u' so it goes first.
2452 2503 elif t == b'\0':
2453 2504 return data
2454 2505 elif t == b'u':
2455 2506 return util.buffer(data, 1)
2456 2507
2457 2508 compressor = self._get_decompressor(t)
2458 2509
2459 2510 return compressor.decompress(data)
2460 2511
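# The one-byte chunk header convention handled above, demonstrated on tiny
# literals (a hedged illustration of the same dispatch, not revlog itself):
import zlib

def _decode_chunk_sketch(data):
    t = data[0:1]
    if t == b'\x00':
        return data             # plain data; the NUL byte is real content
    if t == b'u':
        return data[1:]         # explicit "stored uncompressed" marker
    if t == b'x':
        return zlib.decompress(data)  # 'x' (0x78) is the zlib magic byte
    raise ValueError('unknown chunk header: %r' % t)

# _decode_chunk_sketch(b'uhello') == b'hello'
# _decode_chunk_sketch(zlib.compress(b'hello')) == b'hello'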
2461 2512 def _addrevision(
2462 2513 self,
2463 2514 node,
2464 2515 rawtext,
2465 2516 transaction,
2466 2517 link,
2467 2518 p1,
2468 2519 p2,
2469 2520 flags,
2470 2521 cachedelta,
2471 2522 alwayscache=False,
2472 2523 deltacomputer=None,
2473 2524 sidedata=None,
2474 2525 ):
2475 2526 """internal function to add revisions to the log
2476 2527
2477 2528 see addrevision for argument descriptions.
2478 2529
2479 2530 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2480 2531
2481 2532 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2482 2533 be used.
2483 2534
2484 2535 invariants:
2485 2536 - rawtext is optional (can be None); if not set, cachedelta must be set.
2486 2537 if both are set, they must correspond to each other.
2487 2538 """
2488 2539 if node == self.nullid:
2489 2540 raise error.RevlogError(
2490 2541 _(b"%s: attempt to add null revision") % self.display_id
2491 2542 )
2492 2543 if (
2493 2544 node == self.nodeconstants.wdirid
2494 2545 or node in self.nodeconstants.wdirfilenodeids
2495 2546 ):
2496 2547 raise error.RevlogError(
2497 2548 _(b"%s: attempt to add wdir revision") % self.display_id
2498 2549 )
2499 2550 if self._writinghandles is None:
2500 2551 msg = b'adding revision outside `revlog._writing` context'
2501 2552 raise error.ProgrammingError(msg)
2502 2553
2503 2554 if self._inline:
2504 2555 fh = self._writinghandles[0]
2505 2556 else:
2506 2557 fh = self._writinghandles[1]
2507 2558
2508 2559 btext = [rawtext]
2509 2560
2510 2561 curr = len(self)
2511 2562 prev = curr - 1
2512 2563
2513 2564 offset = self._get_data_offset(prev)
2514 2565
2515 2566 if self._concurrencychecker:
2516 ifh, dfh = self._writinghandles
2567 ifh, dfh, sdfh = self._writinghandles
2568 # XXX no checking for the sidedata file
2517 2569 if self._inline:
2518 2570 # offset is "as if" it were in the .d file, so we need to add on
2519 2571 # the size of the entry metadata.
2520 2572 self._concurrencychecker(
2521 2573 ifh, self._indexfile, offset + curr * self.index.entry_size
2522 2574 )
2523 2575 else:
2524 2576 # Entries in the .i are a consistent size.
2525 2577 self._concurrencychecker(
2526 2578 ifh, self._indexfile, curr * self.index.entry_size
2527 2579 )
2528 2580 self._concurrencychecker(dfh, self._datafile, offset)
2529 2581
2530 2582 p1r, p2r = self.rev(p1), self.rev(p2)
2531 2583
2532 2584 # full versions are inserted when the needed deltas
2533 2585 # become comparable to the uncompressed text
2534 2586 if rawtext is None:
2535 2587 # need rawtext size, before it was changed by flag processors, which is
2536 2588 # the non-raw size. use revlog explicitly to avoid filelog's extra
2537 2589 # logic that might remove metadata size.
2538 2590 textlen = mdiff.patchedsize(
2539 2591 revlog.size(self, cachedelta[0]), cachedelta[1]
2540 2592 )
2541 2593 else:
2542 2594 textlen = len(rawtext)
2543 2595
2544 2596 if deltacomputer is None:
2545 2597 deltacomputer = deltautil.deltacomputer(self)
2546 2598
2547 2599 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2548 2600
2549 2601 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2550 2602
2551 2603 compression_mode = COMP_MODE_INLINE
2552 2604 if self._docket is not None:
2553 2605 h, d = deltainfo.data
2554 2606 if not h and not d:
2555 2607 # no data to store at all... declare them uncompressed
2556 2608 compression_mode = COMP_MODE_PLAIN
2557 2609 elif not h:
2558 2610 t = d[0:1]
2559 2611 if t == b'\0':
2560 2612 compression_mode = COMP_MODE_PLAIN
2561 2613 elif t == self._docket.default_compression_header:
2562 2614 compression_mode = COMP_MODE_DEFAULT
2563 2615 elif h == b'u':
2564 2616 # we have a more efficient way to declare uncompressed
2565 2617 h = b''
2566 2618 compression_mode = COMP_MODE_PLAIN
2567 2619 deltainfo = deltautil.drop_u_compression(deltainfo)
2568 2620
2569 2621 sidedata_compression_mode = COMP_MODE_INLINE
2570 2622 if sidedata and self.hassidedata:
2571 2623 sidedata_compression_mode = COMP_MODE_PLAIN
2572 2624 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2573 sidedata_offset = offset + deltainfo.deltalen
2625 sidedata_offset = self._docket.sidedata_end
2574 2626 h, comp_sidedata = self.compress(serialized_sidedata)
2575 2627 if (
2576 2628 h != b'u'
2577 2629 and comp_sidedata[0:1] != b'\0'
2578 2630 and len(comp_sidedata) < len(serialized_sidedata)
2579 2631 ):
2580 2632 assert not h
2581 2633 if (
2582 2634 comp_sidedata[0:1]
2583 2635 == self._docket.default_compression_header
2584 2636 ):
2585 2637 sidedata_compression_mode = COMP_MODE_DEFAULT
2586 2638 serialized_sidedata = comp_sidedata
2587 2639 else:
2588 2640 sidedata_compression_mode = COMP_MODE_INLINE
2589 2641 serialized_sidedata = comp_sidedata
2590 2642 else:
2591 2643 serialized_sidedata = b""
2592 2644 # Don't store the offset if the sidedata is empty; that way
2593 2645 # we can easily detect empty sidedata, and it will be no different
2594 2646 # from sidedata we add manually.
2595 2647 sidedata_offset = 0
2596 2648
2597 2649 e = (
2598 2650 offset_type(offset, flags),
2599 2651 deltainfo.deltalen,
2600 2652 textlen,
2601 2653 deltainfo.base,
2602 2654 link,
2603 2655 p1r,
2604 2656 p2r,
2605 2657 node,
2606 2658 sidedata_offset,
2607 2659 len(serialized_sidedata),
2608 2660 compression_mode,
2609 2661 sidedata_compression_mode,
2610 2662 )
2611 2663
2612 2664 self.index.append(e)
2613 2665 entry = self.index.entry_binary(curr)
2614 2666 if curr == 0 and self._docket is None:
2615 2667 header = self._format_flags | self._format_version
2616 2668 header = self.index.pack_header(header)
2617 2669 entry = header + entry
2618 2670 self._writeentry(
2619 2671 transaction,
2620 2672 entry,
2621 2673 deltainfo.data,
2622 2674 link,
2623 2675 offset,
2624 2676 serialized_sidedata,
2677 sidedata_offset,
2625 2678 )
2626 2679
2627 2680 rawtext = btext[0]
2628 2681
2629 2682 if alwayscache and rawtext is None:
2630 2683 rawtext = deltacomputer.buildtext(revinfo, fh)
2631 2684
2632 2685 if type(rawtext) == bytes: # only accept immutable objects
2633 2686 self._revisioncache = (node, curr, rawtext)
2634 2687 self._chainbasecache[curr] = deltainfo.chainbase
2635 2688 return curr
2636 2689
2637 2690 def _get_data_offset(self, prev):
2638 2691 """Returns the current offset in the (in-transaction) data file.
2639 2692 Versions < 2 of the revlog can get this in O(1); revlog v2 needs a docket
2640 2693 file to store that information: since sidedata can be rewritten to the
2641 2694 end of the data file within a transaction, you can have cases where, for
2642 2695 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2643 2696 to `n - 1`'s sidedata being written after `n`'s data.
2644 2697
2645 2698 TODO cache this in a docket file before getting out of experimental."""
2646 2699 if self._docket is None:
2647 2700 return self.end(prev)
2648 2701 else:
2649 2702 return self._docket.data_end
2650 2703
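# Since sidedata moved to its own file, the docket has to track three
# independent write positions. A minimal stand-in showing the fields the
# surrounding code relies on (hedged; the real docket stores more):
class _DocketSketch(object):
    def __init__(self):
        self.index_end = 0      # next write offset in the ".i" file
        self.data_end = 0       # next write offset in the ".d" file
        self.sidedata_end = 0   # next write offset in the sidedata file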
2651 def _writeentry(self, transaction, entry, data, link, offset, sidedata):
2704 def _writeentry(
2705 self, transaction, entry, data, link, offset, sidedata, sidedata_offset
2706 ):
2652 2707 # Files opened in a+ mode have inconsistent behavior on various
2653 2708 # platforms. Windows requires that a file positioning call be made
2654 2709 # when the file handle transitions between reads and writes. See
2655 2710 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2656 2711 # platforms, Python or the platform itself can be buggy. Some versions
2657 2712 # of Solaris have been observed to not append at the end of the file
2658 2713 # if the file was seeked to before the end. See issue4943 for more.
2659 2714 #
2660 2715 # We work around this issue by inserting a seek() before writing.
2661 2716 # Note: This is likely not necessary on Python 3. However, because
2662 2717 # the file handle is reused for reads and may be seeked there, we need
2663 2718 # to be careful before changing this.
2664 2719 if self._writinghandles is None:
2665 2720 msg = b'adding revision outside `revlog._writing` context'
2666 2721 raise error.ProgrammingError(msg)
2667 ifh, dfh = self._writinghandles
2722 ifh, dfh, sdfh = self._writinghandles
2668 2723 if self._docket is None:
2669 2724 ifh.seek(0, os.SEEK_END)
2670 2725 else:
2671 2726 ifh.seek(self._docket.index_end, os.SEEK_SET)
2672 2727 if dfh:
2673 2728 if self._docket is None:
2674 2729 dfh.seek(0, os.SEEK_END)
2675 2730 else:
2676 2731 dfh.seek(self._docket.data_end, os.SEEK_SET)
2732 if sdfh:
2733 sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2677 2734
2678 2735 curr = len(self) - 1
2679 2736 if not self._inline:
2680 2737 transaction.add(self._datafile, offset)
2738 if self._sidedatafile:
2739 transaction.add(self._sidedatafile, sidedata_offset)
2681 2740 transaction.add(self._indexfile, curr * len(entry))
2682 2741 if data[0]:
2683 2742 dfh.write(data[0])
2684 2743 dfh.write(data[1])
2685 2744 if sidedata:
2686 dfh.write(sidedata)
2745 sdfh.write(sidedata)
2687 2746 ifh.write(entry)
2688 2747 else:
2689 2748 offset += curr * self.index.entry_size
2690 2749 transaction.add(self._indexfile, offset)
2691 2750 ifh.write(entry)
2692 2751 ifh.write(data[0])
2693 2752 ifh.write(data[1])
2694 if sidedata:
2695 ifh.write(sidedata)
2753 assert not sidedata
2696 2754 self._enforceinlinesize(transaction)
2697 2755 if self._docket is not None:
2698 2756 self._docket.index_end = self._writinghandles[0].tell()
2699 2757 self._docket.data_end = self._writinghandles[1].tell()
2758 self._docket.sidedata_end = self._writinghandles[2].tell()
2700 2759
2701 2760 nodemaputil.setup_persistent_nodemap(transaction, self)
2702 2761
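# The write ordering in _writeentry is deliberate: revision data and
# sidedata are written before the index entry that points at them, so an
# interrupted write leaves unreferenced bytes rather than an index entry
# referencing missing data. A hedged sketch of the non-inline append step:
def _append_sketch(ifh, dfh, sdfh, entry, data, sidedata):
    if data:
        dfh.write(data)         # revision payload first
    if sidedata:
        sdfh.write(sidedata)    # sidedata goes to its own file
    ifh.write(entry)            # the index entry is written last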
2703 2762 def addgroup(
2704 2763 self,
2705 2764 deltas,
2706 2765 linkmapper,
2707 2766 transaction,
2708 2767 alwayscache=False,
2709 2768 addrevisioncb=None,
2710 2769 duplicaterevisioncb=None,
2711 2770 ):
2712 2771 """
2713 2772 add a delta group
2714 2773
2715 2774 given a set of deltas, add them to the revision log. the
2716 2775 first delta is against its parent, which should be in our
2717 2776 log, the rest are against the previous delta.
2718 2777
2719 2778 If ``addrevisioncb`` is defined, it will be called with arguments of
2720 2779 this revlog and the node that was added.
2721 2780 """
2722 2781
2723 2782 if self._adding_group:
2724 2783 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2725 2784
2726 2785 self._adding_group = True
2727 2786 empty = True
2728 2787 try:
2729 2788 with self._writing(transaction):
2730 2789 deltacomputer = deltautil.deltacomputer(self)
2731 2790 # loop through our set of deltas
2732 2791 for data in deltas:
2733 2792 (
2734 2793 node,
2735 2794 p1,
2736 2795 p2,
2737 2796 linknode,
2738 2797 deltabase,
2739 2798 delta,
2740 2799 flags,
2741 2800 sidedata,
2742 2801 ) = data
2743 2802 link = linkmapper(linknode)
2744 2803 flags = flags or REVIDX_DEFAULT_FLAGS
2745 2804
2746 2805 rev = self.index.get_rev(node)
2747 2806 if rev is not None:
2748 2807 # this can happen if two branches make the same change
2749 2808 self._nodeduplicatecallback(transaction, rev)
2750 2809 if duplicaterevisioncb:
2751 2810 duplicaterevisioncb(self, rev)
2752 2811 empty = False
2753 2812 continue
2754 2813
2755 2814 for p in (p1, p2):
2756 2815 if not self.index.has_node(p):
2757 2816 raise error.LookupError(
2758 2817 p, self.radix, _(b'unknown parent')
2759 2818 )
2760 2819
2761 2820 if not self.index.has_node(deltabase):
2762 2821 raise error.LookupError(
2763 2822 deltabase, self.display_id, _(b'unknown delta base')
2764 2823 )
2765 2824
2766 2825 baserev = self.rev(deltabase)
2767 2826
2768 2827 if baserev != nullrev and self.iscensored(baserev):
2769 2828 # if base is censored, delta must be full replacement in a
2770 2829 # single patch operation
2771 2830 hlen = struct.calcsize(b">lll")
2772 2831 oldlen = self.rawsize(baserev)
2773 2832 newlen = len(delta) - hlen
2774 2833 if delta[:hlen] != mdiff.replacediffheader(
2775 2834 oldlen, newlen
2776 2835 ):
2777 2836 raise error.CensoredBaseError(
2778 2837 self.display_id, self.node(baserev)
2779 2838 )
2780 2839
2781 2840 if not flags and self._peek_iscensored(baserev, delta):
2782 2841 flags |= REVIDX_ISCENSORED
2783 2842
2784 2843 # We assume consumers of addrevisioncb will want to retrieve
2785 2844 # the added revision, which will require a call to
2786 2845 # revision(). revision() will fast path if there is a cache
2787 2846 # hit. So, we tell _addrevision() to always cache in this case.
2788 2847 # We're only using addgroup() in the context of changegroup
2789 2848 # generation so the revision data can always be handled as raw
2790 2849 # by the flagprocessor.
2791 2850 rev = self._addrevision(
2792 2851 node,
2793 2852 None,
2794 2853 transaction,
2795 2854 link,
2796 2855 p1,
2797 2856 p2,
2798 2857 flags,
2799 2858 (baserev, delta),
2800 2859 alwayscache=alwayscache,
2801 2860 deltacomputer=deltacomputer,
2802 2861 sidedata=sidedata,
2803 2862 )
2804 2863
2805 2864 if addrevisioncb:
2806 2865 addrevisioncb(self, rev)
2807 2866 empty = False
2808 2867 finally:
2809 2868 self._adding_group = False
2810 2869 return not empty
2811 2870
2812 2871 def iscensored(self, rev):
2813 2872 """Check if a file revision is censored."""
2814 2873 if not self._censorable:
2815 2874 return False
2816 2875
2817 2876 return self.flags(rev) & REVIDX_ISCENSORED
2818 2877
2819 2878 def _peek_iscensored(self, baserev, delta):
2820 2879 """Quickly check if a delta produces a censored revision."""
2821 2880 if not self._censorable:
2822 2881 return False
2823 2882
2824 2883 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2825 2884
2826 2885 def getstrippoint(self, minlink):
2827 2886 """find the minimum rev that must be stripped to strip the linkrev
2828 2887
2829 2888 Returns a tuple containing the minimum rev and a set of all revs that
2830 2889 have linkrevs that will be broken by this strip.
2831 2890 """
2832 2891 return storageutil.resolvestripinfo(
2833 2892 minlink,
2834 2893 len(self) - 1,
2835 2894 self.headrevs(),
2836 2895 self.linkrev,
2837 2896 self.parentrevs,
2838 2897 )
2839 2898
2840 2899 def strip(self, minlink, transaction):
2841 2900 """truncate the revlog on the first revision with a linkrev >= minlink
2842 2901
2843 2902 This function is called when we're stripping revision minlink and
2844 2903 its descendants from the repository.
2845 2904
2846 2905 We have to remove all revisions with linkrev >= minlink, because
2847 2906 the equivalent changelog revisions will be renumbered after the
2848 2907 strip.
2849 2908
2850 2909 So we truncate the revlog on the first of these revisions, and
2851 2910 trust that the caller has saved the revisions that shouldn't be
2852 2911 removed and that it'll re-add them after this truncation.
2853 2912 """
2854 2913 if len(self) == 0:
2855 2914 return
2856 2915
2857 2916 rev, _ = self.getstrippoint(minlink)
2858 2917 if rev == len(self):
2859 2918 return
2860 2919
2861 2920 # first truncate the files on disk
2862 2921 data_end = self.start(rev)
2863 2922 if not self._inline:
2864 2923 transaction.add(self._datafile, data_end)
2865 2924 end = rev * self.index.entry_size
2866 2925 else:
2867 2926 end = data_end + (rev * self.index.entry_size)
2868 2927
2928 if self._sidedatafile:
2929 sidedata_end = self.sidedata_cut_off(rev)
2930 transaction.add(self._sidedatafile, sidedata_end)
2931
2869 2932 transaction.add(self._indexfile, end)
2870 2933 if self._docket is not None:
2871 2934 # XXX we could leverage the docket while stripping. However it is
2872 2935 # not powerful enough at the time of this comment
2873 2936 self._docket.index_end = end
2874 2937 self._docket.data_end = data_end
2938 self._docket.sidedata_end = sidedata_end
2875 2939 self._docket.write(transaction, stripping=True)
2876 2940
2877 2941 # then reset internal state in memory to forget those revisions
2878 2942 self._revisioncache = None
2879 2943 self._chaininfocache = util.lrucachedict(500)
2880 2944 self._chunkclear()
2881 2945
2882 2946 del self.index[rev:-1]
2883 2947
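Stripping now has to cut three files and update the docket. A hedged sketch condensing the offset computations above for the non-inline case (all names are taken from this file; `sidedata_cut_off` is the helper this change relies on):

    def sketch_strip_offsets(rl, rev):
        """Where each on-disk file gets truncated when stripping from `rev`."""
        data_end = rl.start(rev)                 # first data byte owned by `rev`
        index_end = rev * rl.index.entry_size    # index entries are fixed width
        sidedata_end = rl.sidedata_cut_off(rev)  # sidedata file cut-off point
        return index_end, data_end, sidedata_end
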
2884 2948 def checksize(self):
2885 2949 """Check size of index and data files
2886 2950
2887 2951 return a (dd, di) tuple.
2888 2952 - dd: extra bytes for the "data" file
2889 2953 - di: extra bytes for the "index" file
2890 2954
2891 2955 A healthy revlog will return (0, 0).
2892 2956 """
2893 2957 expected = 0
2894 2958 if len(self):
2895 2959 expected = max(0, self.end(len(self) - 1))
2896 2960
2897 2961 try:
2898 2962 with self._datafp() as f:
2899 2963 f.seek(0, io.SEEK_END)
2900 2964 actual = f.tell()
2901 2965 dd = actual - expected
2902 2966 except IOError as inst:
2903 2967 if inst.errno != errno.ENOENT:
2904 2968 raise
2905 2969 dd = 0
2906 2970
2907 2971 try:
2908 2972 f = self.opener(self._indexfile)
2909 2973 f.seek(0, io.SEEK_END)
2910 2974 actual = f.tell()
2911 2975 f.close()
2912 2976 s = self.index.entry_size
2913 2977 i = max(0, actual // s)
2914 2978 di = actual - (i * s)
2915 2979 if self._inline:
2916 2980 databytes = 0
2917 2981 for r in self:
2918 2982 databytes += max(0, self.length(r))
2919 2983 dd = 0
2920 2984 di = actual - len(self) * s - databytes
2921 2985 except IOError as inst:
2922 2986 if inst.errno != errno.ENOENT:
2923 2987 raise
2924 2988 di = 0
2925 2989
2926 2990 return (dd, di)
2927 2991
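A hedged illustration of how a caller can read the tuple; `verifyintegrity` further down is the real consumer:

    dd, di = rl.checksize()       # rl: any revlog; a healthy one gives (0, 0)
    # dd > 0: the data file carries bytes the index does not account for
    # di != 0: the index file ends with a partial or extra entry
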
2928 2992 def files(self):
2929 2993 res = [self._indexfile]
2930 2994 if not self._inline:
2931 2995 res.append(self._datafile)
2932 2996 return res
2933 2997
2934 2998 def emitrevisions(
2935 2999 self,
2936 3000 nodes,
2937 3001 nodesorder=None,
2938 3002 revisiondata=False,
2939 3003 assumehaveparentrevisions=False,
2940 3004 deltamode=repository.CG_DELTAMODE_STD,
2941 3005 sidedata_helpers=None,
2942 3006 ):
2943 3007 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2944 3008 raise error.ProgrammingError(
2945 3009 b'unhandled value for nodesorder: %s' % nodesorder
2946 3010 )
2947 3011
2948 3012 if nodesorder is None and not self._generaldelta:
2949 3013 nodesorder = b'storage'
2950 3014
2951 3015 if (
2952 3016 not self._storedeltachains
2953 3017 and deltamode != repository.CG_DELTAMODE_PREV
2954 3018 ):
2955 3019 deltamode = repository.CG_DELTAMODE_FULL
2956 3020
2957 3021 return storageutil.emitrevisions(
2958 3022 self,
2959 3023 nodes,
2960 3024 nodesorder,
2961 3025 revlogrevisiondelta,
2962 3026 deltaparentfn=self.deltaparent,
2963 3027 candeltafn=self.candelta,
2964 3028 rawsizefn=self.rawsize,
2965 3029 revdifffn=self.revdiff,
2966 3030 flagsfn=self.flags,
2967 3031 deltamode=deltamode,
2968 3032 revisiondata=revisiondata,
2969 3033 assumehaveparentrevisions=assumehaveparentrevisions,
2970 3034 sidedata_helpers=sidedata_helpers,
2971 3035 )
2972 3036
2973 3037 DELTAREUSEALWAYS = b'always'
2974 3038 DELTAREUSESAMEREVS = b'samerevs'
2975 3039 DELTAREUSENEVER = b'never'
2976 3040
2977 3041 DELTAREUSEFULLADD = b'fulladd'
2978 3042
2979 3043 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2980 3044
2981 3045 def clone(
2982 3046 self,
2983 3047 tr,
2984 3048 destrevlog,
2985 3049 addrevisioncb=None,
2986 3050 deltareuse=DELTAREUSESAMEREVS,
2987 3051 forcedeltabothparents=None,
2988 3052 sidedata_helpers=None,
2989 3053 ):
2990 3054 """Copy this revlog to another, possibly with format changes.
2991 3055
2992 3056 The destination revlog will contain the same revisions and nodes.
2993 3057 However, it may not be bit-for-bit identical due to e.g. delta encoding
2994 3058 differences.
2995 3059
2996 3060 The ``deltareuse`` argument controls how deltas from the existing revlog
2997 3061 are preserved in the destination revlog. The argument can have the
2998 3062 following values:
2999 3063
3000 3064 DELTAREUSEALWAYS
3001 3065 Deltas will always be reused (if possible), even if the destination
3002 3066 revlog would not select the same revisions for the delta. This is the
3003 3067 fastest mode of operation.
3004 3068 DELTAREUSESAMEREVS
3005 3069 Deltas will be reused if the destination revlog would pick the same
3006 3070 revisions for the delta. This mode strikes a balance between speed
3007 3071 and optimization.
3008 3072 DELTAREUSENEVER
3009 3073 Deltas will never be reused. This is the slowest mode of execution.
3010 3074 This mode can be used to recompute deltas (e.g. if the diff/delta
3011 3075 algorithm changes).
3012 3076 DELTAREUSEFULLADD
3013 3077 Revisions will be re-added as if they were new content. This is
3014 3078 slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
3015 3079 e.g. large file detection and handling.
3016 3080
3017 3081 Delta computation can be slow, so the choice of delta reuse policy can
3018 3082 significantly affect run time.
3019 3083
3020 3084 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
3021 3085 two extremes. Deltas will be reused if they are appropriate. But if the
3022 3086 delta could choose a better revision, it will do so. This means if you
3023 3087 are converting a non-generaldelta revlog to a generaldelta revlog,
3024 3088 deltas will be recomputed if the delta's parent isn't a parent of the
3025 3089 revision.
3026 3090
3027 3091 In addition to the delta policy, the ``forcedeltabothparents``
3028 3092 argument controls whether to force computing deltas against both parents
3029 3093 for merges. If unset, the destination revlog's existing setting is kept.
3030 3094
3031 3095 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
3032 3096 `sidedata_helpers`.
3033 3097 """
3034 3098 if deltareuse not in self.DELTAREUSEALL:
3035 3099 raise ValueError(
3036 3100 _(b'value for deltareuse invalid: %s') % deltareuse
3037 3101 )
3038 3102
3039 3103 if len(destrevlog):
3040 3104 raise ValueError(_(b'destination revlog is not empty'))
3041 3105
3042 3106 if getattr(self, 'filteredrevs', None):
3043 3107 raise ValueError(_(b'source revlog has filtered revisions'))
3044 3108 if getattr(destrevlog, 'filteredrevs', None):
3045 3109 raise ValueError(_(b'destination revlog has filtered revisions'))
3046 3110
3047 3111 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
3048 3112 # if possible.
3049 3113 oldlazydelta = destrevlog._lazydelta
3050 3114 oldlazydeltabase = destrevlog._lazydeltabase
3051 3115 oldamd = destrevlog._deltabothparents
3052 3116
3053 3117 try:
3054 3118 if deltareuse == self.DELTAREUSEALWAYS:
3055 3119 destrevlog._lazydeltabase = True
3056 3120 destrevlog._lazydelta = True
3057 3121 elif deltareuse == self.DELTAREUSESAMEREVS:
3058 3122 destrevlog._lazydeltabase = False
3059 3123 destrevlog._lazydelta = True
3060 3124 elif deltareuse == self.DELTAREUSENEVER:
3061 3125 destrevlog._lazydeltabase = False
3062 3126 destrevlog._lazydelta = False
3063 3127
3064 3128 destrevlog._deltabothparents = forcedeltabothparents or oldamd
3065 3129
3066 3130 self._clone(
3067 3131 tr,
3068 3132 destrevlog,
3069 3133 addrevisioncb,
3070 3134 deltareuse,
3071 3135 forcedeltabothparents,
3072 3136 sidedata_helpers,
3073 3137 )
3074 3138
3075 3139 finally:
3076 3140 destrevlog._lazydelta = oldlazydelta
3077 3141 destrevlog._lazydeltabase = oldlazydeltabase
3078 3142 destrevlog._deltabothparents = oldamd
3079 3143
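A hedged usage sketch of the policies above. `repo.transaction` is the usual localrepo transaction API, and `src`/`dest` are assumed to be already-opened revlogs:

    # recompute every delta while converting a revlog to a new format
    with repo.transaction(b'clone-revlog') as tr:
        src.clone(tr, dest, deltareuse=src.DELTAREUSENEVER)
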
3080 3144 def _clone(
3081 3145 self,
3082 3146 tr,
3083 3147 destrevlog,
3084 3148 addrevisioncb,
3085 3149 deltareuse,
3086 3150 forcedeltabothparents,
3087 3151 sidedata_helpers,
3088 3152 ):
3089 3153 """perform the core duty of `revlog.clone` after parameter processing"""
3090 3154 deltacomputer = deltautil.deltacomputer(destrevlog)
3091 3155 index = self.index
3092 3156 for rev in self:
3093 3157 entry = index[rev]
3094 3158
3095 3159 # Some classes override linkrev to take filtered revs into
3096 3160 # account. Use raw entry from index.
3097 3161 flags = entry[0] & 0xFFFF
3098 3162 linkrev = entry[4]
3099 3163 p1 = index[entry[5]][7]
3100 3164 p2 = index[entry[6]][7]
3101 3165 node = entry[7]
3102 3166
3103 3167 # (Possibly) reuse the delta from the revlog if allowed and
3104 3168 # the revlog chunk is a delta.
3105 3169 cachedelta = None
3106 3170 rawtext = None
3107 3171 if deltareuse == self.DELTAREUSEFULLADD:
3108 3172 text = self._revisiondata(rev)
3109 3173 sidedata = self.sidedata(rev)
3110 3174
3111 3175 if sidedata_helpers is not None:
3112 3176 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3113 3177 self, sidedata_helpers, sidedata, rev
3114 3178 )
3115 3179 flags = flags | new_flags[0] & ~new_flags[1]
3116 3180
3117 3181 destrevlog.addrevision(
3118 3182 text,
3119 3183 tr,
3120 3184 linkrev,
3121 3185 p1,
3122 3186 p2,
3123 3187 cachedelta=cachedelta,
3124 3188 node=node,
3125 3189 flags=flags,
3126 3190 deltacomputer=deltacomputer,
3127 3191 sidedata=sidedata,
3128 3192 )
3129 3193 else:
3130 3194 if destrevlog._lazydelta:
3131 3195 dp = self.deltaparent(rev)
3132 3196 if dp != nullrev:
3133 3197 cachedelta = (dp, bytes(self._chunk(rev)))
3134 3198
3135 3199 sidedata = None
3136 3200 if not cachedelta:
3137 3201 rawtext = self._revisiondata(rev)
3138 3202 sidedata = self.sidedata(rev)
3139 3203 if sidedata is None:
3140 3204 sidedata = self.sidedata(rev)
3141 3205
3142 3206 if sidedata_helpers is not None:
3143 3207 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3144 3208 self, sidedata_helpers, sidedata, rev
3145 3209 )
3146 3210 flags = flags | new_flags[0] & ~new_flags[1]
3147 3211
3148 3212 with destrevlog._writing(tr):
3149 3213 destrevlog._addrevision(
3150 3214 node,
3151 3215 rawtext,
3152 3216 tr,
3153 3217 linkrev,
3154 3218 p1,
3155 3219 p2,
3156 3220 flags,
3157 3221 cachedelta,
3158 3222 deltacomputer=deltacomputer,
3159 3223 sidedata=sidedata,
3160 3224 )
3161 3225
3162 3226 if addrevisioncb:
3163 3227 addrevisioncb(self, rev, node)
3164 3228
3165 3229 def censorrevision(self, tr, censornode, tombstone=b''):
3166 3230 if self._format_version == REVLOGV0:
3167 3231 raise error.RevlogError(
3168 3232 _(b'cannot censor with version %d revlogs')
3169 3233 % self._format_version
3170 3234 )
3171 3235
3172 3236 censorrev = self.rev(censornode)
3173 3237 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
3174 3238
3175 3239 if len(tombstone) > self.rawsize(censorrev):
3176 3240 raise error.Abort(
3177 3241 _(b'censor tombstone must be no longer than censored data')
3178 3242 )
3179 3243
3180 3244 # Rewriting the revlog in place is hard. Our strategy for censoring is
3181 3245 # to create a new revlog, copy all revisions to it, then replace the
3182 3246 # revlogs on transaction close.
3183 3247 #
3184 3248 # This is a bit dangerous. We could easily have a mismatch of state.
3185 3249 newrl = revlog(
3186 3250 self.opener,
3187 3251 target=self.target,
3188 3252 radix=self.radix,
3189 3253 postfix=b'tmpcensored',
3190 3254 censorable=True,
3191 3255 )
3192 3256 newrl._format_version = self._format_version
3193 3257 newrl._format_flags = self._format_flags
3194 3258 newrl._generaldelta = self._generaldelta
3195 3259 newrl._parse_index = self._parse_index
3196 3260
3197 3261 for rev in self.revs():
3198 3262 node = self.node(rev)
3199 3263 p1, p2 = self.parents(node)
3200 3264
3201 3265 if rev == censorrev:
3202 3266 newrl.addrawrevision(
3203 3267 tombstone,
3204 3268 tr,
3205 3269 self.linkrev(censorrev),
3206 3270 p1,
3207 3271 p2,
3208 3272 censornode,
3209 3273 REVIDX_ISCENSORED,
3210 3274 )
3211 3275
3212 3276 if newrl.deltaparent(rev) != nullrev:
3213 3277 raise error.Abort(
3214 3278 _(
3215 3279 b'censored revision stored as delta; '
3216 3280 b'cannot censor'
3217 3281 ),
3218 3282 hint=_(
3219 3283 b'censoring of revlogs is not '
3220 3284 b'fully implemented; please report '
3221 3285 b'this bug'
3222 3286 ),
3223 3287 )
3224 3288 continue
3225 3289
3226 3290 if self.iscensored(rev):
3227 3291 if self.deltaparent(rev) != nullrev:
3228 3292 raise error.Abort(
3229 3293 _(
3230 3294 b'cannot censor due to censored '
3231 3295 b'revision having delta stored'
3232 3296 )
3233 3297 )
3234 3298 rawtext = self._chunk(rev)
3235 3299 else:
3236 3300 rawtext = self.rawdata(rev)
3237 3301
3238 3302 newrl.addrawrevision(
3239 3303 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
3240 3304 )
3241 3305
3242 3306 tr.addbackup(self._indexfile, location=b'store')
3243 3307 if not self._inline:
3244 3308 tr.addbackup(self._datafile, location=b'store')
3245 3309
3246 3310 self.opener.rename(newrl._indexfile, self._indexfile)
3247 3311 if not self._inline:
3248 3312 self.opener.rename(newrl._datafile, self._datafile)
3249 3313
3250 3314 self.clearcaches()
3251 3315 self._loadindex()
3252 3316
3253 3317 def verifyintegrity(self, state):
3254 3318 """Verifies the integrity of the revlog.
3255 3319
3256 3320 Yields ``revlogproblem`` instances describing problems that are
3257 3321 found.
3258 3322 """
3259 3323 dd, di = self.checksize()
3260 3324 if dd:
3261 3325 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3262 3326 if di:
3263 3327 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3264 3328
3265 3329 version = self._format_version
3266 3330
3267 3331 # The verifier tells us what version revlog we should be.
3268 3332 if version != state[b'expectedversion']:
3269 3333 yield revlogproblem(
3270 3334 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3271 3335 % (self.display_id, version, state[b'expectedversion'])
3272 3336 )
3273 3337
3274 3338 state[b'skipread'] = set()
3275 3339 state[b'safe_renamed'] = set()
3276 3340
3277 3341 for rev in self:
3278 3342 node = self.node(rev)
3279 3343
3280 3344 # Verify contents. 4 cases to care about:
3281 3345 #
3282 3346 # common: the most common case
3283 3347 # rename: with a rename
3284 3348 # meta: file content starts with b'\1\n', the metadata
3285 3349 # header defined in filelog.py, but without a rename
3286 3350 # ext: content stored externally
3287 3351 #
3288 3352 # More formally, their differences are shown below:
3289 3353 #
3290 3354 # | common | rename | meta | ext
3291 3355 # -------------------------------------------------------
3292 3356 # flags() | 0 | 0 | 0 | not 0
3293 3357 # renamed() | False | True | False | ?
3294 3358 # rawtext[0:2]=='\1\n'| False | True | True | ?
3295 3359 #
3296 3360 # "rawtext" means the raw text stored in revlog data, which
3297 3361 # could be retrieved by "rawdata(rev)". "text"
3298 3362 # mentioned below is "revision(rev)".
3299 3363 #
3300 3364 # There are 3 different lengths stored physically:
3301 3365 # 1. L1: rawsize, stored in revlog index
3302 3366 # 2. L2: len(rawtext), stored in revlog data
3303 3367 # 3. L3: len(text), stored in revlog data if flags==0, or
3304 3368 # possibly somewhere else if flags!=0
3305 3369 #
3306 3370 # L1 should be equal to L2. L3 could be different from them.
3307 3371 # "text" may or may not affect commit hash depending on flag
3308 3372 # processors (see flagutil.addflagprocessor).
3309 3373 #
3310 3374 # | common | rename | meta | ext
3311 3375 # -------------------------------------------------
3312 3376 # rawsize() | L1 | L1 | L1 | L1
3313 3377 # size() | L1 | L2-LM | L1(*) | L1 (?)
3314 3378 # len(rawtext) | L2 | L2 | L2 | L2
3315 3379 # len(text) | L2 | L2 | L2 | L3
3316 3380 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3317 3381 #
3318 3382 # LM: length of metadata, depending on rawtext
3319 3383 # (*): not ideal, see comment in filelog.size
3320 3384 # (?): could be "- len(meta)" if the resolved content has
3321 3385 # rename metadata
3322 3386 #
3323 3387 # Checks needed to be done:
3324 3388 # 1. length check: L1 == L2, in all cases.
3325 3389 # 2. hash check: depending on flag processor, we may need to
3326 3390 # use either "text" (external), or "rawtext" (in revlog).
3327 3391
3328 3392 try:
3329 3393 skipflags = state.get(b'skipflags', 0)
3330 3394 if skipflags:
3331 3395 skipflags &= self.flags(rev)
3332 3396
3333 3397 _verify_revision(self, skipflags, state, node)
3334 3398
3335 3399 l1 = self.rawsize(rev)
3336 3400 l2 = len(self.rawdata(node))
3337 3401
3338 3402 if l1 != l2:
3339 3403 yield revlogproblem(
3340 3404 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3341 3405 node=node,
3342 3406 )
3343 3407
3344 3408 except error.CensoredNodeError:
3345 3409 if state[b'erroroncensored']:
3346 3410 yield revlogproblem(
3347 3411 error=_(b'censored file data'), node=node
3348 3412 )
3349 3413 state[b'skipread'].add(node)
3350 3414 except Exception as e:
3351 3415 yield revlogproblem(
3352 3416 error=_(b'unpacking %s: %s')
3353 3417 % (short(node), stringutil.forcebytestr(e)),
3354 3418 node=node,
3355 3419 )
3356 3420 state[b'skipread'].add(node)
3357 3421
3358 3422 def storageinfo(
3359 3423 self,
3360 3424 exclusivefiles=False,
3361 3425 sharedfiles=False,
3362 3426 revisionscount=False,
3363 3427 trackedsize=False,
3364 3428 storedsize=False,
3365 3429 ):
3366 3430 d = {}
3367 3431
3368 3432 if exclusivefiles:
3369 3433 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3370 3434 if not self._inline:
3371 3435 d[b'exclusivefiles'].append((self.opener, self._datafile))
3372 3436
3373 3437 if sharedfiles:
3374 3438 d[b'sharedfiles'] = []
3375 3439
3376 3440 if revisionscount:
3377 3441 d[b'revisionscount'] = len(self)
3378 3442
3379 3443 if trackedsize:
3380 3444 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3381 3445
3382 3446 if storedsize:
3383 3447 d[b'storedsize'] = sum(
3384 3448 self.opener.stat(path).st_size for path in self.files()
3385 3449 )
3386 3450
3387 3451 return d
3388 3452
3389 3453 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3390 3454 if not self.hassidedata:
3391 3455 return
3392 3456 # revlog formats with sidedata support do not support inline revlogs
3393 3457 assert not self._inline
3394 3458 if not helpers[1] and not helpers[2]:
3395 3459 # Nothing to generate or remove
3396 3460 return
3397 3461
3398 3462 new_entries = []
3399 3463 # append the new sidedata
3400 3464 with self._writing(transaction):
3401 ifh, dfh = self._writinghandles
3402 if self._docket is not None:
3403 dfh.seek(self._docket.data_end, os.SEEK_SET)
3404 else:
3405 dfh.seek(0, os.SEEK_END)
3406
3407 current_offset = dfh.tell()
3465 ifh, dfh, sdfh = self._writinghandles
3466 sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
3467
3468 current_offset = sdfh.tell()
3408 3469 for rev in range(startrev, endrev + 1):
3409 3470 entry = self.index[rev]
3410 3471 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3411 3472 store=self,
3412 3473 sidedata_helpers=helpers,
3413 3474 sidedata={},
3414 3475 rev=rev,
3415 3476 )
3416 3477
3417 3478 serialized_sidedata = sidedatautil.serialize_sidedata(
3418 3479 new_sidedata
3419 3480 )
3420 3481
3421 3482 sidedata_compression_mode = COMP_MODE_INLINE
3422 3483 if serialized_sidedata and self.hassidedata:
3423 3484 sidedata_compression_mode = COMP_MODE_PLAIN
3424 3485 h, comp_sidedata = self.compress(serialized_sidedata)
3425 3486 if (
3426 3487 h != b'u'
3427 3488 and comp_sidedata[0:1] != b'\0'
3428 3489 and len(comp_sidedata) < len(serialized_sidedata)
3429 3490 ):
3430 3491 assert not h
3431 3492 if (
3432 3493 comp_sidedata[0:1]
3433 3494 == self._docket.default_compression_header
3434 3495 ):
3435 3496 sidedata_compression_mode = COMP_MODE_DEFAULT
3436 3497 serialized_sidedata = comp_sidedata
3437 3498 else:
3438 3499 sidedata_compression_mode = COMP_MODE_INLINE
3439 3500 serialized_sidedata = comp_sidedata
3440 3501 if entry[8] != 0 or entry[9] != 0:
3441 3502 # rewriting entries that already have sidedata is not
3442 3503 # supported yet, because it introduces garbage data in the
3443 3504 # revlog.
3444 3505 msg = b"rewriting existing sidedata is not supported yet"
3445 3506 raise error.Abort(msg)
3446 3507
3447 3508 # Apply (potential) flags to add and to remove after running
3448 3509 # the sidedata helpers
3449 3510 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3450 3511 entry_update = (
3451 3512 current_offset,
3452 3513 len(serialized_sidedata),
3453 3514 new_offset_flags,
3454 3515 sidedata_compression_mode,
3455 3516 )
3456 3517
3457 3518 # the sidedata computation might have moved the file cursors around
3458 dfh.seek(current_offset, os.SEEK_SET)
3459 dfh.write(serialized_sidedata)
3519 sdfh.seek(current_offset, os.SEEK_SET)
3520 sdfh.write(serialized_sidedata)
3460 3521 new_entries.append(entry_update)
3461 3522 current_offset += len(serialized_sidedata)
3462 if self._docket is not None:
3463 self._docket.data_end = dfh.tell()
3523 self._docket.sidedata_end = sdfh.tell()
3464 3524
3465 3525 # rewrite the new index entries
3466 3526 ifh.seek(startrev * self.index.entry_size)
3467 3527 for i, e in enumerate(new_entries):
3468 3528 rev = startrev + i
3469 3529 self.index.replace_sidedata_info(rev, *e)
3470 3530 packed = self.index.entry_binary(rev)
3471 3531 if rev == 0 and self._docket is None:
3472 3532 header = self._format_flags | self._format_version
3473 3533 header = self.index.pack_header(header)
3474 3534 packed = header + packed
3475 3535 ifh.write(packed)
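The three-way compression decision above is easy to misread, so here is a hedged distillation. `compress` stands for `self.compress`, the constants are the ones used in this file, and the empty-chunk case (which keeps COMP_MODE_INLINE) is elided:

    def sketch_sidedata_mode(raw, compress, default_header):
        # returns (storage mode, payload) for one non-empty sidedata chunk
        h, comp = compress(raw)
        worth_it = h != b'u' and comp[0:1] != b'\0' and len(comp) < len(raw)
        if not worth_it:
            return COMP_MODE_PLAIN, raw     # store the chunk uncompressed
        if comp[0:1] == default_header:
            return COMP_MODE_DEFAULT, comp  # engine header matches the docket default
        return COMP_MODE_INLINE, comp       # keep the engine header with the chunk
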
@@ -1,287 +1,333 b''
1 1 # docket - code related to revlog "docket"
2 2 #
3 3 # Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 ### Revlog docket file
9 9 #
10 10 # The revlog is stored on disk using multiple files:
11 11 #
12 12 # * a small docket file, containing metadata and a pointer,
13 13 #
14 14 # * an index file, containing fixed width information about revisions,
15 15 #
16 16 # * a data file, containing variable width data for these revisions,
17 17
18 18 from __future__ import absolute_import
19 19
20 20 import errno
21 21 import os
22 22 import random
23 23 import struct
24 24
25 25 from .. import (
26 26 encoding,
27 27 error,
28 28 node,
29 29 pycompat,
30 30 util,
31 31 )
32 32
33 33 from . import (
34 34 constants,
35 35 )
36 36
37 37
38 38 def make_uid(id_size=8):
39 39 """return a new unique identifier.
40 40
41 41 The identifier is random and composed of ascii characters."""
42 42 # since we "hex" the result, we only need half as many random bytes to
43 43 # produce a final uuid of id_size characters
44 44 return node.hex(os.urandom(id_size // 2))
45 45
46 46
47 47 # some special test logic to avoid annoying random output in the tests
48 48 stable_docket_file = encoding.environ.get(b'HGTEST_UUIDFILE')
49 49
50 50 if stable_docket_file:
51 51
52 52 def make_uid(id_size=8):
53 53 try:
54 54 with open(stable_docket_file, mode='rb') as f:
55 55 seed = f.read().strip()
56 56 except IOError as inst:
57 57 if inst.errno != errno.ENOENT:
58 58 raise
59 59 seed = b'04' # chosen by a fair dice roll. guaranteed to be random
60 60 if pycompat.ispy3:
61 61 iter_seed = iter(seed)
62 62 else:
63 63 iter_seed = (ord(c) for c in seed)
64 64 # some basic circular sum hashing on 64 bits
65 65 int_seed = 0
66 66 low_mask = int('1' * 35, 2)
67 67 for i in iter_seed:
68 68 high_part = int_seed >> 35
69 69 low_part = (int_seed & low_mask) << 28
70 70 int_seed = high_part + low_part + i
71 71 r = random.Random()
72 72 if pycompat.ispy3:
73 73 r.seed(int_seed, version=1)
74 74 else:
75 75 r.seed(int_seed)
76 76 # once we drop python 3.8 support we can simply use r.randbytes
77 77 raw = r.getrandbits(id_size * 4)
78 78 assert id_size == 8
79 79 p = struct.pack('>L', raw)
80 80 new = node.hex(p)
81 81 with open(stable_docket_file, 'wb') as f:
82 82 f.write(new)
83 83 return new
84 84
85 85
86 86 # Docket format
87 87 #
88 88 # * 4 bytes: revlog version
89 89 # | This is mandatory as docket must be compatible with the previous
90 90 # | revlog index header.
91 91 # * 1 bytes: size of index uuid
92 92 # * 1 bytes: size of data uuid
93 # * 1 bytes: size of sidedata uuid
93 94 # * 8 bytes: size of index-data
94 95 # * 8 bytes: pending size of index-data
95 96 # * 8 bytes: size of data
96 97 # * 8 bytes: pending size of data
98 # * 8 bytes: size of sidedata
99 # * 8 bytes: pending size of sidedata
97 100 # * 1 bytes: default compression header
98 S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBLLLLc')
101 S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBLLLLLLc')
99 102
100 103
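A hedged sketch of reading the fixed part of this header; the field order matches `_serialize`/`parse_docket` below. It assumes `INDEX_HEADER_FMT` expands to the classic big-endian b'>I' index header (note that with a b'>' prefix, struct's 'L' occupies 4 bytes in standard size mode):

    import struct

    S_HEADER = struct.Struct(b'>I' + b'BBBLLLLLLc')  # assumed expansion

    def sketch_read_docket_header(data):
        (version, index_uuid_len, data_uuid_len, sidedata_uuid_len,
         index_end, pending_index_end,
         data_end, pending_data_end,
         sidedata_end, pending_sidedata_end,
         comp_header) = S_HEADER.unpack_from(data)
        # the three variable-length uuids follow at offset S_HEADER.size
        return version, sidedata_end, pending_sidedata_end
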
101 104 class RevlogDocket(object):
102 105 """metadata associated with revlog"""
103 106
104 107 def __init__(
105 108 self,
106 109 revlog,
107 110 use_pending=False,
108 111 version_header=None,
109 112 index_uuid=None,
110 113 data_uuid=None,
114 sidedata_uuid=None,
111 115 index_end=0,
112 116 pending_index_end=0,
113 117 data_end=0,
114 118 pending_data_end=0,
119 sidedata_end=0,
120 pending_sidedata_end=0,
115 121 default_compression_header=None,
116 122 ):
117 123 self._version_header = version_header
118 124 self._read_only = bool(use_pending)
119 125 self._dirty = False
120 126 self._radix = revlog.radix
121 127 self._path = revlog._docket_file
122 128 self._opener = revlog.opener
123 129 self._index_uuid = index_uuid
124 130 self._data_uuid = data_uuid
131 self._sidedata_uuid = sidedata_uuid
125 132 # these asserts should be True as long as we have a single index filename
126 133 assert index_end <= pending_index_end
127 134 assert data_end <= pending_data_end
135 assert sidedata_end <= pending_sidedata_end
128 136 self._initial_index_end = index_end
129 137 self._pending_index_end = pending_index_end
130 138 self._initial_data_end = data_end
131 139 self._pending_data_end = pending_data_end
140 self._initial_sidedata_end = sidedata_end
141 self._pending_sidedata_end = pending_sidedata_end
132 142 if use_pending:
133 143 self._index_end = self._pending_index_end
134 144 self._data_end = self._pending_data_end
145 self._sidedata_end = self._pending_sidedata_end
135 146 else:
136 147 self._index_end = self._initial_index_end
137 148 self._data_end = self._initial_data_end
149 self._sidedata_end = self._initial_sidedata_end
138 150 self.default_compression_header = default_compression_header
139 151
140 152 def index_filepath(self):
141 153 """file path to the current index file associated to this docket"""
142 154 # very simplistic version at first
143 155 if self._index_uuid is None:
144 156 self._index_uuid = make_uid()
145 157 return b"%s-%s.idx" % (self._radix, self._index_uuid)
146 158
147 159 def data_filepath(self):
148 160 """file path to the current data file associated to this docket"""
149 161 # very simplistic version at first
150 162 if self._data_uuid is None:
151 163 self._data_uuid = make_uid()
152 164 return b"%s-%s.dat" % (self._radix, self._data_uuid)
153 165
166 def sidedata_filepath(self):
167 """file path to the current sidedata file associated to this docket"""
168 # very simplistic version at first
169 if self._sidedata_uuid is None:
170 self._sidedata_uuid = make_uid()
171 return b"%s-%s.sda" % (self._radix, self._sidedata_uuid)
172
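Taken together, the three helpers give every revlog generation its own trio of store files. A hedged example, with purely illustrative uuids:

    # radix b'00changelog', uuids invented for the example:
    #   00changelog-deadbeef.idx   <- index_filepath()
    #   00changelog-cafebabe.dat   <- data_filepath()
    #   00changelog-0badf00d.sda   <- sidedata_filepath()
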
154 173 @property
155 174 def index_end(self):
156 175 return self._index_end
157 176
158 177 @index_end.setter
159 178 def index_end(self, new_size):
160 179 if new_size != self._index_end:
161 180 self._index_end = new_size
162 181 self._dirty = True
163 182
164 183 @property
165 184 def data_end(self):
166 185 return self._data_end
167 186
168 187 @data_end.setter
169 188 def data_end(self, new_size):
170 189 if new_size != self._data_end:
171 190 self._data_end = new_size
172 191 self._dirty = True
173 192
193 @property
194 def sidedata_end(self):
195 return self._sidedata_end
196
197 @sidedata_end.setter
198 def sidedata_end(self, new_size):
199 if new_size != self._sidedata_end:
200 self._sidedata_end = new_size
201 self._dirty = True
202
174 203 def write(self, transaction, pending=False, stripping=False):
175 204 """write the modifications to disk, if any
176 205
177 206 This makes the new content visible to all processes"""
178 207 if not self._dirty:
179 208 return False
180 209 else:
181 210 if self._read_only:
182 211 msg = b'writing read-only docket: %s'
183 212 msg %= self._path
184 213 raise error.ProgrammingError(msg)
185 214 if not stripping:
186 215 # XXX we could leverage the docket while stripping. However it
187 216 # is not powerful enough at the time of this comment
188 217 transaction.addbackup(self._path, location=b'store')
189 218 with self._opener(self._path, mode=b'w', atomictemp=True) as f:
190 219 f.write(self._serialize(pending=pending))
191 220 # if pending, we still need to write the final data eventually
192 221 self._dirty = pending
193 222 return True
194 223
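A hedged sketch of the two-phase write this method enables (hook execution and error handling elided):

    # expose the pending sizes to hooks first; the official sizes stay at
    # their initial values and the docket remains dirty
    docket.write(tr, pending=True)
    # ... pre-close hooks may read the repository here ...
    docket.write(tr)  # final write publishes the new sizes and clears dirtiness
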
195 224 def _serialize(self, pending=False):
196 225 if pending:
197 226 official_index_end = self._initial_index_end
198 227 official_data_end = self._initial_data_end
228 official_sidedata_end = self._initial_sidedata_end
199 229 else:
200 230 official_index_end = self._index_end
201 231 official_data_end = self._data_end
232 official_sidedata_end = self._sidedata_end
202 233
203 234 # these asserts should be True as long as we have a single index filename
204 235 assert official_data_end <= self._data_end
236 assert official_sidedata_end <= self._sidedata_end
205 237 data = (
206 238 self._version_header,
207 239 len(self._index_uuid),
208 240 len(self._data_uuid),
241 len(self._sidedata_uuid),
209 242 official_index_end,
210 243 self._index_end,
211 244 official_data_end,
212 245 self._data_end,
246 official_sidedata_end,
247 self._sidedata_end,
213 248 self.default_compression_header,
214 249 )
215 250 s = []
216 251 s.append(S_HEADER.pack(*data))
217 252 s.append(self._index_uuid)
218 253 s.append(self._data_uuid)
254 s.append(self._sidedata_uuid)
219 255 return b''.join(s)
220 256
221 257
222 258 def default_docket(revlog, version_header):
223 259 """given a revlog version, return a new docket object for the given revlog"""
224 260 rl_version = version_header & 0xFFFF
225 261 if rl_version not in (constants.REVLOGV2, constants.CHANGELOGV2):
226 262 return None
227 263 comp = util.compengines[revlog._compengine].revlogheader()
228 264 docket = RevlogDocket(
229 265 revlog,
230 266 version_header=version_header,
231 267 default_compression_header=comp,
232 268 )
233 269 docket._dirty = True
234 270 return docket
235 271
236 272
237 273 def parse_docket(revlog, data, use_pending=False):
238 274 """given some docket data return a docket object for the given revlog"""
239 275 header = S_HEADER.unpack(data[: S_HEADER.size])
240 276
241 277 # this is a mutable closure capture used in `get_data`
242 278 offset = [S_HEADER.size]
243 279
244 280 def get_data(size):
245 281 """utility closure to access the next `size` bytes"""
246 282 if offset[0] + size > len(data):
247 283 # XXX better class
248 284 msg = b"docket is too short, expected %d got %d"
249 285 msg %= (offset[0] + size, len(data))
250 286 raise error.Abort(msg)
251 287 raw = data[offset[0] : offset[0] + size]
252 288 offset[0] += size
253 289 return raw
254 290
255 291 iheader = iter(header)
256 292
257 293 version_header = next(iheader)
258 294
259 295 index_uuid_size = next(iheader)
260 296 index_uuid = get_data(index_uuid_size)
261 297
262 298 data_uuid_size = next(iheader)
263 299 data_uuid = get_data(data_uuid_size)
264 300
301 sidedata_uuid_size = next(iheader)
302 sidedata_uuid = get_data(sidedata_uuid_size)
303
265 304 index_size = next(iheader)
266 305
267 306 pending_index_size = next(iheader)
268 307
269 308 data_size = next(iheader)
270 309
271 310 pending_data_size = next(iheader)
272 311
312 sidedata_size = next(iheader)
313
314 pending_sidedata_size = next(iheader)
315
273 316 default_compression_header = next(iheader)
274 317
275 318 docket = RevlogDocket(
276 319 revlog,
277 320 use_pending=use_pending,
278 321 version_header=version_header,
279 322 index_uuid=index_uuid,
280 323 data_uuid=data_uuid,
324 sidedata_uuid=sidedata_uuid,
281 325 index_end=index_size,
282 326 pending_index_end=pending_index_size,
283 327 data_end=data_size,
284 328 pending_data_end=pending_data_size,
329 sidedata_end=sidedata_size,
330 pending_sidedata_end=pending_sidedata_size,
285 331 default_compression_header=default_compression_header,
286 332 )
287 333 return docket
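A hedged round-trip sketch tying `_serialize` and `parse_docket` together (transaction plumbing elided; `revlog` is the owning revlog):

    raw = docket._serialize()
    parsed = parse_docket(revlog, raw)
    assert parsed._sidedata_uuid == docket._sidedata_uuid
    assert parsed.sidedata_end == docket.sidedata_end
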
@@ -1,823 +1,824 b''
1 1 # store.py - repository store handling for Mercurial
2 2 #
3 3 # Copyright 2008 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import getattr
18 18 from .node import hex
19 19 from . import (
20 20 changelog,
21 21 error,
22 22 manifest,
23 23 policy,
24 24 pycompat,
25 25 util,
26 26 vfs as vfsmod,
27 27 )
28 28 from .utils import hashutil
29 29
30 30 parsers = policy.importmod('parsers')
31 31 # how much bytes should be read from fncache in one read
32 32 # It is done to prevent loading large fncache files into memory
33 33 fncache_chunksize = 10 ** 6
34 34
35 35
36 36 def _matchtrackedpath(path, matcher):
37 37 """parses a fncache entry and returns whether the entry is tracking a path
38 38 matched by matcher or not.
39 39
40 40 If matcher is None, returns True"""
41 41
42 42 if matcher is None:
43 43 return True
44 44 path = decodedir(path)
45 45 if path.startswith(b'data/'):
46 46 return matcher(path[len(b'data/') : -len(b'.i')])
47 47 elif path.startswith(b'meta/'):
48 48 return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])
49 49
50 50 raise error.ProgrammingError(b"cannot decode path %s" % path)
51 51
52 52
53 53 # This avoids a collision between a file named foo and a dir named
54 54 # foo.i or foo.d
55 55 def _encodedir(path):
56 56 """
57 57 >>> _encodedir(b'data/foo.i')
58 58 'data/foo.i'
59 59 >>> _encodedir(b'data/foo.i/bla.i')
60 60 'data/foo.i.hg/bla.i'
61 61 >>> _encodedir(b'data/foo.i.hg/bla.i')
62 62 'data/foo.i.hg.hg/bla.i'
63 63 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
64 64 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
65 65 """
66 66 return (
67 67 path.replace(b".hg/", b".hg.hg/")
68 68 .replace(b".i/", b".i.hg/")
69 69 .replace(b".d/", b".d.hg/")
70 70 )
71 71
72 72
73 73 encodedir = getattr(parsers, 'encodedir', _encodedir)
74 74
75 75
76 76 def decodedir(path):
77 77 """
78 78 >>> decodedir(b'data/foo.i')
79 79 'data/foo.i'
80 80 >>> decodedir(b'data/foo.i.hg/bla.i')
81 81 'data/foo.i/bla.i'
82 82 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
83 83 'data/foo.i.hg/bla.i'
84 84 """
85 85 if b".hg/" not in path:
86 86 return path
87 87 return (
88 88 path.replace(b".d.hg/", b".d/")
89 89 .replace(b".i.hg/", b".i/")
90 90 .replace(b".hg.hg/", b".hg/")
91 91 )
92 92
93 93
94 94 def _reserved():
95 95 """characters that are problematic for filesystems
96 96
97 97 * ascii escapes (0..31)
98 98 * ascii hi (126..255)
99 99 * windows specials
100 100
101 101 these characters will be escaped by encodefunctions
102 102 """
103 103 winreserved = [ord(x) for x in u'\\:*?"<>|']
104 104 for x in range(32):
105 105 yield x
106 106 for x in range(126, 256):
107 107 yield x
108 108 for x in winreserved:
109 109 yield x
110 110
111 111
112 112 def _buildencodefun():
113 113 """
114 114 >>> enc, dec = _buildencodefun()
115 115
116 116 >>> enc(b'nothing/special.txt')
117 117 'nothing/special.txt'
118 118 >>> dec(b'nothing/special.txt')
119 119 'nothing/special.txt'
120 120
121 121 >>> enc(b'HELLO')
122 122 '_h_e_l_l_o'
123 123 >>> dec(b'_h_e_l_l_o')
124 124 'HELLO'
125 125
126 126 >>> enc(b'hello:world?')
127 127 'hello~3aworld~3f'
128 128 >>> dec(b'hello~3aworld~3f')
129 129 'hello:world?'
130 130
131 131 >>> enc(b'the\\x07quick\\xADshot')
132 132 'the~07quick~adshot'
133 133 >>> dec(b'the~07quick~adshot')
134 134 'the\\x07quick\\xadshot'
135 135 """
136 136 e = b'_'
137 137 xchr = pycompat.bytechr
138 138 asciistr = list(map(xchr, range(127)))
139 139 capitals = list(range(ord(b"A"), ord(b"Z") + 1))
140 140
141 141 cmap = {x: x for x in asciistr}
142 142 for x in _reserved():
143 143 cmap[xchr(x)] = b"~%02x" % x
144 144 for x in capitals + [ord(e)]:
145 145 cmap[xchr(x)] = e + xchr(x).lower()
146 146
147 147 dmap = {}
148 148 for k, v in pycompat.iteritems(cmap):
149 149 dmap[v] = k
150 150
151 151 def decode(s):
152 152 i = 0
153 153 while i < len(s):
154 154 for l in pycompat.xrange(1, 4):
155 155 try:
156 156 yield dmap[s[i : i + l]]
157 157 i += l
158 158 break
159 159 except KeyError:
160 160 pass
161 161 else:
162 162 raise KeyError
163 163
164 164 return (
165 165 lambda s: b''.join(
166 166 [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))]
167 167 ),
168 168 lambda s: b''.join(list(decode(s))),
169 169 )
170 170
171 171
172 172 _encodefname, _decodefname = _buildencodefun()
173 173
174 174
175 175 def encodefilename(s):
176 176 """
177 177 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
178 178 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
179 179 """
180 180 return _encodefname(encodedir(s))
181 181
182 182
183 183 def decodefilename(s):
184 184 """
185 185 >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
186 186 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
187 187 """
188 188 return decodedir(_decodefname(s))
189 189
190 190
191 191 def _buildlowerencodefun():
192 192 """
193 193 >>> f = _buildlowerencodefun()
194 194 >>> f(b'nothing/special.txt')
195 195 'nothing/special.txt'
196 196 >>> f(b'HELLO')
197 197 'hello'
198 198 >>> f(b'hello:world?')
199 199 'hello~3aworld~3f'
200 200 >>> f(b'the\\x07quick\\xADshot')
201 201 'the~07quick~adshot'
202 202 """
203 203 xchr = pycompat.bytechr
204 204 cmap = {xchr(x): xchr(x) for x in pycompat.xrange(127)}
205 205 for x in _reserved():
206 206 cmap[xchr(x)] = b"~%02x" % x
207 207 for x in range(ord(b"A"), ord(b"Z") + 1):
208 208 cmap[xchr(x)] = xchr(x).lower()
209 209
210 210 def lowerencode(s):
211 211 return b"".join([cmap[c] for c in pycompat.iterbytestr(s)])
212 212
213 213 return lowerencode
214 214
215 215
216 216 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
217 217
218 218 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
219 219 _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3
220 220 _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9)
221 221
222 222
223 223 def _auxencode(path, dotencode):
224 224 """
225 225 Encodes filenames containing names reserved by Windows or which end in
226 226 period or space. Does not touch other single reserved characters c.
227 227 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
228 228 Additionally encodes space or period at the beginning, if dotencode is
229 229 True. Parameter path is assumed to be all lowercase.
230 230 A segment only needs encoding if a reserved name appears as a
231 231 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
232 232 doesn't need encoding.
233 233
234 234 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
235 235 >>> _auxencode(s.split(b'/'), True)
236 236 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
237 237 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
238 238 >>> _auxencode(s.split(b'/'), False)
239 239 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
240 240 >>> _auxencode([b'foo. '], True)
241 241 ['foo.~20']
242 242 >>> _auxencode([b' .foo'], True)
243 243 ['~20.foo']
244 244 """
245 245 for i, n in enumerate(path):
246 246 if not n:
247 247 continue
248 248 if dotencode and n[0] in b'. ':
249 249 n = b"~%02x" % ord(n[0:1]) + n[1:]
250 250 path[i] = n
251 251 else:
252 252 l = n.find(b'.')
253 253 if l == -1:
254 254 l = len(n)
255 255 if (l == 3 and n[:3] in _winres3) or (
256 256 l == 4
257 257 and n[3:4] <= b'9'
258 258 and n[3:4] >= b'1'
259 259 and n[:3] in _winres4
260 260 ):
261 261 # encode third letter ('aux' -> 'au~78')
262 262 ec = b"~%02x" % ord(n[2:3])
263 263 n = n[0:2] + ec + n[3:]
264 264 path[i] = n
265 265 if n[-1] in b'. ':
266 266 # encode last period or space ('foo...' -> 'foo..~2e')
267 267 path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
268 268 return path
269 269
270 270
271 271 _maxstorepathlen = 120
272 272 _dirprefixlen = 8
273 273 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
274 274
275 275
276 276 def _hashencode(path, dotencode):
277 277 digest = hex(hashutil.sha1(path).digest())
278 278 le = lowerencode(path[5:]).split(b'/') # skips prefix 'data/' or 'meta/'
279 279 parts = _auxencode(le, dotencode)
280 280 basename = parts[-1]
281 281 _root, ext = os.path.splitext(basename)
282 282 sdirs = []
283 283 sdirslen = 0
284 284 for p in parts[:-1]:
285 285 d = p[:_dirprefixlen]
286 286 if d[-1] in b'. ':
287 287 # Windows can't access dirs ending in period or space
288 288 d = d[:-1] + b'_'
289 289 if sdirslen == 0:
290 290 t = len(d)
291 291 else:
292 292 t = sdirslen + 1 + len(d)
293 293 if t > _maxshortdirslen:
294 294 break
295 295 sdirs.append(d)
296 296 sdirslen = t
297 297 dirs = b'/'.join(sdirs)
298 298 if len(dirs) > 0:
299 299 dirs += b'/'
300 300 res = b'dh/' + dirs + digest + ext
301 301 spaceleft = _maxstorepathlen - len(res)
302 302 if spaceleft > 0:
303 303 filler = basename[:spaceleft]
304 304 res = b'dh/' + dirs + filler + digest + ext
305 305 return res
306 306
307 307
308 308 def _hybridencode(path, dotencode):
309 309 """encodes path with a length limit
310 310
311 311 Encodes all paths that begin with 'data/', according to the following.
312 312
313 313 Default encoding (reversible):
314 314
315 315 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
316 316 characters are encoded as '~xx', where xx is the two digit hex code
317 317 of the character (see encodefilename).
318 318 Relevant path components consisting of Windows reserved filenames are
319 319 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
320 320
321 321 Hashed encoding (not reversible):
322 322
323 323 If the default-encoded path is longer than _maxstorepathlen, a
324 324 non-reversible hybrid hashing of the path is done instead.
325 325 This encoding uses up to _dirprefixlen characters of all directory
326 326 levels of the lowerencoded path, but not more levels than can fit into
327 327 _maxshortdirslen.
328 328 Then follows the filler followed by the sha digest of the full path.
329 329 The filler is the beginning of the basename of the lowerencoded path
330 330 (the basename is everything after the last path separator). The filler
331 331 is as long as possible, filling in characters from the basename until
332 332 the encoded path has _maxstorepathlen characters (or all chars of the
333 333 basename have been taken).
334 334 The extension (e.g. '.i' or '.d') is preserved.
335 335
336 336 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
337 337 encoding was used.
338 338 """
339 339 path = encodedir(path)
340 340 ef = _encodefname(path).split(b'/')
341 341 res = b'/'.join(_auxencode(ef, dotencode))
342 342 if len(res) > _maxstorepathlen:
343 343 res = _hashencode(path, dotencode)
344 344 return res
345 345
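Two hedged examples of the behaviour described in the docstring; the hashed output depends on a sha1 digest, so only its shape is shown:

    _plainhybridencode(b'data/FOO.i')   # -> 'data/_f_o_o.i' (reversible form)
    # an encoded path longer than _maxstorepathlen (120) instead comes back as
    # 'dh/' + <shortened dirs> + <filler> + <sha1 hex digest> + '.i'
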
346 346
347 347 def _pathencode(path):
348 348 de = encodedir(path)
349 349 if len(path) > _maxstorepathlen:
350 350 return _hashencode(de, True)
351 351 ef = _encodefname(de).split(b'/')
352 352 res = b'/'.join(_auxencode(ef, True))
353 353 if len(res) > _maxstorepathlen:
354 354 return _hashencode(de, True)
355 355 return res
356 356
357 357
358 358 _pathencode = getattr(parsers, 'pathencode', _pathencode)
359 359
360 360
361 361 def _plainhybridencode(f):
362 362 return _hybridencode(f, False)
363 363
364 364
365 365 def _calcmode(vfs):
366 366 try:
367 367 # files in .hg/ will be created using this mode
368 368 mode = vfs.stat().st_mode
369 369 # avoid some useless chmods
370 370 if (0o777 & ~util.umask) == (0o777 & mode):
371 371 mode = None
372 372 except OSError:
373 373 mode = None
374 374 return mode
375 375
376 376
377 377 _data = [
378 378 b'bookmarks',
379 379 b'narrowspec',
380 380 b'data',
381 381 b'meta',
382 382 b'00manifest.d',
383 383 b'00manifest.i',
384 384 b'00changelog.d',
385 385 b'00changelog.i',
386 386 b'phaseroots',
387 387 b'obsstore',
388 388 b'requires',
389 389 ]
390 390
391 391 REVLOG_FILES_MAIN_EXT = (b'.i', b'i.tmpcensored')
392 392 REVLOG_FILES_OTHER_EXT = (
393 393 b'.idx',
394 394 b'.d',
395 395 b'.dat',
396 396 b'.n',
397 397 b'.nd',
398 b'.sda',
398 399 b'd.tmpcensored',
399 400 )
400 401 # files that are "volatile" and might change between listing and streaming
401 402 #
402 403 # note: the ".nd" files are nodemap data and won't "change" but they might be
403 404 # deleted.
404 405 REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd')
405 406
406 407 # some exceptions to the above matching
407 408 EXCLUDED = re.compile(b'.*undo\.[^/]+\.(nd?|i)$')
408 409
409 410
410 411 def is_revlog(f, kind, st):
411 412 if kind != stat.S_IFREG:
412 413 return None
413 414 return revlog_type(f)
414 415
415 416
416 417 def revlog_type(f):
417 418 if f.endswith(REVLOG_FILES_MAIN_EXT) and EXCLUDED.match(f) is None:
418 419 return FILEFLAGS_REVLOG_MAIN
419 420 elif f.endswith(REVLOG_FILES_OTHER_EXT) and EXCLUDED.match(f) is None:
420 421 t = FILETYPE_FILELOG_OTHER
421 422 if f.endswith(REVLOG_FILES_VOLATILE_EXT):
422 423 t |= FILEFLAGS_VOLATILE
423 424 return t
424 425 return None
425 426
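Hedged examples of how `revlog_type` above classifies store files, including the new `.sda` sidedata suffix:

    revlog_type(b'data/foo.i')             # FILEFLAGS_REVLOG_MAIN
    revlog_type(b'data/foo-0badf00d.sda')  # FILETYPE_FILELOG_OTHER (sidedata)
    revlog_type(b'00changelog.nd')         # FILETYPE_FILELOG_OTHER | FILEFLAGS_VOLATILE
    revlog_type(b'data/foo.txt')           # None: not a revlog file
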
426 427
427 428 # the file is part of changelog data
428 429 FILEFLAGS_CHANGELOG = 1 << 13
429 430 # the file is part of manifest data
430 431 FILEFLAGS_MANIFESTLOG = 1 << 12
431 432 # the file is part of filelog data
432 433 FILEFLAGS_FILELOG = 1 << 11
433 434 # files that are not directly part of a revlog
434 435 FILEFLAGS_OTHER = 1 << 10
435 436
436 437 # the main entry point for a revlog
437 438 FILEFLAGS_REVLOG_MAIN = 1 << 1
438 439 # a secondary file for a revlog
439 440 FILEFLAGS_REVLOG_OTHER = 1 << 0
440 441
441 442 # files that are "volatile" and might change between listing and streaming
442 443 FILEFLAGS_VOLATILE = 1 << 20
443 444
444 445 FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN
445 446 FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER
446 447 FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN
447 448 FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER
448 449 FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN
449 450 FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER
450 451 FILETYPE_OTHER = FILEFLAGS_OTHER
451 452
452 453
453 454 class basicstore(object):
454 455 '''base class for local repository stores'''
455 456
456 457 def __init__(self, path, vfstype):
457 458 vfs = vfstype(path)
458 459 self.path = vfs.base
459 460 self.createmode = _calcmode(vfs)
460 461 vfs.createmode = self.createmode
461 462 self.rawvfs = vfs
462 463 self.vfs = vfsmod.filtervfs(vfs, encodedir)
463 464 self.opener = self.vfs
464 465
465 466 def join(self, f):
466 467 return self.path + b'/' + encodedir(f)
467 468
468 469 def _walk(self, relpath, recurse):
469 470 '''yields (unencoded, encoded, size)'''
470 471 path = self.path
471 472 if relpath:
472 473 path += b'/' + relpath
473 474 striplen = len(self.path) + 1
474 475 l = []
475 476 if self.rawvfs.isdir(path):
476 477 visit = [path]
477 478 readdir = self.rawvfs.readdir
478 479 while visit:
479 480 p = visit.pop()
480 481 for f, kind, st in readdir(p, stat=True):
481 482 fp = p + b'/' + f
482 483 rl_type = is_revlog(f, kind, st)
483 484 if rl_type is not None:
484 485 n = util.pconvert(fp[striplen:])
485 486 l.append((rl_type, decodedir(n), n, st.st_size))
486 487 elif kind == stat.S_IFDIR and recurse:
487 488 visit.append(fp)
488 489 l.sort()
489 490 return l
490 491
491 492 def changelog(self, trypending, concurrencychecker=None):
492 493 return changelog.changelog(
493 494 self.vfs,
494 495 trypending=trypending,
495 496 concurrencychecker=concurrencychecker,
496 497 )
497 498
498 499 def manifestlog(self, repo, storenarrowmatch):
499 500 rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
500 501 return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
501 502
502 503 def datafiles(self, matcher=None):
503 504 files = self._walk(b'data', True) + self._walk(b'meta', True)
504 505 for (t, u, e, s) in files:
505 506 yield (FILEFLAGS_FILELOG | t, u, e, s)
506 507
507 508 def topfiles(self):
508 509 # yield manifest before changelog
509 510 files = reversed(self._walk(b'', False))
510 511 for (t, u, e, s) in files:
511 512 if u.startswith(b'00changelog'):
512 513 yield (FILEFLAGS_CHANGELOG | t, u, e, s)
513 514 elif u.startswith(b'00manifest'):
514 515 yield (FILEFLAGS_MANIFESTLOG | t, u, e, s)
515 516 else:
516 517 yield (FILETYPE_OTHER | t, u, e, s)
517 518
518 519 def walk(self, matcher=None):
519 520 """return files related to data storage (i.e. revlogs)
520 521
521 522 yields (file_type, unencoded, encoded, size)
522 523
523 524 if a matcher is passed, only the storage files of tracked paths
524 525 that match the matcher are yielded
525 526 """
526 527 # yield data files first
527 528 for x in self.datafiles(matcher):
528 529 yield x
529 530 for x in self.topfiles():
530 531 yield x
531 532
532 533 def copylist(self):
533 534 return _data
534 535
535 536 def write(self, tr):
536 537 pass
537 538
538 539 def invalidatecaches(self):
539 540 pass
540 541
541 542 def markremoved(self, fn):
542 543 pass
543 544
544 545 def __contains__(self, path):
545 546 '''Checks if the store contains path'''
546 547 path = b"/".join((b"data", path))
547 548 # file?
548 549 if self.vfs.exists(path + b".i"):
549 550 return True
550 551 # dir?
551 552 if not path.endswith(b"/"):
552 553 path = path + b"/"
553 554 return self.vfs.exists(path)
554 555
555 556
556 557 class encodedstore(basicstore):
557 558 def __init__(self, path, vfstype):
558 559 vfs = vfstype(path + b'/store')
559 560 self.path = vfs.base
560 561 self.createmode = _calcmode(vfs)
561 562 vfs.createmode = self.createmode
562 563 self.rawvfs = vfs
563 564 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
564 565 self.opener = self.vfs
565 566
566 567 def datafiles(self, matcher=None):
567 568 for t, a, b, size in super(encodedstore, self).datafiles():
568 569 try:
569 570 a = decodefilename(a)
570 571 except KeyError:
571 572 a = None
572 573 if a is not None and not _matchtrackedpath(a, matcher):
573 574 continue
574 575 yield t, a, b, size
575 576
576 577 def join(self, f):
577 578 return self.path + b'/' + encodefilename(f)
578 579
579 580 def copylist(self):
580 581 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data]
581 582
582 583
583 584 class fncache(object):
584 585 # the filename used to be partially encoded
585 586 # hence the encodedir/decodedir dance
586 587 def __init__(self, vfs):
587 588 self.vfs = vfs
588 589 self.entries = None
589 590 self._dirty = False
590 591 # set of new additions to fncache
591 592 self.addls = set()
592 593
593 594 def ensureloaded(self, warn=None):
594 595 """read the fncache file if not already read.
595 596
596 597 If the file on disk is corrupted, raise. If warn is provided,
597 598 warn and keep going instead."""
598 599 if self.entries is None:
599 600 self._load(warn)
600 601
601 602 def _load(self, warn=None):
602 603 '''fill the entries from the fncache file'''
603 604 self._dirty = False
604 605 try:
605 606 fp = self.vfs(b'fncache', mode=b'rb')
606 607 except IOError:
607 608 # skip nonexistent file
608 609 self.entries = set()
609 610 return
610 611
611 612 self.entries = set()
612 613 chunk = b''
613 614 for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
614 615 chunk += c
615 616 try:
616 617 p = chunk.rindex(b'\n')
617 618 self.entries.update(decodedir(chunk[: p + 1]).splitlines())
618 619 chunk = chunk[p + 1 :]
619 620 except ValueError:
620 621 # substring '\n' not found, maybe the entry is bigger than the
621 622 # chunksize, so let's keep iterating
622 623 pass
623 624
624 625 if chunk:
625 626 msg = _(b"fncache does not end with a newline")
626 627 if warn:
627 628 warn(msg + b'\n')
628 629 else:
629 630 raise error.Abort(
630 631 msg,
631 632 hint=_(
632 633 b"use 'hg debugrebuildfncache' to "
633 634 b"rebuild the fncache"
634 635 ),
635 636 )
636 637 self._checkentries(fp, warn)
637 638 fp.close()
638 639
639 640 def _checkentries(self, fp, warn):
640 641 """make sure there is no empty string in entries"""
641 642 if b'' in self.entries:
642 643 fp.seek(0)
643 644 for n, line in enumerate(util.iterfile(fp)):
644 645 if not line.rstrip(b'\n'):
645 646 t = _(b'invalid entry in fncache, line %d') % (n + 1)
646 647 if warn:
647 648 warn(t + b'\n')
648 649 else:
649 650 raise error.Abort(t)
650 651
651 652 def write(self, tr):
652 653 if self._dirty:
653 654 assert self.entries is not None
654 655 self.entries = self.entries | self.addls
655 656 self.addls = set()
656 657 tr.addbackup(b'fncache')
657 658 fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
658 659 if self.entries:
659 660 fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
660 661 fp.close()
661 662 self._dirty = False
662 663 if self.addls:
663 664 # if we have just new entries, let's append them to the fncache
664 665 tr.addbackup(b'fncache')
665 666 fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
666 667 if self.addls:
667 668 fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
668 669 fp.close()
669 670 self.entries = None
670 671 self.addls = set()
671 672
672 673 def add(self, fn):
673 674 if self.entries is None:
674 675 self._load()
675 676 if fn not in self.entries:
676 677 self.addls.add(fn)
677 678
678 679 def remove(self, fn):
679 680 if self.entries is None:
680 681 self._load()
681 682 if fn in self.addls:
682 683 self.addls.remove(fn)
683 684 return
684 685 try:
685 686 self.entries.remove(fn)
686 687 self._dirty = True
687 688 except KeyError:
688 689 pass
689 690
690 691 def __contains__(self, fn):
691 692 if fn in self.addls:
692 693 return True
693 694 if self.entries is None:
694 695 self._load()
695 696 return fn in self.entries
696 697
697 698 def __iter__(self):
698 699 if self.entries is None:
699 700 self._load()
700 701 return iter(self.entries | self.addls)
701 702
702 703
703 704 class _fncachevfs(vfsmod.proxyvfs):
704 705 def __init__(self, vfs, fnc, encode):
705 706 vfsmod.proxyvfs.__init__(self, vfs)
706 707 self.fncache = fnc
707 708 self.encode = encode
708 709
709 710 def __call__(self, path, mode=b'r', *args, **kw):
710 711 encoded = self.encode(path)
711 712 if mode not in (b'r', b'rb') and (
712 713 path.startswith(b'data/') or path.startswith(b'meta/')
713 714 ):
714 715 # do not trigger a fncache load when adding a file that already is
715 716 # known to exist.
716 717 notload = self.fncache.entries is None and self.vfs.exists(encoded)
717 718 if notload and b'r+' in mode and not self.vfs.stat(encoded).st_size:
718 719 # when appending to an existing file, if the file has size zero,
719 720 # it should be considered as missing. Such zero-size files are
720 721 # the result of truncation when a transaction is aborted.
721 722 notload = False
722 723 if not notload:
723 724 self.fncache.add(path)
724 725 return self.vfs(encoded, mode, *args, **kw)
725 726
726 727 def join(self, path):
727 728 if path:
728 729 return self.vfs.join(self.encode(path))
729 730 else:
730 731 return self.vfs.join(path)
731 732
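_fncachevfs wraps the store vfs so that any write-mode open of a path under data/ or meta/ registers that path in the fncache, with the `notload` test avoiding a full cache load when the file already exists on disk. A stripped-down proxy in the same spirit (hypothetical names, not the hg API, and without the zero-size special case):

import io

class RecordingOpener:
    """Toy proxy: record tracked paths opened for writing."""

    def __init__(self, opener, registry, encode=lambda p: p):
        self._opener = opener      # the real open() callable
        self._registry = registry  # e.g. a set of known store files
        self._encode = encode      # path-encoding hook

    def __call__(self, path, mode='r', *args, **kw):
        if mode not in ('r', 'rb') and (
            path.startswith('data/') or path.startswith('meta/')
        ):
            self._registry.add(path)
        return self._opener(self._encode(path), mode, *args, **kw)

# usage: every write-open of a data/ path lands in `seen`
seen = set()
vfs = RecordingOpener(lambda p, m, *a, **k: io.BytesIO(), seen)
vfs('data/foo.i', 'wb').write(b'...')
assert 'data/foo.i' in seen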
732 733
733 734 class fncachestore(basicstore):
734 735 def __init__(self, path, vfstype, dotencode):
735 736 if dotencode:
736 737 encode = _pathencode
737 738 else:
738 739 encode = _plainhybridencode
739 740 self.encode = encode
740 741 vfs = vfstype(path + b'/store')
741 742 self.path = vfs.base
742 743 self.pathsep = self.path + b'/'
743 744 self.createmode = _calcmode(vfs)
744 745 vfs.createmode = self.createmode
745 746 self.rawvfs = vfs
746 747 fnc = fncache(vfs)
747 748 self.fncache = fnc
748 749 self.vfs = _fncachevfs(vfs, fnc, encode)
749 750 self.opener = self.vfs
750 751
751 752 def join(self, f):
752 753 return self.pathsep + self.encode(f)
753 754
754 755 def getsize(self, path):
755 756 return self.rawvfs.stat(path).st_size
756 757
757 758 def datafiles(self, matcher=None):
758 759 for f in sorted(self.fncache):
759 760 if not _matchtrackedpath(f, matcher):
760 761 continue
761 762 ef = self.encode(f)
762 763 try:
763 764 t = revlog_type(f)
764 765 assert t is not None, f
765 766 t |= FILEFLAGS_FILELOG
766 767 yield t, f, ef, self.getsize(ef)
767 768 except OSError as err:
768 769 if err.errno != errno.ENOENT:
769 770 raise
770 771
771 772 def copylist(self):
772 773 d = (
773 774 b'bookmarks',
774 775 b'narrowspec',
775 776 b'data',
776 777 b'meta',
777 778 b'dh',
778 779 b'fncache',
779 780 b'phaseroots',
780 781 b'obsstore',
781 782 b'00manifest.d',
782 783 b'00manifest.i',
783 784 b'00changelog.d',
784 785 b'00changelog.i',
785 786 b'requires',
786 787 )
787 788 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d]
788 789
789 790 def write(self, tr):
790 791 self.fncache.write(tr)
791 792
792 793 def invalidatecaches(self):
793 794 self.fncache.entries = None
794 795 self.fncache.addls = set()
795 796
796 797 def markremoved(self, fn):
797 798 self.fncache.remove(fn)
798 799
799 800 def _exists(self, f):
800 801 ef = self.encode(f)
801 802 try:
802 803 self.getsize(ef)
803 804 return True
804 805 except OSError as err:
805 806 if err.errno != errno.ENOENT:
806 807 raise
807 808 # nonexistent entry
808 809 return False
809 810
810 811 def __contains__(self, path):
811 812 '''Checks if the store contains path'''
812 813 path = b"/".join((b"data", path))
813 814 # check for files (exact match)
814 815 e = path + b'.i'
815 816 if e in self.fncache and self._exists(e):
816 817 return True
817 818 # now check for directories (prefix match)
818 819 if not path.endswith(b'/'):
819 820 path += b'/'
820 821 for e in self.fncache:
821 822 if e.startswith(path) and self._exists(e):
822 823 return True
823 824 return False
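`__contains__` above answers both file and directory queries against the store: an exact hit on the `.i` index file, or a prefix match for a directory, each double-checked against the filesystem via `_exists()`. A sketch of the lookup rule alone (filesystem check omitted):

def store_contains(entries, path):
    """Toy version of fncachestore.__contains__ (no _exists check)."""
    path = b'/'.join((b'data', path))
    if path + b'.i' in entries:        # exact file match
        return True
    if not path.endswith(b'/'):        # directory prefix match
        path += b'/'
    return any(e.startswith(path) for e in entries)

entries = {b'data/foo.i', b'data/dir/bar.i'}
assert store_contains(entries, b'foo')      # tracked file
assert store_contains(entries, b'dir')      # directory of tracked files
assert not store_contains(entries, b'baz')  # unknown name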
@@ -1,94 +1,96 b''
1 1 #require reporevlogstore
2 2
3 3 A repo with unknown revlogv2 requirement string cannot be opened
4 4
5 5 $ hg init invalidreq
6 6 $ cd invalidreq
7 7 $ echo exp-revlogv2.unknown >> .hg/requires
8 8 $ hg log
9 9 abort: repository requires features unknown to this Mercurial: exp-revlogv2.unknown
10 10 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
11 11 [255]
12 12 $ cd ..
13 13
14 14 Can create and open repo with revlog v2 requirement
15 15
16 16 $ cat >> $HGRCPATH << EOF
17 17 > [experimental]
18 18 > revlogv2 = enable-unstable-format-and-corrupt-my-data
19 19 > EOF
20 20
21 21 $ hg init empty-repo
22 22 $ cd empty-repo
23 23 $ cat .hg/requires
24 24 dotencode
25 25 exp-dirstate-v2 (dirstate-v2 !)
26 26 exp-revlogv2.2
27 27 fncache
28 28 generaldelta
29 29 persistent-nodemap (rust !)
30 30 revlog-compression-zstd (zstd !)
31 31 sparserevlog
32 32 store
33 33
34 34 $ hg log
35 35
36 36 Unknown flags to revlog are rejected
37 37
38 38 >>> with open('.hg/store/00changelog.i', 'wb') as fh:
39 39 ... fh.write(b'\xff\x00\xde\xad') and None
40 40
41 41 $ hg log
42 42 abort: unknown flags (0xff00) in version 57005 revlog 00changelog
43 43 [50]
44 44
45 45 $ cd ..
46 46
47 47 Writing a simple revlog v2 works
48 48
49 49 $ hg init simple
50 50 $ cd simple
51 51 $ touch foo
52 52 $ hg -q commit -A -m initial
53 53
54 54 $ hg log
55 55 changeset: 0:96ee1d7354c4
56 56 tag: tip
57 57 user: test
58 58 date: Thu Jan 01 00:00:00 1970 +0000
59 59 summary: initial
60 60
61 61
62 62 Header written as expected
63 63
64 64 $ f --hexdump --bytes 4 .hg/store/00changelog.i
65 65 .hg/store/00changelog.i:
66 66 0000: 00 00 de ad |....|
67 67
68 68 $ f --hexdump --bytes 4 .hg/store/data/foo.i
69 69 .hg/store/data/foo.i:
70 70 0000: 00 00 de ad |....|
71 71
72 72 Bundles use a compatible changegroup format
73 73 -------------------------------------------
74 74
75 75 $ hg bundle --all ../basic.hg
76 76 1 changesets found
77 77 $ hg debugbundle --spec ../basic.hg
78 78 bzip2-v2
79 79
80 80 The expected files are generated
81 81 --------------------------------
82 82
83 83 We should have:
84 84 - a docket
85 85 - an index file with a unique name
86 86 - a data file
87 87
88 88 $ ls .hg/store/00changelog* .hg/store/00manifest*
89 .hg/store/00changelog-6b8ab34b.dat
90 .hg/store/00changelog-88698448.idx
89 .hg/store/00changelog-1335303a.sda
90 .hg/store/00changelog-6b8ab34b.idx
91 .hg/store/00changelog-b875dfc5.dat
91 92 .hg/store/00changelog.i
92 .hg/store/00manifest-1335303a.dat
93 .hg/store/00manifest-b875dfc5.idx
93 .hg/store/00manifest-05a21d65.idx
94 .hg/store/00manifest-43c37dde.dat
95 .hg/store/00manifest-e2c9362a.sda
94 96 .hg/store/00manifest.i
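The listing shows the layout this change introduces: next to the stable `00changelog.i` docket, each revlog v2 now carries uid-suffixed volatile files, `.idx` for the index, `.dat` for the data, and the new `.sda` file holding the sidedata. A small matcher for that naming scheme (the pattern is inferred from the listing above, not taken from the hg source):

import re

# hypothetical patterns, inferred from the file listing above
DOCKET = re.compile(rb'^(?P<radix>[^-]+)\.i$')
VOLATILE = re.compile(
    rb'^(?P<radix>[^-]+)-(?P<uid>[0-9a-f]{8})\.(?P<ext>idx|dat|sda)$'
)

def classify(name):
    """Map a store file name to (radix, kind)."""
    m = VOLATILE.match(name)
    if m:
        return m.group('radix'), m.group('ext').decode()
    m = DOCKET.match(name)
    if m:
        return m.group('radix'), 'docket'
    return name, 'unknown'

assert classify(b'00changelog-1335303a.sda') == (b'00changelog', 'sda')
assert classify(b'00manifest-05a21d65.idx') == (b'00manifest', 'idx')
assert classify(b'00changelog.i') == (b'00changelog', 'docket')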