##// END OF EJS Templates
dirstate-v2: add devel config option to control write behavior...
Raphaël Gomès -
r51117:ecd28d89 stable
parent child Browse files
Show More
@@ -1,2893 +1,2902 b''
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import functools
10 10 import re
11 11
12 12 from . import (
13 13 encoding,
14 14 error,
15 15 )
16 16
17 17
def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones"""
    # Walk sections in sorted order so any warnings come out deterministically.
    for section, items in sorted(configtable.items()):
        register = ui._knownconfig.setdefault(section, itemregister())
        # Every key the extension redefines over an already-known item gets
        # a devel warning before the override is applied.
        overlap = set(register) & set(items)
        for key in sorted(overlap):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        register.update(items)
31 31
class configitem:
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        # Generic items are matched via a compiled pattern instead of an
        # exact dictionary lookup; non-generic items keep _re as None.
        self._re = re.compile(self.name) if generic else None
62 62
63 63
64 64 class itemregister(dict):
65 65 """A specialized dictionary that can handle wild-card selection"""
66 66
67 67 def __init__(self):
68 68 super(itemregister, self).__init__()
69 69 self._generics = set()
70 70
71 71 def update(self, other):
72 72 super(itemregister, self).update(other)
73 73 self._generics.update(other._generics)
74 74
75 75 def __setitem__(self, key, item):
76 76 super(itemregister, self).__setitem__(key, item)
77 77 if item.generic:
78 78 self._generics.add(item)
79 79
80 80 def get(self, key):
81 81 baseitem = super(itemregister, self).get(key)
82 82 if baseitem is not None and not baseitem.generic:
83 83 return baseitem
84 84
85 85 # search for a matching generic item
86 86 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
87 87 for item in generics:
88 88 # we use 'match' instead of 'search' to make the matching simpler
89 89 # for people unfamiliar with regular expression. Having the match
90 90 # rooted to the start of the string will produce less surprising
91 91 # result for user writing simple regex for sub-attribute.
92 92 #
93 93 # For example using "color\..*" match produces an unsurprising
94 94 # result, while using search could suddenly match apparently
95 95 # unrelated configuration that happens to contains "color."
96 96 # anywhere. This is a tradeoff where we favor requiring ".*" on
97 97 # some match to avoid the need to prefix most pattern with "^".
98 98 # The "^" seems more error prone.
99 99 if item._re.match(key):
100 100 return item
101 101
102 102 return None
103 103
104 104
105 105 coreitems = {}
106 106
107 107
def _register(configtable, *args, **kwargs):
    """Build a configitem from the arguments and record it in configtable.

    Raises error.ProgrammingError if the same section/name pair is
    registered twice.
    """
    item = configitem(*args, **kwargs)
    register = configtable.setdefault(item.section, itemregister())
    if item.name in register:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    register[item.name] = item
115 115
116 116
# special sentinel value for the case where the default is derived from
# other values (the consumer computes the effective default at read time)
dynamicdefault = object()
119 119
120 120 # Registering actual config items
121 121
122 122
def getitemregister(configtable):
    """Return a registration helper bound to *configtable*.

    The returned callable forwards to `_register` and carries
    `dynamicdefault` as an attribute (exported as a pseudo enum so callers
    can write `configitem.dynamicdefault`).
    """
    register = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    register.dynamicdefault = dynamicdefault
    return register
128 128
129 129
130 130 coreconfigitem = getitemregister(coreitems)
131 131
132 132
def _registerdiffopts(section, configprefix=b''):
    """Register the standard set of diff options under *section*.

    Each option name gets *configprefix* prepended. All options default to
    False except `unified`, which defaults to None. Registration order
    matches the historical one-call-per-option layout.
    """
    for name, default in (
        (b'nodates', False),
        (b'showfunc', False),
        (b'unified', None),
        (b'git', False),
        (b'ignorews', False),
        (b'ignorewsamount', False),
        (b'ignoreblanklines', False),
        (b'ignorewseol', False),
        (b'nobinary', False),
        (b'noprefix', False),
        (b'word-diff', False),
    ):
        coreconfigitem(
            section,
            configprefix + name,
            default=default,
        )
189 189
190 190
191 191 coreconfigitem(
192 192 b'alias',
193 193 b'.*',
194 194 default=dynamicdefault,
195 195 generic=True,
196 196 )
197 197 coreconfigitem(
198 198 b'auth',
199 199 b'cookiefile',
200 200 default=None,
201 201 )
202 202 _registerdiffopts(section=b'annotate')
203 203 # bookmarks.pushing: internal hack for discovery
204 204 coreconfigitem(
205 205 b'bookmarks',
206 206 b'pushing',
207 207 default=list,
208 208 )
209 209 # bundle.mainreporoot: internal hack for bundlerepo
210 210 coreconfigitem(
211 211 b'bundle',
212 212 b'mainreporoot',
213 213 default=b'',
214 214 )
215 215 coreconfigitem(
216 216 b'censor',
217 217 b'policy',
218 218 default=b'abort',
219 219 experimental=True,
220 220 )
221 221 coreconfigitem(
222 222 b'chgserver',
223 223 b'idletimeout',
224 224 default=3600,
225 225 )
226 226 coreconfigitem(
227 227 b'chgserver',
228 228 b'skiphash',
229 229 default=False,
230 230 )
231 231 coreconfigitem(
232 232 b'cmdserver',
233 233 b'log',
234 234 default=None,
235 235 )
236 236 coreconfigitem(
237 237 b'cmdserver',
238 238 b'max-log-files',
239 239 default=7,
240 240 )
241 241 coreconfigitem(
242 242 b'cmdserver',
243 243 b'max-log-size',
244 244 default=b'1 MB',
245 245 )
246 246 coreconfigitem(
247 247 b'cmdserver',
248 248 b'max-repo-cache',
249 249 default=0,
250 250 experimental=True,
251 251 )
252 252 coreconfigitem(
253 253 b'cmdserver',
254 254 b'message-encodings',
255 255 default=list,
256 256 )
257 257 coreconfigitem(
258 258 b'cmdserver',
259 259 b'track-log',
260 260 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
261 261 )
262 262 coreconfigitem(
263 263 b'cmdserver',
264 264 b'shutdown-on-interrupt',
265 265 default=True,
266 266 )
267 267 coreconfigitem(
268 268 b'color',
269 269 b'.*',
270 270 default=None,
271 271 generic=True,
272 272 )
273 273 coreconfigitem(
274 274 b'color',
275 275 b'mode',
276 276 default=b'auto',
277 277 )
278 278 coreconfigitem(
279 279 b'color',
280 280 b'pagermode',
281 281 default=dynamicdefault,
282 282 )
283 283 coreconfigitem(
284 284 b'command-templates',
285 285 b'graphnode',
286 286 default=None,
287 287 alias=[(b'ui', b'graphnodetemplate')],
288 288 )
289 289 coreconfigitem(
290 290 b'command-templates',
291 291 b'log',
292 292 default=None,
293 293 alias=[(b'ui', b'logtemplate')],
294 294 )
295 295 coreconfigitem(
296 296 b'command-templates',
297 297 b'mergemarker',
298 298 default=(
299 299 b'{node|short} '
300 300 b'{ifeq(tags, "tip", "", '
301 301 b'ifeq(tags, "", "", "{tags} "))}'
302 302 b'{if(bookmarks, "{bookmarks} ")}'
303 303 b'{ifeq(branch, "default", "", "{branch} ")}'
304 304 b'- {author|user}: {desc|firstline}'
305 305 ),
306 306 alias=[(b'ui', b'mergemarkertemplate')],
307 307 )
308 308 coreconfigitem(
309 309 b'command-templates',
310 310 b'pre-merge-tool-output',
311 311 default=None,
312 312 alias=[(b'ui', b'pre-merge-tool-output-template')],
313 313 )
314 314 coreconfigitem(
315 315 b'command-templates',
316 316 b'oneline-summary',
317 317 default=None,
318 318 )
319 319 coreconfigitem(
320 320 b'command-templates',
321 321 b'oneline-summary.*',
322 322 default=dynamicdefault,
323 323 generic=True,
324 324 )
325 325 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
326 326 coreconfigitem(
327 327 b'commands',
328 328 b'commit.post-status',
329 329 default=False,
330 330 )
331 331 coreconfigitem(
332 332 b'commands',
333 333 b'grep.all-files',
334 334 default=False,
335 335 experimental=True,
336 336 )
337 337 coreconfigitem(
338 338 b'commands',
339 339 b'merge.require-rev',
340 340 default=False,
341 341 )
342 342 coreconfigitem(
343 343 b'commands',
344 344 b'push.require-revs',
345 345 default=False,
346 346 )
347 347 coreconfigitem(
348 348 b'commands',
349 349 b'resolve.confirm',
350 350 default=False,
351 351 )
352 352 coreconfigitem(
353 353 b'commands',
354 354 b'resolve.explicit-re-merge',
355 355 default=False,
356 356 )
357 357 coreconfigitem(
358 358 b'commands',
359 359 b'resolve.mark-check',
360 360 default=b'none',
361 361 )
362 362 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
363 363 coreconfigitem(
364 364 b'commands',
365 365 b'show.aliasprefix',
366 366 default=list,
367 367 )
368 368 coreconfigitem(
369 369 b'commands',
370 370 b'status.relative',
371 371 default=False,
372 372 )
373 373 coreconfigitem(
374 374 b'commands',
375 375 b'status.skipstates',
376 376 default=[],
377 377 experimental=True,
378 378 )
379 379 coreconfigitem(
380 380 b'commands',
381 381 b'status.terse',
382 382 default=b'',
383 383 )
384 384 coreconfigitem(
385 385 b'commands',
386 386 b'status.verbose',
387 387 default=False,
388 388 )
389 389 coreconfigitem(
390 390 b'commands',
391 391 b'update.check',
392 392 default=None,
393 393 )
394 394 coreconfigitem(
395 395 b'commands',
396 396 b'update.requiredest',
397 397 default=False,
398 398 )
399 399 coreconfigitem(
400 400 b'committemplate',
401 401 b'.*',
402 402 default=None,
403 403 generic=True,
404 404 )
405 405 coreconfigitem(
406 406 b'convert',
407 407 b'bzr.saverev',
408 408 default=True,
409 409 )
410 410 coreconfigitem(
411 411 b'convert',
412 412 b'cvsps.cache',
413 413 default=True,
414 414 )
415 415 coreconfigitem(
416 416 b'convert',
417 417 b'cvsps.fuzz',
418 418 default=60,
419 419 )
420 420 coreconfigitem(
421 421 b'convert',
422 422 b'cvsps.logencoding',
423 423 default=None,
424 424 )
425 425 coreconfigitem(
426 426 b'convert',
427 427 b'cvsps.mergefrom',
428 428 default=None,
429 429 )
430 430 coreconfigitem(
431 431 b'convert',
432 432 b'cvsps.mergeto',
433 433 default=None,
434 434 )
435 435 coreconfigitem(
436 436 b'convert',
437 437 b'git.committeractions',
438 438 default=lambda: [b'messagedifferent'],
439 439 )
440 440 coreconfigitem(
441 441 b'convert',
442 442 b'git.extrakeys',
443 443 default=list,
444 444 )
445 445 coreconfigitem(
446 446 b'convert',
447 447 b'git.findcopiesharder',
448 448 default=False,
449 449 )
450 450 coreconfigitem(
451 451 b'convert',
452 452 b'git.remoteprefix',
453 453 default=b'remote',
454 454 )
455 455 coreconfigitem(
456 456 b'convert',
457 457 b'git.renamelimit',
458 458 default=400,
459 459 )
460 460 coreconfigitem(
461 461 b'convert',
462 462 b'git.saverev',
463 463 default=True,
464 464 )
465 465 coreconfigitem(
466 466 b'convert',
467 467 b'git.similarity',
468 468 default=50,
469 469 )
470 470 coreconfigitem(
471 471 b'convert',
472 472 b'git.skipsubmodules',
473 473 default=False,
474 474 )
475 475 coreconfigitem(
476 476 b'convert',
477 477 b'hg.clonebranches',
478 478 default=False,
479 479 )
480 480 coreconfigitem(
481 481 b'convert',
482 482 b'hg.ignoreerrors',
483 483 default=False,
484 484 )
485 485 coreconfigitem(
486 486 b'convert',
487 487 b'hg.preserve-hash',
488 488 default=False,
489 489 )
490 490 coreconfigitem(
491 491 b'convert',
492 492 b'hg.revs',
493 493 default=None,
494 494 )
495 495 coreconfigitem(
496 496 b'convert',
497 497 b'hg.saverev',
498 498 default=False,
499 499 )
500 500 coreconfigitem(
501 501 b'convert',
502 502 b'hg.sourcename',
503 503 default=None,
504 504 )
505 505 coreconfigitem(
506 506 b'convert',
507 507 b'hg.startrev',
508 508 default=None,
509 509 )
510 510 coreconfigitem(
511 511 b'convert',
512 512 b'hg.tagsbranch',
513 513 default=b'default',
514 514 )
515 515 coreconfigitem(
516 516 b'convert',
517 517 b'hg.usebranchnames',
518 518 default=True,
519 519 )
520 520 coreconfigitem(
521 521 b'convert',
522 522 b'ignoreancestorcheck',
523 523 default=False,
524 524 experimental=True,
525 525 )
526 526 coreconfigitem(
527 527 b'convert',
528 528 b'localtimezone',
529 529 default=False,
530 530 )
531 531 coreconfigitem(
532 532 b'convert',
533 533 b'p4.encoding',
534 534 default=dynamicdefault,
535 535 )
536 536 coreconfigitem(
537 537 b'convert',
538 538 b'p4.startrev',
539 539 default=0,
540 540 )
541 541 coreconfigitem(
542 542 b'convert',
543 543 b'skiptags',
544 544 default=False,
545 545 )
546 546 coreconfigitem(
547 547 b'convert',
548 548 b'svn.debugsvnlog',
549 549 default=True,
550 550 )
551 551 coreconfigitem(
552 552 b'convert',
553 553 b'svn.trunk',
554 554 default=None,
555 555 )
556 556 coreconfigitem(
557 557 b'convert',
558 558 b'svn.tags',
559 559 default=None,
560 560 )
561 561 coreconfigitem(
562 562 b'convert',
563 563 b'svn.branches',
564 564 default=None,
565 565 )
566 566 coreconfigitem(
567 567 b'convert',
568 568 b'svn.startrev',
569 569 default=0,
570 570 )
571 571 coreconfigitem(
572 572 b'convert',
573 573 b'svn.dangerous-set-commit-dates',
574 574 default=False,
575 575 )
576 576 coreconfigitem(
577 577 b'debug',
578 578 b'dirstate.delaywrite',
579 579 default=0,
580 580 )
581 581 coreconfigitem(
582 582 b'debug',
583 583 b'revlog.verifyposition.changelog',
584 584 default=b'',
585 585 )
586 586 coreconfigitem(
587 587 b'debug',
588 588 b'revlog.debug-delta',
589 589 default=False,
590 590 )
591 591 coreconfigitem(
592 592 b'defaults',
593 593 b'.*',
594 594 default=None,
595 595 generic=True,
596 596 )
597 597 coreconfigitem(
598 598 b'devel',
599 599 b'all-warnings',
600 600 default=False,
601 601 )
602 602 coreconfigitem(
603 603 b'devel',
604 604 b'bundle2.debug',
605 605 default=False,
606 606 )
607 607 coreconfigitem(
608 608 b'devel',
609 609 b'bundle.delta',
610 610 default=b'',
611 611 )
612 612 coreconfigitem(
613 613 b'devel',
614 614 b'cache-vfs',
615 615 default=None,
616 616 )
617 617 coreconfigitem(
618 618 b'devel',
619 619 b'check-locks',
620 620 default=False,
621 621 )
622 622 coreconfigitem(
623 623 b'devel',
624 624 b'check-relroot',
625 625 default=False,
626 626 )
627 627 # Track copy information for all file, not just "added" one (very slow)
628 628 coreconfigitem(
629 629 b'devel',
630 630 b'copy-tracing.trace-all-files',
631 631 default=False,
632 632 )
633 633 coreconfigitem(
634 634 b'devel',
635 635 b'default-date',
636 636 default=None,
637 637 )
638 638 coreconfigitem(
639 639 b'devel',
640 640 b'deprec-warn',
641 641 default=False,
642 642 )
# devel.dirstate.v2.data_update_mode controls how the dirstate-v2 data
# file is written.
# possible values:
# - auto (the default)
# - force-append
# - force-new
coreconfigitem(
    b'devel',
    b'dirstate.v2.data_update_mode',
    # NOTE(review): was the str "auto"; every other string default in this
    # file is bytes (e.g. b'auto' for color.mode), so normalize to bytes
    # for consistency -- confirm consumers compare against b'auto'.
    default=b"auto",
)
643 652 coreconfigitem(
644 653 b'devel',
645 654 b'disableloaddefaultcerts',
646 655 default=False,
647 656 )
648 657 coreconfigitem(
649 658 b'devel',
650 659 b'warn-empty-changegroup',
651 660 default=False,
652 661 )
653 662 coreconfigitem(
654 663 b'devel',
655 664 b'legacy.exchange',
656 665 default=list,
657 666 )
658 667 # When True, revlogs use a special reference version of the nodemap, that is not
659 668 # performant but is "known" to behave properly.
660 669 coreconfigitem(
661 670 b'devel',
662 671 b'persistent-nodemap',
663 672 default=False,
664 673 )
665 674 coreconfigitem(
666 675 b'devel',
667 676 b'servercafile',
668 677 default=b'',
669 678 )
670 679 coreconfigitem(
671 680 b'devel',
672 681 b'serverexactprotocol',
673 682 default=b'',
674 683 )
675 684 coreconfigitem(
676 685 b'devel',
677 686 b'serverrequirecert',
678 687 default=False,
679 688 )
680 689 # Makes the status algorithm wait for the existence of this file
681 690 # (or until a timeout of `devel.sync.status.pre-dirstate-write-file-timeout`
682 691 # seconds) before taking the lock and writing the dirstate.
683 692 # Status signals that it's ready to wait by creating a file
684 693 # with the same name + `.waiting`.
685 694 # Useful when testing race conditions.
686 695 coreconfigitem(
687 696 b'devel',
688 697 b'sync.status.pre-dirstate-write-file',
689 698 default=None,
690 699 )
691 700 coreconfigitem(
692 701 b'devel',
693 702 b'sync.status.pre-dirstate-write-file-timeout',
694 703 default=2,
695 704 )
696 705 coreconfigitem(
697 706 b'devel',
698 707 b'strip-obsmarkers',
699 708 default=True,
700 709 )
701 710 coreconfigitem(
702 711 b'devel',
703 712 b'warn-config',
704 713 default=None,
705 714 )
706 715 coreconfigitem(
707 716 b'devel',
708 717 b'warn-config-default',
709 718 default=None,
710 719 )
711 720 coreconfigitem(
712 721 b'devel',
713 722 b'user.obsmarker',
714 723 default=None,
715 724 )
716 725 coreconfigitem(
717 726 b'devel',
718 727 b'warn-config-unknown',
719 728 default=None,
720 729 )
721 730 coreconfigitem(
722 731 b'devel',
723 732 b'debug.copies',
724 733 default=False,
725 734 )
726 735 coreconfigitem(
727 736 b'devel',
728 737 b'copy-tracing.multi-thread',
729 738 default=True,
730 739 )
731 740 coreconfigitem(
732 741 b'devel',
733 742 b'debug.extensions',
734 743 default=False,
735 744 )
736 745 coreconfigitem(
737 746 b'devel',
738 747 b'debug.repo-filters',
739 748 default=False,
740 749 )
741 750 coreconfigitem(
742 751 b'devel',
743 752 b'debug.peer-request',
744 753 default=False,
745 754 )
746 755 # If discovery.exchange-heads is False, the discovery will not start with
747 756 # remote head fetching and local head querying.
748 757 coreconfigitem(
749 758 b'devel',
750 759 b'discovery.exchange-heads',
751 760 default=True,
752 761 )
753 762 # If discovery.grow-sample is False, the sample size used in set discovery will
754 763 # not be increased through the process
755 764 coreconfigitem(
756 765 b'devel',
757 766 b'discovery.grow-sample',
758 767 default=True,
759 768 )
760 769 # When discovery.grow-sample.dynamic is True, the default, the sample size is
761 770 # adapted to the shape of the undecided set (it is set to the max of:
762 771 # <target-size>, len(roots(undecided)), len(heads(undecided)
763 772 coreconfigitem(
764 773 b'devel',
765 774 b'discovery.grow-sample.dynamic',
766 775 default=True,
767 776 )
768 777 # discovery.grow-sample.rate control the rate at which the sample grow
769 778 coreconfigitem(
770 779 b'devel',
771 780 b'discovery.grow-sample.rate',
772 781 default=1.05,
773 782 )
774 783 # If discovery.randomize is False, random sampling during discovery are
775 784 # deterministic. It is meant for integration tests.
776 785 coreconfigitem(
777 786 b'devel',
778 787 b'discovery.randomize',
779 788 default=True,
780 789 )
781 790 # Control the initial size of the discovery sample
782 791 coreconfigitem(
783 792 b'devel',
784 793 b'discovery.sample-size',
785 794 default=200,
786 795 )
787 796 # Control the initial size of the discovery for initial change
788 797 coreconfigitem(
789 798 b'devel',
790 799 b'discovery.sample-size.initial',
791 800 default=100,
792 801 )
793 802 _registerdiffopts(section=b'diff')
794 803 coreconfigitem(
795 804 b'diff',
796 805 b'merge',
797 806 default=False,
798 807 experimental=True,
799 808 )
800 809 coreconfigitem(
801 810 b'email',
802 811 b'bcc',
803 812 default=None,
804 813 )
805 814 coreconfigitem(
806 815 b'email',
807 816 b'cc',
808 817 default=None,
809 818 )
810 819 coreconfigitem(
811 820 b'email',
812 821 b'charsets',
813 822 default=list,
814 823 )
815 824 coreconfigitem(
816 825 b'email',
817 826 b'from',
818 827 default=None,
819 828 )
820 829 coreconfigitem(
821 830 b'email',
822 831 b'method',
823 832 default=b'smtp',
824 833 )
825 834 coreconfigitem(
826 835 b'email',
827 836 b'reply-to',
828 837 default=None,
829 838 )
830 839 coreconfigitem(
831 840 b'email',
832 841 b'to',
833 842 default=None,
834 843 )
835 844 coreconfigitem(
836 845 b'experimental',
837 846 b'archivemetatemplate',
838 847 default=dynamicdefault,
839 848 )
840 849 coreconfigitem(
841 850 b'experimental',
842 851 b'auto-publish',
843 852 default=b'publish',
844 853 )
845 854 coreconfigitem(
846 855 b'experimental',
847 856 b'bundle-phases',
848 857 default=False,
849 858 )
850 859 coreconfigitem(
851 860 b'experimental',
852 861 b'bundle2-advertise',
853 862 default=True,
854 863 )
855 864 coreconfigitem(
856 865 b'experimental',
857 866 b'bundle2-output-capture',
858 867 default=False,
859 868 )
860 869 coreconfigitem(
861 870 b'experimental',
862 871 b'bundle2.pushback',
863 872 default=False,
864 873 )
865 874 coreconfigitem(
866 875 b'experimental',
867 876 b'bundle2lazylocking',
868 877 default=False,
869 878 )
870 879 coreconfigitem(
871 880 b'experimental',
872 881 b'bundlecomplevel',
873 882 default=None,
874 883 )
875 884 coreconfigitem(
876 885 b'experimental',
877 886 b'bundlecomplevel.bzip2',
878 887 default=None,
879 888 )
880 889 coreconfigitem(
881 890 b'experimental',
882 891 b'bundlecomplevel.gzip',
883 892 default=None,
884 893 )
885 894 coreconfigitem(
886 895 b'experimental',
887 896 b'bundlecomplevel.none',
888 897 default=None,
889 898 )
890 899 coreconfigitem(
891 900 b'experimental',
892 901 b'bundlecomplevel.zstd',
893 902 default=None,
894 903 )
895 904 coreconfigitem(
896 905 b'experimental',
897 906 b'bundlecompthreads',
898 907 default=None,
899 908 )
900 909 coreconfigitem(
901 910 b'experimental',
902 911 b'bundlecompthreads.bzip2',
903 912 default=None,
904 913 )
905 914 coreconfigitem(
906 915 b'experimental',
907 916 b'bundlecompthreads.gzip',
908 917 default=None,
909 918 )
910 919 coreconfigitem(
911 920 b'experimental',
912 921 b'bundlecompthreads.none',
913 922 default=None,
914 923 )
915 924 coreconfigitem(
916 925 b'experimental',
917 926 b'bundlecompthreads.zstd',
918 927 default=None,
919 928 )
920 929 coreconfigitem(
921 930 b'experimental',
922 931 b'changegroup3',
923 932 default=False,
924 933 )
925 934 coreconfigitem(
926 935 b'experimental',
927 936 b'changegroup4',
928 937 default=False,
929 938 )
930 939 coreconfigitem(
931 940 b'experimental',
932 941 b'cleanup-as-archived',
933 942 default=False,
934 943 )
935 944 coreconfigitem(
936 945 b'experimental',
937 946 b'clientcompressionengines',
938 947 default=list,
939 948 )
940 949 coreconfigitem(
941 950 b'experimental',
942 951 b'copytrace',
943 952 default=b'on',
944 953 )
945 954 coreconfigitem(
946 955 b'experimental',
947 956 b'copytrace.movecandidateslimit',
948 957 default=100,
949 958 )
950 959 coreconfigitem(
951 960 b'experimental',
952 961 b'copytrace.sourcecommitlimit',
953 962 default=100,
954 963 )
955 964 coreconfigitem(
956 965 b'experimental',
957 966 b'copies.read-from',
958 967 default=b"filelog-only",
959 968 )
960 969 coreconfigitem(
961 970 b'experimental',
962 971 b'copies.write-to',
963 972 default=b'filelog-only',
964 973 )
965 974 coreconfigitem(
966 975 b'experimental',
967 976 b'crecordtest',
968 977 default=None,
969 978 )
970 979 coreconfigitem(
971 980 b'experimental',
972 981 b'directaccess',
973 982 default=False,
974 983 )
975 984 coreconfigitem(
976 985 b'experimental',
977 986 b'directaccess.revnums',
978 987 default=False,
979 988 )
980 989 coreconfigitem(
981 990 b'experimental',
982 991 b'editortmpinhg',
983 992 default=False,
984 993 )
985 994 coreconfigitem(
986 995 b'experimental',
987 996 b'evolution',
988 997 default=list,
989 998 )
990 999 coreconfigitem(
991 1000 b'experimental',
992 1001 b'evolution.allowdivergence',
993 1002 default=False,
994 1003 alias=[(b'experimental', b'allowdivergence')],
995 1004 )
996 1005 coreconfigitem(
997 1006 b'experimental',
998 1007 b'evolution.allowunstable',
999 1008 default=None,
1000 1009 )
1001 1010 coreconfigitem(
1002 1011 b'experimental',
1003 1012 b'evolution.createmarkers',
1004 1013 default=None,
1005 1014 )
1006 1015 coreconfigitem(
1007 1016 b'experimental',
1008 1017 b'evolution.effect-flags',
1009 1018 default=True,
1010 1019 alias=[(b'experimental', b'effect-flags')],
1011 1020 )
1012 1021 coreconfigitem(
1013 1022 b'experimental',
1014 1023 b'evolution.exchange',
1015 1024 default=None,
1016 1025 )
1017 1026 coreconfigitem(
1018 1027 b'experimental',
1019 1028 b'evolution.bundle-obsmarker',
1020 1029 default=False,
1021 1030 )
1022 1031 coreconfigitem(
1023 1032 b'experimental',
1024 1033 b'evolution.bundle-obsmarker:mandatory',
1025 1034 default=True,
1026 1035 )
1027 1036 coreconfigitem(
1028 1037 b'experimental',
1029 1038 b'log.topo',
1030 1039 default=False,
1031 1040 )
1032 1041 coreconfigitem(
1033 1042 b'experimental',
1034 1043 b'evolution.report-instabilities',
1035 1044 default=True,
1036 1045 )
1037 1046 coreconfigitem(
1038 1047 b'experimental',
1039 1048 b'evolution.track-operation',
1040 1049 default=True,
1041 1050 )
1042 1051 # repo-level config to exclude a revset visibility
1043 1052 #
1044 1053 # The target use case is to use `share` to expose different subset of the same
1045 1054 # repository, especially server side. See also `server.view`.
1046 1055 coreconfigitem(
1047 1056 b'experimental',
1048 1057 b'extra-filter-revs',
1049 1058 default=None,
1050 1059 )
1051 1060 coreconfigitem(
1052 1061 b'experimental',
1053 1062 b'maxdeltachainspan',
1054 1063 default=-1,
1055 1064 )
1056 1065 # tracks files which were undeleted (merge might delete them but we explicitly
1057 1066 # kept/undeleted them) and creates new filenodes for them
1058 1067 coreconfigitem(
1059 1068 b'experimental',
1060 1069 b'merge-track-salvaged',
1061 1070 default=False,
1062 1071 )
1063 1072 coreconfigitem(
1064 1073 b'experimental',
1065 1074 b'mmapindexthreshold',
1066 1075 default=None,
1067 1076 )
1068 1077 coreconfigitem(
1069 1078 b'experimental',
1070 1079 b'narrow',
1071 1080 default=False,
1072 1081 )
1073 1082 coreconfigitem(
1074 1083 b'experimental',
1075 1084 b'nonnormalparanoidcheck',
1076 1085 default=False,
1077 1086 )
1078 1087 coreconfigitem(
1079 1088 b'experimental',
1080 1089 b'exportableenviron',
1081 1090 default=list,
1082 1091 )
1083 1092 coreconfigitem(
1084 1093 b'experimental',
1085 1094 b'extendedheader.index',
1086 1095 default=None,
1087 1096 )
1088 1097 coreconfigitem(
1089 1098 b'experimental',
1090 1099 b'extendedheader.similarity',
1091 1100 default=False,
1092 1101 )
1093 1102 coreconfigitem(
1094 1103 b'experimental',
1095 1104 b'graphshorten',
1096 1105 default=False,
1097 1106 )
1098 1107 coreconfigitem(
1099 1108 b'experimental',
1100 1109 b'graphstyle.parent',
1101 1110 default=dynamicdefault,
1102 1111 )
1103 1112 coreconfigitem(
1104 1113 b'experimental',
1105 1114 b'graphstyle.missing',
1106 1115 default=dynamicdefault,
1107 1116 )
1108 1117 coreconfigitem(
1109 1118 b'experimental',
1110 1119 b'graphstyle.grandparent',
1111 1120 default=dynamicdefault,
1112 1121 )
1113 1122 coreconfigitem(
1114 1123 b'experimental',
1115 1124 b'hook-track-tags',
1116 1125 default=False,
1117 1126 )
1118 1127 coreconfigitem(
1119 1128 b'experimental',
1120 1129 b'httppostargs',
1121 1130 default=False,
1122 1131 )
1123 1132 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1124 1133 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1125 1134
1126 1135 coreconfigitem(
1127 1136 b'experimental',
1128 1137 b'obsmarkers-exchange-debug',
1129 1138 default=False,
1130 1139 )
1131 1140 coreconfigitem(
1132 1141 b'experimental',
1133 1142 b'remotenames',
1134 1143 default=False,
1135 1144 )
1136 1145 coreconfigitem(
1137 1146 b'experimental',
1138 1147 b'removeemptydirs',
1139 1148 default=True,
1140 1149 )
1141 1150 coreconfigitem(
1142 1151 b'experimental',
1143 1152 b'revert.interactive.select-to-keep',
1144 1153 default=False,
1145 1154 )
1146 1155 coreconfigitem(
1147 1156 b'experimental',
1148 1157 b'revisions.prefixhexnode',
1149 1158 default=False,
1150 1159 )
1151 1160 # "out of experimental" todo list.
1152 1161 #
1153 1162 # * include management of a persistent nodemap in the main docket
1154 1163 # * enforce a "no-truncate" policy for mmap safety
1155 1164 # - for censoring operation
1156 1165 # - for stripping operation
1157 1166 # - for rollback operation
1158 1167 # * proper streaming (race free) of the docket file
1159 1168 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1160 1169 # * Exchange-wise, we will also need to do something more efficient than
1161 1170 # keeping references to the affected revlogs, especially memory-wise when
1162 1171 # rewriting sidedata.
1163 1172 # * introduce a proper solution to reduce the number of filelog related files.
1164 1173 # * use caching for reading sidedata (similar to what we do for data).
1165 1174 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1166 1175 # * Improvement to consider
1167 1176 # - avoid compression header in chunk using the default compression?
1168 1177 # - forbid "inline" compression mode entirely?
1169 1178 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1170 1179 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1171 1180 # - keep track of chain base or size (probably not that useful anymore)
1172 1181 coreconfigitem(
1173 1182 b'experimental',
1174 1183 b'revlogv2',
1175 1184 default=None,
1176 1185 )
1177 1186 coreconfigitem(
1178 1187 b'experimental',
1179 1188 b'revisions.disambiguatewithin',
1180 1189 default=None,
1181 1190 )
1182 1191 coreconfigitem(
1183 1192 b'experimental',
1184 1193 b'rust.index',
1185 1194 default=False,
1186 1195 )
1187 1196 coreconfigitem(
1188 1197 b'experimental',
1189 1198 b'server.filesdata.recommended-batch-size',
1190 1199 default=50000,
1191 1200 )
1192 1201 coreconfigitem(
1193 1202 b'experimental',
1194 1203 b'server.manifestdata.recommended-batch-size',
1195 1204 default=100000,
1196 1205 )
1197 1206 coreconfigitem(
1198 1207 b'experimental',
1199 1208 b'server.stream-narrow-clones',
1200 1209 default=False,
1201 1210 )
1202 1211 coreconfigitem(
1203 1212 b'experimental',
1204 1213 b'single-head-per-branch',
1205 1214 default=False,
1206 1215 )
1207 1216 coreconfigitem(
1208 1217 b'experimental',
1209 1218 b'single-head-per-branch:account-closed-heads',
1210 1219 default=False,
1211 1220 )
1212 1221 coreconfigitem(
1213 1222 b'experimental',
1214 1223 b'single-head-per-branch:public-changes-only',
1215 1224 default=False,
1216 1225 )
1217 1226 coreconfigitem(
1218 1227 b'experimental',
1219 1228 b'sparse-read',
1220 1229 default=False,
1221 1230 )
1222 1231 coreconfigitem(
1223 1232 b'experimental',
1224 1233 b'sparse-read.density-threshold',
1225 1234 default=0.50,
1226 1235 )
1227 1236 coreconfigitem(
1228 1237 b'experimental',
1229 1238 b'sparse-read.min-gap-size',
1230 1239 default=b'65K',
1231 1240 )
1232 1241 coreconfigitem(
1233 1242 b'experimental',
1234 1243 b'treemanifest',
1235 1244 default=False,
1236 1245 )
1237 1246 coreconfigitem(
1238 1247 b'experimental',
1239 1248 b'update.atomic-file',
1240 1249 default=False,
1241 1250 )
1242 1251 coreconfigitem(
1243 1252 b'experimental',
1244 1253 b'web.full-garbage-collection-rate',
1245 1254 default=1, # still forcing a full collection on each request
1246 1255 )
1247 1256 coreconfigitem(
1248 1257 b'experimental',
1249 1258 b'worker.wdir-get-thread-safe',
1250 1259 default=False,
1251 1260 )
1252 1261 coreconfigitem(
1253 1262 b'experimental',
1254 1263 b'worker.repository-upgrade',
1255 1264 default=False,
1256 1265 )
1257 1266 coreconfigitem(
1258 1267 b'experimental',
1259 1268 b'xdiff',
1260 1269 default=False,
1261 1270 )
1262 1271 coreconfigitem(
1263 1272 b'extensions',
1264 1273 b'[^:]*',
1265 1274 default=None,
1266 1275 generic=True,
1267 1276 )
1268 1277 coreconfigitem(
1269 1278 b'extensions',
1270 1279 b'[^:]*:required',
1271 1280 default=False,
1272 1281 generic=True,
1273 1282 )
1274 1283 coreconfigitem(
1275 1284 b'extdata',
1276 1285 b'.*',
1277 1286 default=None,
1278 1287 generic=True,
1279 1288 )
1280 1289 coreconfigitem(
1281 1290 b'format',
1282 1291 b'bookmarks-in-store',
1283 1292 default=False,
1284 1293 )
1285 1294 coreconfigitem(
1286 1295 b'format',
1287 1296 b'chunkcachesize',
1288 1297 default=None,
1289 1298 experimental=True,
1290 1299 )
1291 1300 coreconfigitem(
1292 1301 # Enable this dirstate format *when creating a new repository*.
1293 1302 # Which format to use for existing repos is controlled by .hg/requires
1294 1303 b'format',
1295 1304 b'use-dirstate-v2',
1296 1305 default=False,
1297 1306 experimental=True,
1298 1307 alias=[(b'format', b'exp-rc-dirstate-v2')],
1299 1308 )
1300 1309 coreconfigitem(
1301 1310 b'format',
1302 1311 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
1303 1312 default=False,
1304 1313 experimental=True,
1305 1314 )
1306 1315 coreconfigitem(
1307 1316 b'format',
1308 1317 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet',
1309 1318 default=False,
1310 1319 experimental=True,
1311 1320 )
1312 1321 coreconfigitem(
1313 1322 b'format',
1314 1323 b'use-dirstate-tracked-hint',
1315 1324 default=False,
1316 1325 experimental=True,
1317 1326 )
1318 1327 coreconfigitem(
1319 1328 b'format',
1320 1329 b'use-dirstate-tracked-hint.version',
1321 1330 default=1,
1322 1331 experimental=True,
1323 1332 )
1324 1333 coreconfigitem(
1325 1334 b'format',
1326 1335 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
1327 1336 default=False,
1328 1337 experimental=True,
1329 1338 )
1330 1339 coreconfigitem(
1331 1340 b'format',
1332 1341 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet',
1333 1342 default=False,
1334 1343 experimental=True,
1335 1344 )
1336 1345 coreconfigitem(
1337 1346 b'format',
1338 1347 b'dotencode',
1339 1348 default=True,
1340 1349 )
1341 1350 coreconfigitem(
1342 1351 b'format',
1343 1352 b'generaldelta',
1344 1353 default=False,
1345 1354 experimental=True,
1346 1355 )
1347 1356 coreconfigitem(
1348 1357 b'format',
1349 1358 b'manifestcachesize',
1350 1359 default=None,
1351 1360 experimental=True,
1352 1361 )
1353 1362 coreconfigitem(
1354 1363 b'format',
1355 1364 b'maxchainlen',
1356 1365 default=dynamicdefault,
1357 1366 experimental=True,
1358 1367 )
1359 1368 coreconfigitem(
1360 1369 b'format',
1361 1370 b'obsstore-version',
1362 1371 default=None,
1363 1372 )
1364 1373 coreconfigitem(
1365 1374 b'format',
1366 1375 b'sparse-revlog',
1367 1376 default=True,
1368 1377 )
1369 1378 coreconfigitem(
1370 1379 b'format',
1371 1380 b'revlog-compression',
1372 1381 default=lambda: [b'zstd', b'zlib'],
1373 1382 alias=[(b'experimental', b'format.compression')],
1374 1383 )
1375 1384 # Experimental TODOs:
1376 1385 #
1377 1386 # * Same as for revlogv2 (but for the reduction of the number of files)
1378 1387 # * Actually computing the rank of changesets
1379 1388 # * Improvement to investigate
1380 1389 # - storing .hgtags fnode
1381 1390 # - storing branch related identifier
1382 1391
1383 1392 coreconfigitem(
1384 1393 b'format',
1385 1394 b'exp-use-changelog-v2',
1386 1395 default=None,
1387 1396 experimental=True,
1388 1397 )
1389 1398 coreconfigitem(
1390 1399 b'format',
1391 1400 b'usefncache',
1392 1401 default=True,
1393 1402 )
1394 1403 coreconfigitem(
1395 1404 b'format',
1396 1405 b'usegeneraldelta',
1397 1406 default=True,
1398 1407 )
1399 1408 coreconfigitem(
1400 1409 b'format',
1401 1410 b'usestore',
1402 1411 default=True,
1403 1412 )
1404 1413
1405 1414
def _persistent_nodemap_default():
    """Compute the default value for `format.use-persistent-nodemap`.

    Persistent nodemaps are only worth enabling when a fast implementation
    of the revlog logic is available, so the default is True exactly when
    the Rust extension module can be imported.
    """
    # Imported lazily so that merely loading this module does not pull in
    # the policy machinery.
    from . import policy

    rust_revlog = policy.importrust('revlog')
    return rust_revlog is not None
1414 1423
1415 1424
1416 1425 coreconfigitem(
1417 1426 b'format',
1418 1427 b'use-persistent-nodemap',
1419 1428 default=_persistent_nodemap_default,
1420 1429 )
1421 1430 coreconfigitem(
1422 1431 b'format',
1423 1432 b'exp-use-copies-side-data-changeset',
1424 1433 default=False,
1425 1434 experimental=True,
1426 1435 )
1427 1436 coreconfigitem(
1428 1437 b'format',
1429 1438 b'use-share-safe',
1430 1439 default=True,
1431 1440 )
1432 1441 coreconfigitem(
1433 1442 b'format',
1434 1443 b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
1435 1444 default=False,
1436 1445 experimental=True,
1437 1446 )
1438 1447 coreconfigitem(
1439 1448 b'format',
1440 1449 b'use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet',
1441 1450 default=False,
1442 1451 experimental=True,
1443 1452 )
1444 1453
1445 1454 # Moving this on by default means we are confident about the scaling of phases.
1446 1455 # This is not guaranteed to be the case at the time this message is written.
1447 1456 coreconfigitem(
1448 1457 b'format',
1449 1458 b'use-internal-phase',
1450 1459 default=False,
1451 1460 experimental=True,
1452 1461 )
1453 1462 # The interaction between the archived phase and obsolescence markers needs to
1454 1463 # be sorted out before wider usage of this is to be considered.
1455 1464 #
1456 1465 # At the time this message is written, the behavior when archiving obsolete
1457 1466 # changesets differs significantly from stripping. As part of stripping, we also
1458 1467 # remove the obsolescence markers associated with the stripped changesets,
1459 1468 # revealing the predecessor changesets when applicable. When archiving, we
1460 1469 # don't touch the obsolescence markers, keeping everything hidden. This can
1461 1470 # result in quite confusing situations for people combining the exchange of
1462 1471 # drafts with the archived phase, as some markers needed by others may be
1463 1472 # skipped during exchange.
1464 1473 coreconfigitem(
1465 1474 b'format',
1466 1475 b'exp-archived-phase',
1467 1476 default=False,
1468 1477 experimental=True,
1469 1478 )
1470 1479 coreconfigitem(
1471 1480 b'shelve',
1472 1481 b'store',
1473 1482 default=b'internal',
1474 1483 experimental=True,
1475 1484 )
1476 1485 coreconfigitem(
1477 1486 b'fsmonitor',
1478 1487 b'warn_when_unused',
1479 1488 default=True,
1480 1489 )
1481 1490 coreconfigitem(
1482 1491 b'fsmonitor',
1483 1492 b'warn_update_file_count',
1484 1493 default=50000,
1485 1494 )
1486 1495 coreconfigitem(
1487 1496 b'fsmonitor',
1488 1497 b'warn_update_file_count_rust',
1489 1498 default=400000,
1490 1499 )
1491 1500 coreconfigitem(
1492 1501 b'help',
1493 1502 br'hidden-command\..*',
1494 1503 default=False,
1495 1504 generic=True,
1496 1505 )
1497 1506 coreconfigitem(
1498 1507 b'help',
1499 1508 br'hidden-topic\..*',
1500 1509 default=False,
1501 1510 generic=True,
1502 1511 )
1503 1512 coreconfigitem(
1504 1513 b'hooks',
1505 1514 b'[^:]*',
1506 1515 default=dynamicdefault,
1507 1516 generic=True,
1508 1517 )
1509 1518 coreconfigitem(
1510 1519 b'hooks',
1511 1520 b'.*:run-with-plain',
1512 1521 default=True,
1513 1522 generic=True,
1514 1523 )
1515 1524 coreconfigitem(
1516 1525 b'hgweb-paths',
1517 1526 b'.*',
1518 1527 default=list,
1519 1528 generic=True,
1520 1529 )
1521 1530 coreconfigitem(
1522 1531 b'hostfingerprints',
1523 1532 b'.*',
1524 1533 default=list,
1525 1534 generic=True,
1526 1535 )
1527 1536 coreconfigitem(
1528 1537 b'hostsecurity',
1529 1538 b'ciphers',
1530 1539 default=None,
1531 1540 )
1532 1541 coreconfigitem(
1533 1542 b'hostsecurity',
1534 1543 b'minimumprotocol',
1535 1544 default=dynamicdefault,
1536 1545 )
1537 1546 coreconfigitem(
1538 1547 b'hostsecurity',
1539 1548 b'.*:minimumprotocol$',
1540 1549 default=dynamicdefault,
1541 1550 generic=True,
1542 1551 )
1543 1552 coreconfigitem(
1544 1553 b'hostsecurity',
1545 1554 b'.*:ciphers$',
1546 1555 default=dynamicdefault,
1547 1556 generic=True,
1548 1557 )
1549 1558 coreconfigitem(
1550 1559 b'hostsecurity',
1551 1560 b'.*:fingerprints$',
1552 1561 default=list,
1553 1562 generic=True,
1554 1563 )
1555 1564 coreconfigitem(
1556 1565 b'hostsecurity',
1557 1566 b'.*:verifycertsfile$',
1558 1567 default=None,
1559 1568 generic=True,
1560 1569 )
1561 1570
1562 1571 coreconfigitem(
1563 1572 b'http_proxy',
1564 1573 b'always',
1565 1574 default=False,
1566 1575 )
1567 1576 coreconfigitem(
1568 1577 b'http_proxy',
1569 1578 b'host',
1570 1579 default=None,
1571 1580 )
1572 1581 coreconfigitem(
1573 1582 b'http_proxy',
1574 1583 b'no',
1575 1584 default=list,
1576 1585 )
1577 1586 coreconfigitem(
1578 1587 b'http_proxy',
1579 1588 b'passwd',
1580 1589 default=None,
1581 1590 )
1582 1591 coreconfigitem(
1583 1592 b'http_proxy',
1584 1593 b'user',
1585 1594 default=None,
1586 1595 )
1587 1596
1588 1597 coreconfigitem(
1589 1598 b'http',
1590 1599 b'timeout',
1591 1600 default=None,
1592 1601 )
1593 1602
1594 1603 coreconfigitem(
1595 1604 b'logtoprocess',
1596 1605 b'commandexception',
1597 1606 default=None,
1598 1607 )
1599 1608 coreconfigitem(
1600 1609 b'logtoprocess',
1601 1610 b'commandfinish',
1602 1611 default=None,
1603 1612 )
1604 1613 coreconfigitem(
1605 1614 b'logtoprocess',
1606 1615 b'command',
1607 1616 default=None,
1608 1617 )
1609 1618 coreconfigitem(
1610 1619 b'logtoprocess',
1611 1620 b'develwarn',
1612 1621 default=None,
1613 1622 )
1614 1623 coreconfigitem(
1615 1624 b'logtoprocess',
1616 1625 b'uiblocked',
1617 1626 default=None,
1618 1627 )
1619 1628 coreconfigitem(
1620 1629 b'merge',
1621 1630 b'checkunknown',
1622 1631 default=b'abort',
1623 1632 )
1624 1633 coreconfigitem(
1625 1634 b'merge',
1626 1635 b'checkignored',
1627 1636 default=b'abort',
1628 1637 )
1629 1638 coreconfigitem(
1630 1639 b'experimental',
1631 1640 b'merge.checkpathconflicts',
1632 1641 default=False,
1633 1642 )
1634 1643 coreconfigitem(
1635 1644 b'merge',
1636 1645 b'followcopies',
1637 1646 default=True,
1638 1647 )
1639 1648 coreconfigitem(
1640 1649 b'merge',
1641 1650 b'on-failure',
1642 1651 default=b'continue',
1643 1652 )
1644 1653 coreconfigitem(
1645 1654 b'merge',
1646 1655 b'preferancestor',
1647 1656 default=lambda: [b'*'],
1648 1657 experimental=True,
1649 1658 )
1650 1659 coreconfigitem(
1651 1660 b'merge',
1652 1661 b'strict-capability-check',
1653 1662 default=False,
1654 1663 )
1655 1664 coreconfigitem(
1656 1665 b'merge',
1657 1666 b'disable-partial-tools',
1658 1667 default=False,
1659 1668 experimental=True,
1660 1669 )
1661 1670 coreconfigitem(
1662 1671 b'partial-merge-tools',
1663 1672 b'.*',
1664 1673 default=None,
1665 1674 generic=True,
1666 1675 experimental=True,
1667 1676 )
1668 1677 coreconfigitem(
1669 1678 b'partial-merge-tools',
1670 1679 br'.*\.patterns',
1671 1680 default=dynamicdefault,
1672 1681 generic=True,
1673 1682 priority=-1,
1674 1683 experimental=True,
1675 1684 )
1676 1685 coreconfigitem(
1677 1686 b'partial-merge-tools',
1678 1687 br'.*\.executable$',
1679 1688 default=dynamicdefault,
1680 1689 generic=True,
1681 1690 priority=-1,
1682 1691 experimental=True,
1683 1692 )
1684 1693 coreconfigitem(
1685 1694 b'partial-merge-tools',
1686 1695 br'.*\.order',
1687 1696 default=0,
1688 1697 generic=True,
1689 1698 priority=-1,
1690 1699 experimental=True,
1691 1700 )
1692 1701 coreconfigitem(
1693 1702 b'partial-merge-tools',
1694 1703 br'.*\.args',
1695 1704 default=b"$local $base $other",
1696 1705 generic=True,
1697 1706 priority=-1,
1698 1707 experimental=True,
1699 1708 )
1700 1709 coreconfigitem(
1701 1710 b'partial-merge-tools',
1702 1711 br'.*\.disable',
1703 1712 default=False,
1704 1713 generic=True,
1705 1714 priority=-1,
1706 1715 experimental=True,
1707 1716 )
1708 1717 coreconfigitem(
1709 1718 b'merge-tools',
1710 1719 b'.*',
1711 1720 default=None,
1712 1721 generic=True,
1713 1722 )
1714 1723 coreconfigitem(
1715 1724 b'merge-tools',
1716 1725 br'.*\.args$',
1717 1726 default=b"$local $base $other",
1718 1727 generic=True,
1719 1728 priority=-1,
1720 1729 )
1721 1730 coreconfigitem(
1722 1731 b'merge-tools',
1723 1732 br'.*\.binary$',
1724 1733 default=False,
1725 1734 generic=True,
1726 1735 priority=-1,
1727 1736 )
1728 1737 coreconfigitem(
1729 1738 b'merge-tools',
1730 1739 br'.*\.check$',
1731 1740 default=list,
1732 1741 generic=True,
1733 1742 priority=-1,
1734 1743 )
1735 1744 coreconfigitem(
1736 1745 b'merge-tools',
1737 1746 br'.*\.checkchanged$',
1738 1747 default=False,
1739 1748 generic=True,
1740 1749 priority=-1,
1741 1750 )
1742 1751 coreconfigitem(
1743 1752 b'merge-tools',
1744 1753 br'.*\.executable$',
1745 1754 default=dynamicdefault,
1746 1755 generic=True,
1747 1756 priority=-1,
1748 1757 )
1749 1758 coreconfigitem(
1750 1759 b'merge-tools',
1751 1760 br'.*\.fixeol$',
1752 1761 default=False,
1753 1762 generic=True,
1754 1763 priority=-1,
1755 1764 )
1756 1765 coreconfigitem(
1757 1766 b'merge-tools',
1758 1767 br'.*\.gui$',
1759 1768 default=False,
1760 1769 generic=True,
1761 1770 priority=-1,
1762 1771 )
1763 1772 coreconfigitem(
1764 1773 b'merge-tools',
1765 1774 br'.*\.mergemarkers$',
1766 1775 default=b'basic',
1767 1776 generic=True,
1768 1777 priority=-1,
1769 1778 )
1770 1779 coreconfigitem(
1771 1780 b'merge-tools',
1772 1781 br'.*\.mergemarkertemplate$',
1773 1782 default=dynamicdefault, # take from command-templates.mergemarker
1774 1783 generic=True,
1775 1784 priority=-1,
1776 1785 )
1777 1786 coreconfigitem(
1778 1787 b'merge-tools',
1779 1788 br'.*\.priority$',
1780 1789 default=0,
1781 1790 generic=True,
1782 1791 priority=-1,
1783 1792 )
1784 1793 coreconfigitem(
1785 1794 b'merge-tools',
1786 1795 br'.*\.premerge$',
1787 1796 default=dynamicdefault,
1788 1797 generic=True,
1789 1798 priority=-1,
1790 1799 )
1791 1800 coreconfigitem(
1792 1801 b'merge-tools',
1793 1802 br'.*\.symlink$',
1794 1803 default=False,
1795 1804 generic=True,
1796 1805 priority=-1,
1797 1806 )
1798 1807 coreconfigitem(
1799 1808 b'pager',
1800 1809 b'attend-.*',
1801 1810 default=dynamicdefault,
1802 1811 generic=True,
1803 1812 )
1804 1813 coreconfigitem(
1805 1814 b'pager',
1806 1815 b'ignore',
1807 1816 default=list,
1808 1817 )
1809 1818 coreconfigitem(
1810 1819 b'pager',
1811 1820 b'pager',
1812 1821 default=dynamicdefault,
1813 1822 )
1814 1823 coreconfigitem(
1815 1824 b'patch',
1816 1825 b'eol',
1817 1826 default=b'strict',
1818 1827 )
1819 1828 coreconfigitem(
1820 1829 b'patch',
1821 1830 b'fuzz',
1822 1831 default=2,
1823 1832 )
1824 1833 coreconfigitem(
1825 1834 b'paths',
1826 1835 b'default',
1827 1836 default=None,
1828 1837 )
1829 1838 coreconfigitem(
1830 1839 b'paths',
1831 1840 b'default-push',
1832 1841 default=None,
1833 1842 )
1834 1843 coreconfigitem(
1835 1844 b'paths',
1836 1845 b'.*',
1837 1846 default=None,
1838 1847 generic=True,
1839 1848 )
coreconfigitem(
    b'paths',
    b'.*:bookmarks.mode',
    # bytes literal for consistency with every other default in this file:
    # Mercurial config values are handled as bytes, and the consumers of
    # this suboption compare against b'mirror' / b'ignore' / b'default'.
    # A native-str 'default' could never match those bytes comparisons.
    default=b'default',
    generic=True,
)
1846 1855 coreconfigitem(
1847 1856 b'paths',
1848 1857 b'.*:multi-urls',
1849 1858 default=False,
1850 1859 generic=True,
1851 1860 )
1852 1861 coreconfigitem(
1853 1862 b'paths',
1854 1863 b'.*:pushrev',
1855 1864 default=None,
1856 1865 generic=True,
1857 1866 )
1858 1867 coreconfigitem(
1859 1868 b'paths',
1860 1869 b'.*:pushurl',
1861 1870 default=None,
1862 1871 generic=True,
1863 1872 )
1864 1873 coreconfigitem(
1865 1874 b'phases',
1866 1875 b'checksubrepos',
1867 1876 default=b'follow',
1868 1877 )
1869 1878 coreconfigitem(
1870 1879 b'phases',
1871 1880 b'new-commit',
1872 1881 default=b'draft',
1873 1882 )
1874 1883 coreconfigitem(
1875 1884 b'phases',
1876 1885 b'publish',
1877 1886 default=True,
1878 1887 )
1879 1888 coreconfigitem(
1880 1889 b'profiling',
1881 1890 b'enabled',
1882 1891 default=False,
1883 1892 )
1884 1893 coreconfigitem(
1885 1894 b'profiling',
1886 1895 b'format',
1887 1896 default=b'text',
1888 1897 )
1889 1898 coreconfigitem(
1890 1899 b'profiling',
1891 1900 b'freq',
1892 1901 default=1000,
1893 1902 )
1894 1903 coreconfigitem(
1895 1904 b'profiling',
1896 1905 b'limit',
1897 1906 default=30,
1898 1907 )
1899 1908 coreconfigitem(
1900 1909 b'profiling',
1901 1910 b'nested',
1902 1911 default=0,
1903 1912 )
1904 1913 coreconfigitem(
1905 1914 b'profiling',
1906 1915 b'output',
1907 1916 default=None,
1908 1917 )
1909 1918 coreconfigitem(
1910 1919 b'profiling',
1911 1920 b'showmax',
1912 1921 default=0.999,
1913 1922 )
1914 1923 coreconfigitem(
1915 1924 b'profiling',
1916 1925 b'showmin',
1917 1926 default=dynamicdefault,
1918 1927 )
1919 1928 coreconfigitem(
1920 1929 b'profiling',
1921 1930 b'showtime',
1922 1931 default=True,
1923 1932 )
1924 1933 coreconfigitem(
1925 1934 b'profiling',
1926 1935 b'sort',
1927 1936 default=b'inlinetime',
1928 1937 )
1929 1938 coreconfigitem(
1930 1939 b'profiling',
1931 1940 b'statformat',
1932 1941 default=b'hotpath',
1933 1942 )
1934 1943 coreconfigitem(
1935 1944 b'profiling',
1936 1945 b'time-track',
1937 1946 default=dynamicdefault,
1938 1947 )
1939 1948 coreconfigitem(
1940 1949 b'profiling',
1941 1950 b'type',
1942 1951 default=b'stat',
1943 1952 )
1944 1953 coreconfigitem(
1945 1954 b'progress',
1946 1955 b'assume-tty',
1947 1956 default=False,
1948 1957 )
1949 1958 coreconfigitem(
1950 1959 b'progress',
1951 1960 b'changedelay',
1952 1961 default=1,
1953 1962 )
1954 1963 coreconfigitem(
1955 1964 b'progress',
1956 1965 b'clear-complete',
1957 1966 default=True,
1958 1967 )
1959 1968 coreconfigitem(
1960 1969 b'progress',
1961 1970 b'debug',
1962 1971 default=False,
1963 1972 )
1964 1973 coreconfigitem(
1965 1974 b'progress',
1966 1975 b'delay',
1967 1976 default=3,
1968 1977 )
1969 1978 coreconfigitem(
1970 1979 b'progress',
1971 1980 b'disable',
1972 1981 default=False,
1973 1982 )
1974 1983 coreconfigitem(
1975 1984 b'progress',
1976 1985 b'estimateinterval',
1977 1986 default=60.0,
1978 1987 )
1979 1988 coreconfigitem(
1980 1989 b'progress',
1981 1990 b'format',
1982 1991 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1983 1992 )
1984 1993 coreconfigitem(
1985 1994 b'progress',
1986 1995 b'refresh',
1987 1996 default=0.1,
1988 1997 )
1989 1998 coreconfigitem(
1990 1999 b'progress',
1991 2000 b'width',
1992 2001 default=dynamicdefault,
1993 2002 )
1994 2003 coreconfigitem(
1995 2004 b'pull',
1996 2005 b'confirm',
1997 2006 default=False,
1998 2007 )
1999 2008 coreconfigitem(
2000 2009 b'push',
2001 2010 b'pushvars.server',
2002 2011 default=False,
2003 2012 )
2004 2013 coreconfigitem(
2005 2014 b'rewrite',
2006 2015 b'backup-bundle',
2007 2016 default=True,
2008 2017 alias=[(b'ui', b'history-editing-backup')],
2009 2018 )
2010 2019 coreconfigitem(
2011 2020 b'rewrite',
2012 2021 b'update-timestamp',
2013 2022 default=False,
2014 2023 )
2015 2024 coreconfigitem(
2016 2025 b'rewrite',
2017 2026 b'empty-successor',
2018 2027 default=b'skip',
2019 2028 experimental=True,
2020 2029 )
2021 2030 # experimental as long as format.use-dirstate-v2 is.
2022 2031 coreconfigitem(
2023 2032 b'storage',
2024 2033 b'dirstate-v2.slow-path',
2025 2034 default=b"abort",
2026 2035 experimental=True,
2027 2036 )
2028 2037 coreconfigitem(
2029 2038 b'storage',
2030 2039 b'new-repo-backend',
2031 2040 default=b'revlogv1',
2032 2041 experimental=True,
2033 2042 )
2034 2043 coreconfigitem(
2035 2044 b'storage',
2036 2045 b'revlog.optimize-delta-parent-choice',
2037 2046 default=True,
2038 2047 alias=[(b'format', b'aggressivemergedeltas')],
2039 2048 )
2040 2049 coreconfigitem(
2041 2050 b'storage',
2042 2051 b'revlog.issue6528.fix-incoming',
2043 2052 default=True,
2044 2053 )
2045 2054 # experimental as long as rust is experimental (or a C version is implemented)
2046 2055 coreconfigitem(
2047 2056 b'storage',
2048 2057 b'revlog.persistent-nodemap.mmap',
2049 2058 default=True,
2050 2059 )
2051 2060 # experimental as long as format.use-persistent-nodemap is.
2052 2061 coreconfigitem(
2053 2062 b'storage',
2054 2063 b'revlog.persistent-nodemap.slow-path',
2055 2064 default=b"abort",
2056 2065 )
2057 2066
2058 2067 coreconfigitem(
2059 2068 b'storage',
2060 2069 b'revlog.reuse-external-delta',
2061 2070 default=True,
2062 2071 )
2063 2072 coreconfigitem(
2064 2073 b'storage',
2065 2074 b'revlog.reuse-external-delta-parent',
2066 2075 default=None,
2067 2076 )
2068 2077 coreconfigitem(
2069 2078 b'storage',
2070 2079 b'revlog.zlib.level',
2071 2080 default=None,
2072 2081 )
2073 2082 coreconfigitem(
2074 2083 b'storage',
2075 2084 b'revlog.zstd.level',
2076 2085 default=None,
2077 2086 )
2078 2087 coreconfigitem(
2079 2088 b'server',
2080 2089 b'bookmarks-pushkey-compat',
2081 2090 default=True,
2082 2091 )
2083 2092 coreconfigitem(
2084 2093 b'server',
2085 2094 b'bundle1',
2086 2095 default=True,
2087 2096 )
2088 2097 coreconfigitem(
2089 2098 b'server',
2090 2099 b'bundle1gd',
2091 2100 default=None,
2092 2101 )
2093 2102 coreconfigitem(
2094 2103 b'server',
2095 2104 b'bundle1.pull',
2096 2105 default=None,
2097 2106 )
2098 2107 coreconfigitem(
2099 2108 b'server',
2100 2109 b'bundle1gd.pull',
2101 2110 default=None,
2102 2111 )
2103 2112 coreconfigitem(
2104 2113 b'server',
2105 2114 b'bundle1.push',
2106 2115 default=None,
2107 2116 )
2108 2117 coreconfigitem(
2109 2118 b'server',
2110 2119 b'bundle1gd.push',
2111 2120 default=None,
2112 2121 )
2113 2122 coreconfigitem(
2114 2123 b'server',
2115 2124 b'bundle2.stream',
2116 2125 default=True,
2117 2126 alias=[(b'experimental', b'bundle2.stream')],
2118 2127 )
2119 2128 coreconfigitem(
2120 2129 b'server',
2121 2130 b'compressionengines',
2122 2131 default=list,
2123 2132 )
2124 2133 coreconfigitem(
2125 2134 b'server',
2126 2135 b'concurrent-push-mode',
2127 2136 default=b'check-related',
2128 2137 )
2129 2138 coreconfigitem(
2130 2139 b'server',
2131 2140 b'disablefullbundle',
2132 2141 default=False,
2133 2142 )
2134 2143 coreconfigitem(
2135 2144 b'server',
2136 2145 b'maxhttpheaderlen',
2137 2146 default=1024,
2138 2147 )
2139 2148 coreconfigitem(
2140 2149 b'server',
2141 2150 b'pullbundle',
2142 2151 default=False,
2143 2152 )
2144 2153 coreconfigitem(
2145 2154 b'server',
2146 2155 b'preferuncompressed',
2147 2156 default=False,
2148 2157 )
2149 2158 coreconfigitem(
2150 2159 b'server',
2151 2160 b'streamunbundle',
2152 2161 default=False,
2153 2162 )
2154 2163 coreconfigitem(
2155 2164 b'server',
2156 2165 b'uncompressed',
2157 2166 default=True,
2158 2167 )
2159 2168 coreconfigitem(
2160 2169 b'server',
2161 2170 b'uncompressedallowsecret',
2162 2171 default=False,
2163 2172 )
2164 2173 coreconfigitem(
2165 2174 b'server',
2166 2175 b'view',
2167 2176 default=b'served',
2168 2177 )
2169 2178 coreconfigitem(
2170 2179 b'server',
2171 2180 b'validate',
2172 2181 default=False,
2173 2182 )
2174 2183 coreconfigitem(
2175 2184 b'server',
2176 2185 b'zliblevel',
2177 2186 default=-1,
2178 2187 )
2179 2188 coreconfigitem(
2180 2189 b'server',
2181 2190 b'zstdlevel',
2182 2191 default=3,
2183 2192 )
2184 2193 coreconfigitem(
2185 2194 b'share',
2186 2195 b'pool',
2187 2196 default=None,
2188 2197 )
2189 2198 coreconfigitem(
2190 2199 b'share',
2191 2200 b'poolnaming',
2192 2201 default=b'identity',
2193 2202 )
2194 2203 coreconfigitem(
2195 2204 b'share',
2196 2205 b'safe-mismatch.source-not-safe',
2197 2206 default=b'abort',
2198 2207 )
2199 2208 coreconfigitem(
2200 2209 b'share',
2201 2210 b'safe-mismatch.source-safe',
2202 2211 default=b'abort',
2203 2212 )
2204 2213 coreconfigitem(
2205 2214 b'share',
2206 2215 b'safe-mismatch.source-not-safe.warn',
2207 2216 default=True,
2208 2217 )
2209 2218 coreconfigitem(
2210 2219 b'share',
2211 2220 b'safe-mismatch.source-safe.warn',
2212 2221 default=True,
2213 2222 )
2214 2223 coreconfigitem(
2215 2224 b'share',
2216 2225 b'safe-mismatch.source-not-safe:verbose-upgrade',
2217 2226 default=True,
2218 2227 )
2219 2228 coreconfigitem(
2220 2229 b'share',
2221 2230 b'safe-mismatch.source-safe:verbose-upgrade',
2222 2231 default=True,
2223 2232 )
2224 2233 coreconfigitem(
2225 2234 b'shelve',
2226 2235 b'maxbackups',
2227 2236 default=10,
2228 2237 )
2229 2238 coreconfigitem(
2230 2239 b'smtp',
2231 2240 b'host',
2232 2241 default=None,
2233 2242 )
2234 2243 coreconfigitem(
2235 2244 b'smtp',
2236 2245 b'local_hostname',
2237 2246 default=None,
2238 2247 )
2239 2248 coreconfigitem(
2240 2249 b'smtp',
2241 2250 b'password',
2242 2251 default=None,
2243 2252 )
2244 2253 coreconfigitem(
2245 2254 b'smtp',
2246 2255 b'port',
2247 2256 default=dynamicdefault,
2248 2257 )
2249 2258 coreconfigitem(
2250 2259 b'smtp',
2251 2260 b'tls',
2252 2261 default=b'none',
2253 2262 )
2254 2263 coreconfigitem(
2255 2264 b'smtp',
2256 2265 b'username',
2257 2266 default=None,
2258 2267 )
2259 2268 coreconfigitem(
2260 2269 b'sparse',
2261 2270 b'missingwarning',
2262 2271 default=True,
2263 2272 experimental=True,
2264 2273 )
2265 2274 coreconfigitem(
2266 2275 b'subrepos',
2267 2276 b'allowed',
2268 2277 default=dynamicdefault, # to make backporting simpler
2269 2278 )
2270 2279 coreconfigitem(
2271 2280 b'subrepos',
2272 2281 b'hg:allowed',
2273 2282 default=dynamicdefault,
2274 2283 )
2275 2284 coreconfigitem(
2276 2285 b'subrepos',
2277 2286 b'git:allowed',
2278 2287 default=dynamicdefault,
2279 2288 )
2280 2289 coreconfigitem(
2281 2290 b'subrepos',
2282 2291 b'svn:allowed',
2283 2292 default=dynamicdefault,
2284 2293 )
2285 2294 coreconfigitem(
2286 2295 b'templates',
2287 2296 b'.*',
2288 2297 default=None,
2289 2298 generic=True,
2290 2299 )
2291 2300 coreconfigitem(
2292 2301 b'templateconfig',
2293 2302 b'.*',
2294 2303 default=dynamicdefault,
2295 2304 generic=True,
2296 2305 )
2297 2306 coreconfigitem(
2298 2307 b'trusted',
2299 2308 b'groups',
2300 2309 default=list,
2301 2310 )
2302 2311 coreconfigitem(
2303 2312 b'trusted',
2304 2313 b'users',
2305 2314 default=list,
2306 2315 )
2307 2316 coreconfigitem(
2308 2317 b'ui',
2309 2318 b'_usedassubrepo',
2310 2319 default=False,
2311 2320 )
2312 2321 coreconfigitem(
2313 2322 b'ui',
2314 2323 b'allowemptycommit',
2315 2324 default=False,
2316 2325 )
2317 2326 coreconfigitem(
2318 2327 b'ui',
2319 2328 b'archivemeta',
2320 2329 default=True,
2321 2330 )
2322 2331 coreconfigitem(
2323 2332 b'ui',
2324 2333 b'askusername',
2325 2334 default=False,
2326 2335 )
2327 2336 coreconfigitem(
2328 2337 b'ui',
2329 2338 b'available-memory',
2330 2339 default=None,
2331 2340 )
2332 2341
2333 2342 coreconfigitem(
2334 2343 b'ui',
2335 2344 b'clonebundlefallback',
2336 2345 default=False,
2337 2346 )
2338 2347 coreconfigitem(
2339 2348 b'ui',
2340 2349 b'clonebundleprefers',
2341 2350 default=list,
2342 2351 )
2343 2352 coreconfigitem(
2344 2353 b'ui',
2345 2354 b'clonebundles',
2346 2355 default=True,
2347 2356 )
2348 2357 coreconfigitem(
2349 2358 b'ui',
2350 2359 b'color',
2351 2360 default=b'auto',
2352 2361 )
2353 2362 coreconfigitem(
2354 2363 b'ui',
2355 2364 b'commitsubrepos',
2356 2365 default=False,
2357 2366 )
2358 2367 coreconfigitem(
2359 2368 b'ui',
2360 2369 b'debug',
2361 2370 default=False,
2362 2371 )
2363 2372 coreconfigitem(
2364 2373 b'ui',
2365 2374 b'debugger',
2366 2375 default=None,
2367 2376 )
2368 2377 coreconfigitem(
2369 2378 b'ui',
2370 2379 b'editor',
2371 2380 default=dynamicdefault,
2372 2381 )
2373 2382 coreconfigitem(
2374 2383 b'ui',
2375 2384 b'detailed-exit-code',
2376 2385 default=False,
2377 2386 experimental=True,
2378 2387 )
2379 2388 coreconfigitem(
2380 2389 b'ui',
2381 2390 b'fallbackencoding',
2382 2391 default=None,
2383 2392 )
2384 2393 coreconfigitem(
2385 2394 b'ui',
2386 2395 b'forcecwd',
2387 2396 default=None,
2388 2397 )
2389 2398 coreconfigitem(
2390 2399 b'ui',
2391 2400 b'forcemerge',
2392 2401 default=None,
2393 2402 )
2394 2403 coreconfigitem(
2395 2404 b'ui',
2396 2405 b'formatdebug',
2397 2406 default=False,
2398 2407 )
2399 2408 coreconfigitem(
2400 2409 b'ui',
2401 2410 b'formatjson',
2402 2411 default=False,
2403 2412 )
2404 2413 coreconfigitem(
2405 2414 b'ui',
2406 2415 b'formatted',
2407 2416 default=None,
2408 2417 )
2409 2418 coreconfigitem(
2410 2419 b'ui',
2411 2420 b'interactive',
2412 2421 default=None,
2413 2422 )
2414 2423 coreconfigitem(
2415 2424 b'ui',
2416 2425 b'interface',
2417 2426 default=None,
2418 2427 )
2419 2428 coreconfigitem(
2420 2429 b'ui',
2421 2430 b'interface.chunkselector',
2422 2431 default=None,
2423 2432 )
2424 2433 coreconfigitem(
2425 2434 b'ui',
2426 2435 b'large-file-limit',
2427 2436 default=10 * (2 ** 20),
2428 2437 )
2429 2438 coreconfigitem(
2430 2439 b'ui',
2431 2440 b'logblockedtimes',
2432 2441 default=False,
2433 2442 )
2434 2443 coreconfigitem(
2435 2444 b'ui',
2436 2445 b'merge',
2437 2446 default=None,
2438 2447 )
2439 2448 coreconfigitem(
2440 2449 b'ui',
2441 2450 b'mergemarkers',
2442 2451 default=b'basic',
2443 2452 )
2444 2453 coreconfigitem(
2445 2454 b'ui',
2446 2455 b'message-output',
2447 2456 default=b'stdio',
2448 2457 )
2449 2458 coreconfigitem(
2450 2459 b'ui',
2451 2460 b'nontty',
2452 2461 default=False,
2453 2462 )
2454 2463 coreconfigitem(
2455 2464 b'ui',
2456 2465 b'origbackuppath',
2457 2466 default=None,
2458 2467 )
2459 2468 coreconfigitem(
2460 2469 b'ui',
2461 2470 b'paginate',
2462 2471 default=True,
2463 2472 )
2464 2473 coreconfigitem(
2465 2474 b'ui',
2466 2475 b'patch',
2467 2476 default=None,
2468 2477 )
2469 2478 coreconfigitem(
2470 2479 b'ui',
2471 2480 b'portablefilenames',
2472 2481 default=b'warn',
2473 2482 )
2474 2483 coreconfigitem(
2475 2484 b'ui',
2476 2485 b'promptecho',
2477 2486 default=False,
2478 2487 )
2479 2488 coreconfigitem(
2480 2489 b'ui',
2481 2490 b'quiet',
2482 2491 default=False,
2483 2492 )
2484 2493 coreconfigitem(
2485 2494 b'ui',
2486 2495 b'quietbookmarkmove',
2487 2496 default=False,
2488 2497 )
2489 2498 coreconfigitem(
2490 2499 b'ui',
2491 2500 b'relative-paths',
2492 2501 default=b'legacy',
2493 2502 )
2494 2503 coreconfigitem(
2495 2504 b'ui',
2496 2505 b'remotecmd',
2497 2506 default=b'hg',
2498 2507 )
2499 2508 coreconfigitem(
2500 2509 b'ui',
2501 2510 b'report_untrusted',
2502 2511 default=True,
2503 2512 )
2504 2513 coreconfigitem(
2505 2514 b'ui',
2506 2515 b'rollback',
2507 2516 default=True,
2508 2517 )
2509 2518 coreconfigitem(
2510 2519 b'ui',
2511 2520 b'signal-safe-lock',
2512 2521 default=True,
2513 2522 )
2514 2523 coreconfigitem(
2515 2524 b'ui',
2516 2525 b'slash',
2517 2526 default=False,
2518 2527 )
2519 2528 coreconfigitem(
2520 2529 b'ui',
2521 2530 b'ssh',
2522 2531 default=b'ssh',
2523 2532 )
2524 2533 coreconfigitem(
2525 2534 b'ui',
2526 2535 b'ssherrorhint',
2527 2536 default=None,
2528 2537 )
2529 2538 coreconfigitem(
2530 2539 b'ui',
2531 2540 b'statuscopies',
2532 2541 default=False,
2533 2542 )
2534 2543 coreconfigitem(
2535 2544 b'ui',
2536 2545 b'strict',
2537 2546 default=False,
2538 2547 )
2539 2548 coreconfigitem(
2540 2549 b'ui',
2541 2550 b'style',
2542 2551 default=b'',
2543 2552 )
2544 2553 coreconfigitem(
2545 2554 b'ui',
2546 2555 b'supportcontact',
2547 2556 default=None,
2548 2557 )
2549 2558 coreconfigitem(
2550 2559 b'ui',
2551 2560 b'textwidth',
2552 2561 default=78,
2553 2562 )
2554 2563 coreconfigitem(
2555 2564 b'ui',
2556 2565 b'timeout',
2557 2566 default=b'600',
2558 2567 )
2559 2568 coreconfigitem(
2560 2569 b'ui',
2561 2570 b'timeout.warn',
2562 2571 default=0,
2563 2572 )
2564 2573 coreconfigitem(
2565 2574 b'ui',
2566 2575 b'timestamp-output',
2567 2576 default=False,
2568 2577 )
2569 2578 coreconfigitem(
2570 2579 b'ui',
2571 2580 b'traceback',
2572 2581 default=False,
2573 2582 )
2574 2583 coreconfigitem(
2575 2584 b'ui',
2576 2585 b'tweakdefaults',
2577 2586 default=False,
2578 2587 )
2579 2588 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2580 2589 coreconfigitem(
2581 2590 b'ui',
2582 2591 b'verbose',
2583 2592 default=False,
2584 2593 )
2585 2594 coreconfigitem(
2586 2595 b'verify',
2587 2596 b'skipflags',
2588 2597 default=0,
2589 2598 )
2590 2599 coreconfigitem(
2591 2600 b'web',
2592 2601 b'allowbz2',
2593 2602 default=False,
2594 2603 )
2595 2604 coreconfigitem(
2596 2605 b'web',
2597 2606 b'allowgz',
2598 2607 default=False,
2599 2608 )
2600 2609 coreconfigitem(
2601 2610 b'web',
2602 2611 b'allow-pull',
2603 2612 alias=[(b'web', b'allowpull')],
2604 2613 default=True,
2605 2614 )
2606 2615 coreconfigitem(
2607 2616 b'web',
2608 2617 b'allow-push',
2609 2618 alias=[(b'web', b'allow_push')],
2610 2619 default=list,
2611 2620 )
2612 2621 coreconfigitem(
2613 2622 b'web',
2614 2623 b'allowzip',
2615 2624 default=False,
2616 2625 )
2617 2626 coreconfigitem(
2618 2627 b'web',
2619 2628 b'archivesubrepos',
2620 2629 default=False,
2621 2630 )
2622 2631 coreconfigitem(
2623 2632 b'web',
2624 2633 b'cache',
2625 2634 default=True,
2626 2635 )
2627 2636 coreconfigitem(
2628 2637 b'web',
2629 2638 b'comparisoncontext',
2630 2639 default=5,
2631 2640 )
2632 2641 coreconfigitem(
2633 2642 b'web',
2634 2643 b'contact',
2635 2644 default=None,
2636 2645 )
2637 2646 coreconfigitem(
2638 2647 b'web',
2639 2648 b'deny_push',
2640 2649 default=list,
2641 2650 )
2642 2651 coreconfigitem(
2643 2652 b'web',
2644 2653 b'guessmime',
2645 2654 default=False,
2646 2655 )
2647 2656 coreconfigitem(
2648 2657 b'web',
2649 2658 b'hidden',
2650 2659 default=False,
2651 2660 )
2652 2661 coreconfigitem(
2653 2662 b'web',
2654 2663 b'labels',
2655 2664 default=list,
2656 2665 )
2657 2666 coreconfigitem(
2658 2667 b'web',
2659 2668 b'logoimg',
2660 2669 default=b'hglogo.png',
2661 2670 )
2662 2671 coreconfigitem(
2663 2672 b'web',
2664 2673 b'logourl',
2665 2674 default=b'https://mercurial-scm.org/',
2666 2675 )
2667 2676 coreconfigitem(
2668 2677 b'web',
2669 2678 b'accesslog',
2670 2679 default=b'-',
2671 2680 )
2672 2681 coreconfigitem(
2673 2682 b'web',
2674 2683 b'address',
2675 2684 default=b'',
2676 2685 )
2677 2686 coreconfigitem(
2678 2687 b'web',
2679 2688 b'allow-archive',
2680 2689 alias=[(b'web', b'allow_archive')],
2681 2690 default=list,
2682 2691 )
2683 2692 coreconfigitem(
2684 2693 b'web',
2685 2694 b'allow_read',
2686 2695 default=list,
2687 2696 )
2688 2697 coreconfigitem(
2689 2698 b'web',
2690 2699 b'baseurl',
2691 2700 default=None,
2692 2701 )
2693 2702 coreconfigitem(
2694 2703 b'web',
2695 2704 b'cacerts',
2696 2705 default=None,
2697 2706 )
2698 2707 coreconfigitem(
2699 2708 b'web',
2700 2709 b'certificate',
2701 2710 default=None,
2702 2711 )
2703 2712 coreconfigitem(
2704 2713 b'web',
2705 2714 b'collapse',
2706 2715 default=False,
2707 2716 )
2708 2717 coreconfigitem(
2709 2718 b'web',
2710 2719 b'csp',
2711 2720 default=None,
2712 2721 )
2713 2722 coreconfigitem(
2714 2723 b'web',
2715 2724 b'deny_read',
2716 2725 default=list,
2717 2726 )
2718 2727 coreconfigitem(
2719 2728 b'web',
2720 2729 b'descend',
2721 2730 default=True,
2722 2731 )
2723 2732 coreconfigitem(
2724 2733 b'web',
2725 2734 b'description',
2726 2735 default=b"",
2727 2736 )
2728 2737 coreconfigitem(
2729 2738 b'web',
2730 2739 b'encoding',
2731 2740 default=lambda: encoding.encoding,
2732 2741 )
2733 2742 coreconfigitem(
2734 2743 b'web',
2735 2744 b'errorlog',
2736 2745 default=b'-',
2737 2746 )
2738 2747 coreconfigitem(
2739 2748 b'web',
2740 2749 b'ipv6',
2741 2750 default=False,
2742 2751 )
2743 2752 coreconfigitem(
2744 2753 b'web',
2745 2754 b'maxchanges',
2746 2755 default=10,
2747 2756 )
2748 2757 coreconfigitem(
2749 2758 b'web',
2750 2759 b'maxfiles',
2751 2760 default=10,
2752 2761 )
2753 2762 coreconfigitem(
2754 2763 b'web',
2755 2764 b'maxshortchanges',
2756 2765 default=60,
2757 2766 )
2758 2767 coreconfigitem(
2759 2768 b'web',
2760 2769 b'motd',
2761 2770 default=b'',
2762 2771 )
2763 2772 coreconfigitem(
2764 2773 b'web',
2765 2774 b'name',
2766 2775 default=dynamicdefault,
2767 2776 )
2768 2777 coreconfigitem(
2769 2778 b'web',
2770 2779 b'port',
2771 2780 default=8000,
2772 2781 )
2773 2782 coreconfigitem(
2774 2783 b'web',
2775 2784 b'prefix',
2776 2785 default=b'',
2777 2786 )
2778 2787 coreconfigitem(
2779 2788 b'web',
2780 2789 b'push_ssl',
2781 2790 default=True,
2782 2791 )
2783 2792 coreconfigitem(
2784 2793 b'web',
2785 2794 b'refreshinterval',
2786 2795 default=20,
2787 2796 )
2788 2797 coreconfigitem(
2789 2798 b'web',
2790 2799 b'server-header',
2791 2800 default=None,
2792 2801 )
2793 2802 coreconfigitem(
2794 2803 b'web',
2795 2804 b'static',
2796 2805 default=None,
2797 2806 )
2798 2807 coreconfigitem(
2799 2808 b'web',
2800 2809 b'staticurl',
2801 2810 default=None,
2802 2811 )
2803 2812 coreconfigitem(
2804 2813 b'web',
2805 2814 b'stripes',
2806 2815 default=1,
2807 2816 )
2808 2817 coreconfigitem(
2809 2818 b'web',
2810 2819 b'style',
2811 2820 default=b'paper',
2812 2821 )
2813 2822 coreconfigitem(
2814 2823 b'web',
2815 2824 b'templates',
2816 2825 default=None,
2817 2826 )
2818 2827 coreconfigitem(
2819 2828 b'web',
2820 2829 b'view',
2821 2830 default=b'served',
2822 2831 experimental=True,
2823 2832 )
2824 2833 coreconfigitem(
2825 2834 b'worker',
2826 2835 b'backgroundclose',
2827 2836 default=dynamicdefault,
2828 2837 )
2829 2838 # Windows defaults to a limit of 512 open files. A buffer of 128
2830 2839 # should give us enough headway.
2831 2840 coreconfigitem(
2832 2841 b'worker',
2833 2842 b'backgroundclosemaxqueue',
2834 2843 default=384,
2835 2844 )
2836 2845 coreconfigitem(
2837 2846 b'worker',
2838 2847 b'backgroundcloseminfilecount',
2839 2848 default=2048,
2840 2849 )
2841 2850 coreconfigitem(
2842 2851 b'worker',
2843 2852 b'backgroundclosethreadcount',
2844 2853 default=4,
2845 2854 )
2846 2855 coreconfigitem(
2847 2856 b'worker',
2848 2857 b'enabled',
2849 2858 default=True,
2850 2859 )
2851 2860 coreconfigitem(
2852 2861 b'worker',
2853 2862 b'numcpus',
2854 2863 default=None,
2855 2864 )
2856 2865
2857 2866 # Rebase related configuration moved to core because other extension are doing
2858 2867 # strange things. For example, shelve import the extensions to reuse some bit
2859 2868 # without formally loading it.
2860 2869 coreconfigitem(
2861 2870 b'commands',
2862 2871 b'rebase.requiredest',
2863 2872 default=False,
2864 2873 )
2865 2874 coreconfigitem(
2866 2875 b'experimental',
2867 2876 b'rebaseskipobsolete',
2868 2877 default=True,
2869 2878 )
2870 2879 coreconfigitem(
2871 2880 b'rebase',
2872 2881 b'singletransaction',
2873 2882 default=False,
2874 2883 )
2875 2884 coreconfigitem(
2876 2885 b'rebase',
2877 2886 b'experimental.inmemory',
2878 2887 default=False,
2879 2888 )
2880 2889
2881 2890 # This setting controls creation of a rebase_source extra field
2882 2891 # during rebase. When False, no such field is created. This is
2883 2892 # useful eg for incrementally converting changesets and then
2884 2893 # rebasing them onto an existing repo.
2885 2894 # WARNING: this is an advanced setting reserved for people who know
2886 2895 # exactly what they are doing. Misuse of this setting can easily
2887 2896 # result in obsmarker cycles and a vivid headache.
2888 2897 coreconfigitem(
2889 2898 b'rebase',
2890 2899 b'store-source',
2891 2900 default=True,
2892 2901 experimental=True,
2893 2902 )
@@ -1,693 +1,704 b''
1 1 # dirstatemap.py
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6
7 7 from .i18n import _
8 8
9 9 from . import (
10 10 error,
11 11 pathutil,
12 12 policy,
13 13 txnutil,
14 14 util,
15 15 )
16 16
17 17 from .dirstateutils import (
18 18 docket as docketmod,
19 19 v2,
20 20 )
21 21
22 22 parsers = policy.importmod('parsers')
23 23 rustmod = policy.importrust('dirstate')
24 24
25 25 propertycache = util.propertycache
26 26
27 27 if rustmod is None:
28 28 DirstateItem = parsers.DirstateItem
29 29 else:
30 30 DirstateItem = rustmod.DirstateItem
31 31
32 32 rangemask = 0x7FFFFFFF
33 33
34 34 WRITE_MODE_AUTO = 0
35 35 WRITE_MODE_FORCE_NEW = 1
36 WRITE_MODE_FORCE_APPEND = 2
36 37
37 38
38 39 class _dirstatemapcommon:
39 40 """
40 41 Methods that are identical for both implementations of the dirstatemap
41 42 class, with and without Rust extensions enabled.
42 43 """
43 44
44 45 # please pytype
45 46
46 47 _map = None
47 48 copymap = None
48 49
49 50 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
50 51 self._use_dirstate_v2 = use_dirstate_v2
51 52 self._nodeconstants = nodeconstants
52 53 self._ui = ui
53 54 self._opener = opener
54 55 self._root = root
55 56 self._filename = b'dirstate'
56 57 self._nodelen = 20 # Also update Rust code when changing this!
57 58 self._parents = None
58 59 self._dirtyparents = False
59 60 self._docket = None
61 write_mode = ui.config(b"devel", b"dirstate.v2.data_update_mode")
62 if write_mode == b"auto":
63 self._write_mode = WRITE_MODE_AUTO
64 elif write_mode == b"force-append":
65 self._write_mode = WRITE_MODE_FORCE_APPEND
66 elif write_mode == b"force-new":
67 self._write_mode = WRITE_MODE_FORCE_NEW
68 else:
69 # unknown value, fallback to default
70 self._write_mode = WRITE_MODE_AUTO
60 71
61 72 # for consistent view between _pl() and _read() invocations
62 73 self._pendingmode = None
63 74
64 75 def preload(self):
65 76 """Loads the underlying data, if it's not already loaded"""
66 77 self._map
67 78
68 79 def get(self, key, default=None):
69 80 return self._map.get(key, default)
70 81
71 82 def __len__(self):
72 83 return len(self._map)
73 84
74 85 def __iter__(self):
75 86 return iter(self._map)
76 87
77 88 def __contains__(self, key):
78 89 return key in self._map
79 90
80 91 def __getitem__(self, item):
81 92 return self._map[item]
82 93
83 94 ### disk interaction
84 95
85 96 def _opendirstatefile(self):
86 97 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
87 98 if self._pendingmode is not None and self._pendingmode != mode:
88 99 fp.close()
89 100 raise error.Abort(
90 101 _(b'working directory state may be changed parallelly')
91 102 )
92 103 self._pendingmode = mode
93 104 return fp
94 105
95 106 def _readdirstatefile(self, size=-1):
96 107 try:
97 108 with self._opendirstatefile() as fp:
98 109 return fp.read(size)
99 110 except FileNotFoundError:
100 111 # File doesn't exist, so the current state is empty
101 112 return b''
102 113
103 114 @property
104 115 def docket(self):
105 116 if not self._docket:
106 117 if not self._use_dirstate_v2:
107 118 raise error.ProgrammingError(
108 119 b'dirstate only has a docket in v2 format'
109 120 )
110 121 self._docket = docketmod.DirstateDocket.parse(
111 122 self._readdirstatefile(), self._nodeconstants
112 123 )
113 124 return self._docket
114 125
115 126 def write_v2_no_append(self, tr, st, meta, packed):
116 127 old_docket = self.docket
117 128 new_docket = docketmod.DirstateDocket.with_new_uuid(
118 129 self.parents(), len(packed), meta
119 130 )
120 131 if old_docket.uuid == new_docket.uuid:
121 132 raise error.ProgrammingError(b'dirstate docket name collision')
122 133 data_filename = new_docket.data_filename()
123 134 self._opener.write(data_filename, packed)
124 135 # Write the new docket after the new data file has been
125 136 # written. Because `st` was opened with `atomictemp=True`,
126 137 # the actual `.hg/dirstate` file is only affected on close.
127 138 st.write(new_docket.serialize())
128 139 st.close()
129 140 # Remove the old data file after the new docket pointing to
130 141 # the new data file was written.
131 142 if old_docket.uuid:
132 143 data_filename = old_docket.data_filename()
133 144 unlink = lambda _tr=None: self._opener.unlink(data_filename)
134 145 if tr:
135 146 category = b"dirstate-v2-clean-" + old_docket.uuid
136 147 tr.addpostclose(category, unlink)
137 148 else:
138 149 unlink()
139 150 self._docket = new_docket
140 151
141 152 ### reading/setting parents
142 153
143 154 def parents(self):
144 155 if not self._parents:
145 156 if self._use_dirstate_v2:
146 157 self._parents = self.docket.parents
147 158 else:
148 159 read_len = self._nodelen * 2
149 160 st = self._readdirstatefile(read_len)
150 161 l = len(st)
151 162 if l == read_len:
152 163 self._parents = (
153 164 st[: self._nodelen],
154 165 st[self._nodelen : 2 * self._nodelen],
155 166 )
156 167 elif l == 0:
157 168 self._parents = (
158 169 self._nodeconstants.nullid,
159 170 self._nodeconstants.nullid,
160 171 )
161 172 else:
162 173 raise error.Abort(
163 174 _(b'working directory state appears damaged!')
164 175 )
165 176
166 177 return self._parents
167 178
168 179
169 180 class dirstatemap(_dirstatemapcommon):
170 181 """Map encapsulating the dirstate's contents.
171 182
172 183 The dirstate contains the following state:
173 184
174 185 - `identity` is the identity of the dirstate file, which can be used to
175 186 detect when changes have occurred to the dirstate file.
176 187
177 188 - `parents` is a pair containing the parents of the working copy. The
178 189 parents are updated by calling `setparents`.
179 190
180 191 - the state map maps filenames to tuples of (state, mode, size, mtime),
181 192 where state is a single character representing 'normal', 'added',
182 193 'removed', or 'merged'. It is read by treating the dirstate as a
183 194 dict. File state is updated by calling various methods (see each
184 195 documentation for details):
185 196
186 197 - `reset_state`,
187 198 - `set_tracked`
188 199 - `set_untracked`
189 200 - `set_clean`
190 201 - `set_possibly_dirty`
191 202
192 203 - `copymap` maps destination filenames to their source filename.
193 204
194 205 The dirstate also provides the following views onto the state:
195 206
196 207 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
197 208 form that they appear as in the dirstate.
198 209
199 210 - `dirfoldmap` is a dict mapping normalized directory names to the
200 211 denormalized form that they appear as in the dirstate.
201 212 """
202 213
203 214 ### Core data storage and access
204 215
205 216 @propertycache
206 217 def _map(self):
207 218 self._map = {}
208 219 self.read()
209 220 return self._map
210 221
211 222 @propertycache
212 223 def copymap(self):
213 224 self.copymap = {}
214 225 self._map
215 226 return self.copymap
216 227
217 228 def clear(self):
218 229 self._map.clear()
219 230 self.copymap.clear()
220 231 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
221 232 util.clearcachedproperty(self, b"_dirs")
222 233 util.clearcachedproperty(self, b"_alldirs")
223 234 util.clearcachedproperty(self, b"filefoldmap")
224 235 util.clearcachedproperty(self, b"dirfoldmap")
225 236
226 237 def items(self):
227 238 return self._map.items()
228 239
229 240 # forward for python2,3 compat
230 241 iteritems = items
231 242
232 243 def debug_iter(self, all):
233 244 """
234 245 Return an iterator of (filename, state, mode, size, mtime) tuples
235 246
236 247 `all` is unused when Rust is not enabled
237 248 """
238 249 for (filename, item) in self.items():
239 250 yield (filename, item.state, item.mode, item.size, item.mtime)
240 251
241 252 def keys(self):
242 253 return self._map.keys()
243 254
244 255 ### reading/setting parents
245 256
246 257 def setparents(self, p1, p2, fold_p2=False):
247 258 self._parents = (p1, p2)
248 259 self._dirtyparents = True
249 260 copies = {}
250 261 if fold_p2:
251 262 for f, s in self._map.items():
252 263 # Discard "merged" markers when moving away from a merge state
253 264 if s.p2_info:
254 265 source = self.copymap.pop(f, None)
255 266 if source:
256 267 copies[f] = source
257 268 s.drop_merge_data()
258 269 return copies
259 270
260 271 ### disk interaction
261 272
262 273 def read(self):
263 274 # ignore HG_PENDING because identity is used only for writing
264 275 self.identity = util.filestat.frompath(
265 276 self._opener.join(self._filename)
266 277 )
267 278
268 279 if self._use_dirstate_v2:
269 280 if not self.docket.uuid:
270 281 return
271 282 st = self._opener.read(self.docket.data_filename())
272 283 else:
273 284 st = self._readdirstatefile()
274 285
275 286 if not st:
276 287 return
277 288
278 289 # TODO: adjust this estimate for dirstate-v2
279 290 if util.safehasattr(parsers, b'dict_new_presized'):
280 291 # Make an estimate of the number of files in the dirstate based on
281 292 # its size. This trades wasting some memory for avoiding costly
282 293 # resizes. Each entry have a prefix of 17 bytes followed by one or
283 294 # two path names. Studies on various large-scale real-world repositories
284 295 # found 54 bytes a reasonable upper limit for the average path names.
285 296 # Copy entries are ignored for the sake of this estimate.
286 297 self._map = parsers.dict_new_presized(len(st) // 71)
287 298
288 299 # Python's garbage collector triggers a GC each time a certain number
289 300 # of container objects (the number being defined by
290 301 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
291 302 # for each file in the dirstate. The C version then immediately marks
292 303 # them as not to be tracked by the collector. However, this has no
293 304 # effect on when GCs are triggered, only on what objects the GC looks
294 305 # into. This means that O(number of files) GCs are unavoidable.
295 306 # Depending on when in the process's lifetime the dirstate is parsed,
296 307 # this can get very expensive. As a workaround, disable GC while
297 308 # parsing the dirstate.
298 309 #
299 310 # (we cannot decorate the function directly since it is in a C module)
300 311 if self._use_dirstate_v2:
301 312 p = self.docket.parents
302 313 meta = self.docket.tree_metadata
303 314 parse_dirstate = util.nogc(v2.parse_dirstate)
304 315 parse_dirstate(self._map, self.copymap, st, meta)
305 316 else:
306 317 parse_dirstate = util.nogc(parsers.parse_dirstate)
307 318 p = parse_dirstate(self._map, self.copymap, st)
308 319 if not self._dirtyparents:
309 320 self.setparents(*p)
310 321
311 322 # Avoid excess attribute lookups by fast pathing certain checks
312 323 self.__contains__ = self._map.__contains__
313 324 self.__getitem__ = self._map.__getitem__
314 325 self.get = self._map.get
315 326
316 327 def write(self, tr, st):
317 328 if self._use_dirstate_v2:
318 329 packed, meta = v2.pack_dirstate(self._map, self.copymap)
319 330 self.write_v2_no_append(tr, st, meta, packed)
320 331 else:
321 332 packed = parsers.pack_dirstate(
322 333 self._map, self.copymap, self.parents()
323 334 )
324 335 st.write(packed)
325 336 st.close()
326 337 self._dirtyparents = False
327 338
328 339 @propertycache
329 340 def identity(self):
330 341 self._map
331 342 return self.identity
332 343
333 344 ### code related to maintaining and accessing "extra" property
334 345 # (e.g. "has_dir")
335 346
336 347 def _dirs_incr(self, filename, old_entry=None):
337 348 """increment the dirstate counter if applicable"""
338 349 if (
339 350 old_entry is None or old_entry.removed
340 351 ) and "_dirs" in self.__dict__:
341 352 self._dirs.addpath(filename)
342 353 if old_entry is None and "_alldirs" in self.__dict__:
343 354 self._alldirs.addpath(filename)
344 355
345 356 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
346 357 """decrement the dirstate counter if applicable"""
347 358 if old_entry is not None:
348 359 if "_dirs" in self.__dict__ and not old_entry.removed:
349 360 self._dirs.delpath(filename)
350 361 if "_alldirs" in self.__dict__ and not remove_variant:
351 362 self._alldirs.delpath(filename)
352 363 elif remove_variant and "_alldirs" in self.__dict__:
353 364 self._alldirs.addpath(filename)
354 365 if "filefoldmap" in self.__dict__:
355 366 normed = util.normcase(filename)
356 367 self.filefoldmap.pop(normed, None)
357 368
358 369 @propertycache
359 370 def filefoldmap(self):
360 371 """Returns a dictionary mapping normalized case paths to their
361 372 non-normalized versions.
362 373 """
363 374 try:
364 375 makefilefoldmap = parsers.make_file_foldmap
365 376 except AttributeError:
366 377 pass
367 378 else:
368 379 return makefilefoldmap(
369 380 self._map, util.normcasespec, util.normcasefallback
370 381 )
371 382
372 383 f = {}
373 384 normcase = util.normcase
374 385 for name, s in self._map.items():
375 386 if not s.removed:
376 387 f[normcase(name)] = name
377 388 f[b'.'] = b'.' # prevents useless util.fspath() invocation
378 389 return f
379 390
380 391 @propertycache
381 392 def dirfoldmap(self):
382 393 f = {}
383 394 normcase = util.normcase
384 395 for name in self._dirs:
385 396 f[normcase(name)] = name
386 397 return f
387 398
388 399 def hastrackeddir(self, d):
389 400 """
390 401 Returns True if the dirstate contains a tracked (not removed) file
391 402 in this directory.
392 403 """
393 404 return d in self._dirs
394 405
395 406 def hasdir(self, d):
396 407 """
397 408 Returns True if the dirstate contains a file (tracked or removed)
398 409 in this directory.
399 410 """
400 411 return d in self._alldirs
401 412
402 413 @propertycache
403 414 def _dirs(self):
404 415 return pathutil.dirs(self._map, only_tracked=True)
405 416
406 417 @propertycache
407 418 def _alldirs(self):
408 419 return pathutil.dirs(self._map)
409 420
410 421 ### code related to manipulation of entries and copy-sources
411 422
412 423 def reset_state(
413 424 self,
414 425 filename,
415 426 wc_tracked=False,
416 427 p1_tracked=False,
417 428 p2_info=False,
418 429 has_meaningful_mtime=True,
419 430 parentfiledata=None,
420 431 ):
421 432 """Set a entry to a given state, diregarding all previous state
422 433
423 434 This is to be used by the part of the dirstate API dedicated to
424 435 adjusting the dirstate after a update/merge.
425 436
426 437 note: calling this might result to no entry existing at all if the
427 438 dirstate map does not see any point at having one for this file
428 439 anymore.
429 440 """
430 441 # copy information are now outdated
431 442 # (maybe new information should be in directly passed to this function)
432 443 self.copymap.pop(filename, None)
433 444
434 445 if not (p1_tracked or p2_info or wc_tracked):
435 446 old_entry = self._map.get(filename)
436 447 self._drop_entry(filename)
437 448 self._dirs_decr(filename, old_entry=old_entry)
438 449 return
439 450
440 451 old_entry = self._map.get(filename)
441 452 self._dirs_incr(filename, old_entry)
442 453 entry = DirstateItem(
443 454 wc_tracked=wc_tracked,
444 455 p1_tracked=p1_tracked,
445 456 p2_info=p2_info,
446 457 has_meaningful_mtime=has_meaningful_mtime,
447 458 parentfiledata=parentfiledata,
448 459 )
449 460 self._map[filename] = entry
450 461
451 462 def set_tracked(self, filename):
452 463 new = False
453 464 entry = self.get(filename)
454 465 if entry is None:
455 466 self._dirs_incr(filename)
456 467 entry = DirstateItem(
457 468 wc_tracked=True,
458 469 )
459 470
460 471 self._map[filename] = entry
461 472 new = True
462 473 elif not entry.tracked:
463 474 self._dirs_incr(filename, entry)
464 475 entry.set_tracked()
465 476 self._refresh_entry(filename, entry)
466 477 new = True
467 478 else:
468 479 # XXX This is probably overkill for more case, but we need this to
469 480 # fully replace the `normallookup` call with `set_tracked` one.
470 481 # Consider smoothing this in the future.
471 482 entry.set_possibly_dirty()
472 483 self._refresh_entry(filename, entry)
473 484 return new
474 485
475 486 def set_untracked(self, f):
476 487 """Mark a file as no longer tracked in the dirstate map"""
477 488 entry = self.get(f)
478 489 if entry is None:
479 490 return False
480 491 else:
481 492 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
482 493 if not entry.p2_info:
483 494 self.copymap.pop(f, None)
484 495 entry.set_untracked()
485 496 self._refresh_entry(f, entry)
486 497 return True
487 498
488 499 def set_clean(self, filename, mode, size, mtime):
489 500 """mark a file as back to a clean state"""
490 501 entry = self[filename]
491 502 size = size & rangemask
492 503 entry.set_clean(mode, size, mtime)
493 504 self._refresh_entry(filename, entry)
494 505 self.copymap.pop(filename, None)
495 506
496 507 def set_possibly_dirty(self, filename):
497 508 """record that the current state of the file on disk is unknown"""
498 509 entry = self[filename]
499 510 entry.set_possibly_dirty()
500 511 self._refresh_entry(filename, entry)
501 512
502 513 def _refresh_entry(self, f, entry):
503 514 """record updated state of an entry"""
504 515 if not entry.any_tracked:
505 516 self._map.pop(f, None)
506 517
507 518 def _drop_entry(self, f):
508 519 """remove any entry for file f
509 520
510 521 This should also drop associated copy information
511 522
512 523 The fact we actually need to drop it is the responsability of the caller"""
513 524 self._map.pop(f, None)
514 525 self.copymap.pop(f, None)
515 526
516 527
517 528 if rustmod is not None:
518 529
519 530 class dirstatemap(_dirstatemapcommon):
520 531
521 532 ### Core data storage and access
522 533
523 534 @propertycache
524 535 def _map(self):
525 536 """
526 537 Fills the Dirstatemap when called.
527 538 """
528 539 # ignore HG_PENDING because identity is used only for writing
529 540 self.identity = util.filestat.frompath(
530 541 self._opener.join(self._filename)
531 542 )
532 543
533 544 if self._use_dirstate_v2:
534 545 if self.docket.uuid:
535 546 # TODO: use mmap when possible
536 547 data = self._opener.read(self.docket.data_filename())
537 548 else:
538 549 data = b''
539 550 self._map = rustmod.DirstateMap.new_v2(
540 551 data, self.docket.data_size, self.docket.tree_metadata
541 552 )
542 553 parents = self.docket.parents
543 554 else:
544 555 self._map, parents = rustmod.DirstateMap.new_v1(
545 556 self._readdirstatefile()
546 557 )
547 558
548 559 if parents and not self._dirtyparents:
549 560 self.setparents(*parents)
550 561
551 562 self.__contains__ = self._map.__contains__
552 563 self.__getitem__ = self._map.__getitem__
553 564 self.get = self._map.get
554 565 return self._map
555 566
556 567 @property
557 568 def copymap(self):
558 569 return self._map.copymap()
559 570
560 571 def debug_iter(self, all):
561 572 """
562 573 Return an iterator of (filename, state, mode, size, mtime) tuples
563 574
564 575 `all`: also include with `state == b' '` dirstate tree nodes that
565 576 don't have an associated `DirstateItem`.
566 577
567 578 """
568 579 return self._map.debug_iter(all)
569 580
570 581 def clear(self):
571 582 self._map.clear()
572 583 self.setparents(
573 584 self._nodeconstants.nullid, self._nodeconstants.nullid
574 585 )
575 586 util.clearcachedproperty(self, b"_dirs")
576 587 util.clearcachedproperty(self, b"_alldirs")
577 588 util.clearcachedproperty(self, b"dirfoldmap")
578 589
579 590 def items(self):
580 591 return self._map.items()
581 592
582 593 # forward for python2,3 compat
583 594 iteritems = items
584 595
585 596 def keys(self):
586 597 return iter(self._map)
587 598
588 599 ### reading/setting parents
589 600
590 601 def setparents(self, p1, p2, fold_p2=False):
591 602 self._parents = (p1, p2)
592 603 self._dirtyparents = True
593 604 copies = {}
594 605 if fold_p2:
595 606 copies = self._map.setparents_fixup()
596 607 return copies
597 608
598 609 ### disk interaction
599 610
600 611 @propertycache
601 612 def identity(self):
602 613 self._map
603 614 return self.identity
604 615
605 616 def write(self, tr, st):
606 617 if not self._use_dirstate_v2:
607 618 p1, p2 = self.parents()
608 619 packed = self._map.write_v1(p1, p2)
609 620 st.write(packed)
610 621 st.close()
611 622 self._dirtyparents = False
612 623 return
613 624
614 625 # We can only append to an existing data file if there is one
615 write_mode = WRITE_MODE_AUTO
626 write_mode = self._write_mode
616 627 if self.docket.uuid is None:
617 628 write_mode = WRITE_MODE_FORCE_NEW
618 629 packed, meta, append = self._map.write_v2(write_mode)
619 630 if append:
620 631 docket = self.docket
621 632 data_filename = docket.data_filename()
622 633 with self._opener(data_filename, b'r+b') as fp:
623 634 fp.seek(docket.data_size)
624 635 assert fp.tell() == docket.data_size
625 636 written = fp.write(packed)
626 637 if written is not None: # py2 may return None
627 638 assert written == len(packed), (written, len(packed))
628 639 docket.data_size += len(packed)
629 640 docket.parents = self.parents()
630 641 docket.tree_metadata = meta
631 642 st.write(docket.serialize())
632 643 st.close()
633 644 else:
634 645 self.write_v2_no_append(tr, st, meta, packed)
635 646 # Reload from the newly-written file
636 647 util.clearcachedproperty(self, b"_map")
637 648 self._dirtyparents = False
638 649
639 650 ### code related to maintaining and accessing "extra" property
640 651 # (e.g. "has_dir")
641 652
642 653 @propertycache
643 654 def filefoldmap(self):
644 655 """Returns a dictionary mapping normalized case paths to their
645 656 non-normalized versions.
646 657 """
647 658 return self._map.filefoldmapasdict()
648 659
649 660 def hastrackeddir(self, d):
650 661 return self._map.hastrackeddir(d)
651 662
652 663 def hasdir(self, d):
653 664 return self._map.hasdir(d)
654 665
655 666 @propertycache
656 667 def dirfoldmap(self):
657 668 f = {}
658 669 normcase = util.normcase
659 670 for name in self._map.tracked_dirs():
660 671 f[normcase(name)] = name
661 672 return f
662 673
663 674 ### code related to manipulation of entries and copy-sources
664 675
665 676 def set_tracked(self, f):
666 677 return self._map.set_tracked(f)
667 678
668 679 def set_untracked(self, f):
669 680 return self._map.set_untracked(f)
670 681
671 682 def set_clean(self, filename, mode, size, mtime):
672 683 self._map.set_clean(filename, mode, size, mtime)
673 684
674 685 def set_possibly_dirty(self, f):
675 686 self._map.set_possibly_dirty(f)
676 687
677 688 def reset_state(
678 689 self,
679 690 filename,
680 691 wc_tracked=False,
681 692 p1_tracked=False,
682 693 p2_info=False,
683 694 has_meaningful_mtime=True,
684 695 parentfiledata=None,
685 696 ):
686 697 return self._map.reset_state(
687 698 filename,
688 699 wc_tracked,
689 700 p1_tracked,
690 701 p2_info,
691 702 has_meaningful_mtime,
692 703 parentfiledata,
693 704 )
@@ -1,1913 +1,1929 b''
1 1 use bytes_cast::BytesCast;
2 2 use micro_timer::timed;
3 3 use std::borrow::Cow;
4 4 use std::path::PathBuf;
5 5
6 6 use super::on_disk;
7 7 use super::on_disk::DirstateV2ParseError;
8 8 use super::owning::OwningDirstateMap;
9 9 use super::path_with_basename::WithBasename;
10 10 use crate::dirstate::parsers::pack_entry;
11 11 use crate::dirstate::parsers::packed_entry_size;
12 12 use crate::dirstate::parsers::parse_dirstate_entries;
13 13 use crate::dirstate::CopyMapIter;
14 14 use crate::dirstate::DirstateV2Data;
15 15 use crate::dirstate::ParentFileData;
16 16 use crate::dirstate::StateMapIter;
17 17 use crate::dirstate::TruncatedTimestamp;
18 18 use crate::matchers::Matcher;
19 19 use crate::utils::hg_path::{HgPath, HgPathBuf};
20 20 use crate::DirstateEntry;
21 21 use crate::DirstateError;
22 22 use crate::DirstateMapError;
23 23 use crate::DirstateParents;
24 24 use crate::DirstateStatus;
25 25 use crate::FastHashbrownMap as FastHashMap;
26 26 use crate::PatternFileWarning;
27 27 use crate::StatusError;
28 28 use crate::StatusOptions;
29 29
30 30 /// Append to an existing data file if the amount of unreachable data (not used
31 31 /// anymore) is less than this fraction of the total amount of existing data.
32 32 const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;
33 33
34 34 #[derive(Debug, PartialEq, Eq)]
35 35 /// Version of the on-disk format
36 36 pub enum DirstateVersion {
37 37 V1,
38 38 V2,
39 39 }
40 40
41 41 #[derive(Debug, PartialEq, Eq)]
42 42 pub enum DirstateMapWriteMode {
43 43 Auto,
44 44 ForceNewDataFile,
45 ForceAppend,
45 46 }
46 47
47 48 #[derive(Debug)]
48 49 pub struct DirstateMap<'on_disk> {
49 50 /// Contents of the `.hg/dirstate` file
50 51 pub(super) on_disk: &'on_disk [u8],
51 52
52 53 pub(super) root: ChildNodes<'on_disk>,
53 54
54 55 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
55 56 pub(super) nodes_with_entry_count: u32,
56 57
57 58 /// Number of nodes anywhere in the tree that have
58 59 /// `.copy_source.is_some()`.
59 60 pub(super) nodes_with_copy_source_count: u32,
60 61
61 62 /// See on_disk::Header
62 63 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
63 64
64 65 /// How many bytes of `on_disk` are not used anymore
65 66 pub(super) unreachable_bytes: u32,
66 67
67 68 /// Size of the data used to first load this `DirstateMap`. Used in case
68 69 /// we need to write some new metadata, but no new data on disk.
69 70 pub(super) old_data_size: usize,
70 71
71 72 pub(super) dirstate_version: DirstateVersion,
73
74 /// Controlled by config option `devel.dirstate.v2.data_update_mode`
75 pub(super) write_mode: DirstateMapWriteMode,
72 76 }
73 77
74 78 /// Using a plain `HgPathBuf` of the full path from the repository root as a
75 79 /// map key would also work: all paths in a given map have the same parent
76 80 /// path, so comparing full paths gives the same result as comparing base
77 81 /// names. However `HashMap` would waste time always re-hashing the same
78 82 /// string prefix.
79 83 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
80 84
81 85 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
82 86 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
83 87 #[derive(Debug)]
84 88 pub(super) enum BorrowedPath<'tree, 'on_disk> {
85 89 InMemory(&'tree HgPathBuf),
86 90 OnDisk(&'on_disk HgPath),
87 91 }
88 92
89 93 #[derive(Debug)]
90 94 pub(super) enum ChildNodes<'on_disk> {
91 95 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
92 96 OnDisk(&'on_disk [on_disk::Node]),
93 97 }
94 98
95 99 #[derive(Debug)]
96 100 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
97 101 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
98 102 OnDisk(&'on_disk [on_disk::Node]),
99 103 }
100 104
101 105 #[derive(Debug)]
102 106 pub(super) enum NodeRef<'tree, 'on_disk> {
103 107 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
104 108 OnDisk(&'on_disk on_disk::Node),
105 109 }
106 110
107 111 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
108 112 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
109 113 match *self {
110 114 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
111 115 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
112 116 }
113 117 }
114 118 }
115 119
116 120 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
117 121 type Target = HgPath;
118 122
119 123 fn deref(&self) -> &HgPath {
120 124 match *self {
121 125 BorrowedPath::InMemory(in_memory) => in_memory,
122 126 BorrowedPath::OnDisk(on_disk) => on_disk,
123 127 }
124 128 }
125 129 }
126 130
127 131 impl Default for ChildNodes<'_> {
128 132 fn default() -> Self {
129 133 ChildNodes::InMemory(Default::default())
130 134 }
131 135 }
132 136
133 137 impl<'on_disk> ChildNodes<'on_disk> {
134 138 pub(super) fn as_ref<'tree>(
135 139 &'tree self,
136 140 ) -> ChildNodesRef<'tree, 'on_disk> {
137 141 match self {
138 142 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
139 143 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
140 144 }
141 145 }
142 146
143 147 pub(super) fn is_empty(&self) -> bool {
144 148 match self {
145 149 ChildNodes::InMemory(nodes) => nodes.is_empty(),
146 150 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
147 151 }
148 152 }
149 153
150 154 fn make_mut(
151 155 &mut self,
152 156 on_disk: &'on_disk [u8],
153 157 unreachable_bytes: &mut u32,
154 158 ) -> Result<
155 159 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
156 160 DirstateV2ParseError,
157 161 > {
158 162 match self {
159 163 ChildNodes::InMemory(nodes) => Ok(nodes),
160 164 ChildNodes::OnDisk(nodes) => {
161 165 *unreachable_bytes +=
162 166 std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32;
163 167 let nodes = nodes
164 168 .iter()
165 169 .map(|node| {
166 170 Ok((
167 171 node.path(on_disk)?,
168 172 node.to_in_memory_node(on_disk)?,
169 173 ))
170 174 })
171 175 .collect::<Result<_, _>>()?;
172 176 *self = ChildNodes::InMemory(nodes);
173 177 match self {
174 178 ChildNodes::InMemory(nodes) => Ok(nodes),
175 179 ChildNodes::OnDisk(_) => unreachable!(),
176 180 }
177 181 }
178 182 }
179 183 }
180 184 }
181 185
182 186 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
183 187 pub(super) fn get(
184 188 &self,
185 189 base_name: &HgPath,
186 190 on_disk: &'on_disk [u8],
187 191 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
188 192 match self {
189 193 ChildNodesRef::InMemory(nodes) => Ok(nodes
190 194 .get_key_value(base_name)
191 195 .map(|(k, v)| NodeRef::InMemory(k, v))),
192 196 ChildNodesRef::OnDisk(nodes) => {
193 197 let mut parse_result = Ok(());
194 198 let search_result = nodes.binary_search_by(|node| {
195 199 match node.base_name(on_disk) {
196 200 Ok(node_base_name) => node_base_name.cmp(base_name),
197 201 Err(e) => {
198 202 parse_result = Err(e);
199 203 // Dummy comparison result, `search_result` won’t
200 204 // be used since `parse_result` is an error
201 205 std::cmp::Ordering::Equal
202 206 }
203 207 }
204 208 });
205 209 parse_result.map(|()| {
206 210 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
207 211 })
208 212 }
209 213 }
210 214 }
211 215
212 216 /// Iterate in undefined order
213 217 pub(super) fn iter(
214 218 &self,
215 219 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
216 220 match self {
217 221 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
218 222 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
219 223 ),
220 224 ChildNodesRef::OnDisk(nodes) => {
221 225 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
222 226 }
223 227 }
224 228 }
225 229
226 230 /// Iterate in parallel in undefined order
227 231 pub(super) fn par_iter(
228 232 &self,
229 233 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
230 234 {
231 235 use rayon::prelude::*;
232 236 match self {
233 237 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
234 238 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
235 239 ),
236 240 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
237 241 nodes.par_iter().map(NodeRef::OnDisk),
238 242 ),
239 243 }
240 244 }
241 245
242 246 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
243 247 match self {
244 248 ChildNodesRef::InMemory(nodes) => {
245 249 let mut vec: Vec<_> = nodes
246 250 .iter()
247 251 .map(|(k, v)| NodeRef::InMemory(k, v))
248 252 .collect();
249 253 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
250 254 match node {
251 255 NodeRef::InMemory(path, _node) => path.base_name(),
252 256 NodeRef::OnDisk(_) => unreachable!(),
253 257 }
254 258 }
255 259 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
256 260 // value: https://github.com/rust-lang/rust/issues/34162
257 261 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
258 262 vec
259 263 }
260 264 ChildNodesRef::OnDisk(nodes) => {
261 265 // Nodes on disk are already sorted
262 266 nodes.iter().map(NodeRef::OnDisk).collect()
263 267 }
264 268 }
265 269 }
266 270 }
267 271
268 272 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
269 273 pub(super) fn full_path(
270 274 &self,
271 275 on_disk: &'on_disk [u8],
272 276 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
273 277 match self {
274 278 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
275 279 NodeRef::OnDisk(node) => node.full_path(on_disk),
276 280 }
277 281 }
278 282
279 283 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
280 284 /// HgPath>` detached from `'tree`
281 285 pub(super) fn full_path_borrowed(
282 286 &self,
283 287 on_disk: &'on_disk [u8],
284 288 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
285 289 match self {
286 290 NodeRef::InMemory(path, _node) => match path.full_path() {
287 291 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
288 292 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
289 293 },
290 294 NodeRef::OnDisk(node) => {
291 295 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
292 296 }
293 297 }
294 298 }
295 299
296 300 pub(super) fn base_name(
297 301 &self,
298 302 on_disk: &'on_disk [u8],
299 303 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
300 304 match self {
301 305 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
302 306 NodeRef::OnDisk(node) => node.base_name(on_disk),
303 307 }
304 308 }
305 309
306 310 pub(super) fn children(
307 311 &self,
308 312 on_disk: &'on_disk [u8],
309 313 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
310 314 match self {
311 315 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
312 316 NodeRef::OnDisk(node) => {
313 317 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
314 318 }
315 319 }
316 320 }
317 321
318 322 pub(super) fn has_copy_source(&self) -> bool {
319 323 match self {
320 324 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
321 325 NodeRef::OnDisk(node) => node.has_copy_source(),
322 326 }
323 327 }
324 328
325 329 pub(super) fn copy_source(
326 330 &self,
327 331 on_disk: &'on_disk [u8],
328 332 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
329 333 match self {
330 334 NodeRef::InMemory(_path, node) => {
331 335 Ok(node.copy_source.as_ref().map(|s| &**s))
332 336 }
333 337 NodeRef::OnDisk(node) => node.copy_source(on_disk),
334 338 }
335 339 }
336 340 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
337 341 /// HgPath>` detached from `'tree`
338 342 pub(super) fn copy_source_borrowed(
339 343 &self,
340 344 on_disk: &'on_disk [u8],
341 345 ) -> Result<Option<BorrowedPath<'tree, 'on_disk>>, DirstateV2ParseError>
342 346 {
343 347 Ok(match self {
344 348 NodeRef::InMemory(_path, node) => {
345 349 node.copy_source.as_ref().map(|source| match source {
346 350 Cow::Borrowed(on_disk) => BorrowedPath::OnDisk(on_disk),
347 351 Cow::Owned(in_memory) => BorrowedPath::InMemory(in_memory),
348 352 })
349 353 }
350 354 NodeRef::OnDisk(node) => node
351 355 .copy_source(on_disk)?
352 356 .map(|source| BorrowedPath::OnDisk(source)),
353 357 })
354 358 }
355 359
356 360 pub(super) fn entry(
357 361 &self,
358 362 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
359 363 match self {
360 364 NodeRef::InMemory(_path, node) => {
361 365 Ok(node.data.as_entry().copied())
362 366 }
363 367 NodeRef::OnDisk(node) => node.entry(),
364 368 }
365 369 }
366 370
367 371 pub(super) fn cached_directory_mtime(
368 372 &self,
369 373 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
370 374 match self {
371 375 NodeRef::InMemory(_path, node) => Ok(match node.data {
372 376 NodeData::CachedDirectory { mtime } => Some(mtime),
373 377 _ => None,
374 378 }),
375 379 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
376 380 }
377 381 }
378 382
379 383 pub(super) fn descendants_with_entry_count(&self) -> u32 {
380 384 match self {
381 385 NodeRef::InMemory(_path, node) => {
382 386 node.descendants_with_entry_count
383 387 }
384 388 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
385 389 }
386 390 }
387 391
388 392 pub(super) fn tracked_descendants_count(&self) -> u32 {
389 393 match self {
390 394 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
391 395 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
392 396 }
393 397 }
394 398 }
395 399
396 400 /// Represents a file or a directory
397 401 #[derive(Default, Debug)]
398 402 pub(super) struct Node<'on_disk> {
399 403 pub(super) data: NodeData,
400 404
401 405 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
402 406
403 407 pub(super) children: ChildNodes<'on_disk>,
404 408
405 409 /// How many (non-inclusive) descendants of this node have an entry.
406 410 pub(super) descendants_with_entry_count: u32,
407 411
408 412 /// How many (non-inclusive) descendants of this node have an entry whose
409 413 /// state is "tracked".
410 414 pub(super) tracked_descendants_count: u32,
411 415 }
412 416
413 417 #[derive(Debug)]
414 418 pub(super) enum NodeData {
415 419 Entry(DirstateEntry),
416 420 CachedDirectory { mtime: TruncatedTimestamp },
417 421 None,
418 422 }
419 423
420 424 impl Default for NodeData {
421 425 fn default() -> Self {
422 426 NodeData::None
423 427 }
424 428 }
425 429
426 430 impl NodeData {
427 431 fn has_entry(&self) -> bool {
428 432 match self {
429 433 NodeData::Entry(_) => true,
430 434 _ => false,
431 435 }
432 436 }
433 437
434 438 fn as_entry(&self) -> Option<&DirstateEntry> {
435 439 match self {
436 440 NodeData::Entry(entry) => Some(entry),
437 441 _ => None,
438 442 }
439 443 }
440 444
441 445 fn as_entry_mut(&mut self) -> Option<&mut DirstateEntry> {
442 446 match self {
443 447 NodeData::Entry(entry) => Some(entry),
444 448 _ => None,
445 449 }
446 450 }
447 451 }
448 452
449 453 impl<'on_disk> DirstateMap<'on_disk> {
450 454 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
451 455 Self {
452 456 on_disk,
453 457 root: ChildNodes::default(),
454 458 nodes_with_entry_count: 0,
455 459 nodes_with_copy_source_count: 0,
456 460 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
457 461 unreachable_bytes: 0,
458 462 old_data_size: 0,
459 463 dirstate_version: DirstateVersion::V1,
464 write_mode: DirstateMapWriteMode::Auto,
460 465 }
461 466 }
462 467
463 468 #[timed]
464 469 pub fn new_v2(
465 470 on_disk: &'on_disk [u8],
466 471 data_size: usize,
467 472 metadata: &[u8],
468 473 ) -> Result<Self, DirstateError> {
469 474 if let Some(data) = on_disk.get(..data_size) {
470 475 Ok(on_disk::read(data, metadata)?)
471 476 } else {
472 477 Err(DirstateV2ParseError::new("not enough bytes on disk").into())
473 478 }
474 479 }
475 480
476 481 #[timed]
477 482 pub fn new_v1(
478 483 on_disk: &'on_disk [u8],
479 484 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
480 485 let mut map = Self::empty(on_disk);
481 486 if map.on_disk.is_empty() {
482 487 return Ok((map, None));
483 488 }
484 489
485 490 let parents = parse_dirstate_entries(
486 491 map.on_disk,
487 492 |path, entry, copy_source| {
488 493 let tracked = entry.tracked();
489 494 let node = Self::get_or_insert_node_inner(
490 495 map.on_disk,
491 496 &mut map.unreachable_bytes,
492 497 &mut map.root,
493 498 path,
494 499 WithBasename::to_cow_borrowed,
495 500 |ancestor| {
496 501 if tracked {
497 502 ancestor.tracked_descendants_count += 1
498 503 }
499 504 ancestor.descendants_with_entry_count += 1
500 505 },
501 506 )?;
502 507 assert!(
503 508 !node.data.has_entry(),
504 509 "duplicate dirstate entry in read"
505 510 );
506 511 assert!(
507 512 node.copy_source.is_none(),
508 513 "duplicate dirstate entry in read"
509 514 );
510 515 node.data = NodeData::Entry(*entry);
511 516 node.copy_source = copy_source.map(Cow::Borrowed);
512 517 map.nodes_with_entry_count += 1;
513 518 if copy_source.is_some() {
514 519 map.nodes_with_copy_source_count += 1
515 520 }
516 521 Ok(())
517 522 },
518 523 )?;
519 524 let parents = Some(parents.clone());
520 525
521 526 Ok((map, parents))
522 527 }
523 528
524 529 /// Assuming dirstate-v2 format, returns whether the next write should
525 530 /// append to the existing data file that contains `self.on_disk` (true),
526 531 /// or create a new data file from scratch (false).
527 532 pub(super) fn write_should_append(&self) -> bool {
528 let ratio = self.unreachable_bytes as f32 / self.on_disk.len() as f32;
529 ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
533 match self.write_mode {
534 DirstateMapWriteMode::ForceAppend => true,
535 DirstateMapWriteMode::ForceNewDataFile => false,
536 DirstateMapWriteMode::Auto => {
537 let ratio =
538 self.unreachable_bytes as f32 / self.on_disk.len() as f32;
539 ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
540 }
541 }
530 542 }
531 543
532 544 fn get_node<'tree>(
533 545 &'tree self,
534 546 path: &HgPath,
535 547 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
536 548 let mut children = self.root.as_ref();
537 549 let mut components = path.components();
538 550 let mut component =
539 551 components.next().expect("expected at least one components");
540 552 loop {
541 553 if let Some(child) = children.get(component, self.on_disk)? {
542 554 if let Some(next_component) = components.next() {
543 555 component = next_component;
544 556 children = child.children(self.on_disk)?;
545 557 } else {
546 558 return Ok(Some(child));
547 559 }
548 560 } else {
549 561 return Ok(None);
550 562 }
551 563 }
552 564 }
553 565
554 566 /// Returns a mutable reference to the node at `path` if it exists
555 567 ///
556 568 /// `each_ancestor` is a callback that is called for each ancestor node
557 569 /// when descending the tree. It is used to keep the different counters
558 570 /// of the `DirstateMap` up-to-date.
559 571 fn get_node_mut<'tree>(
560 572 &'tree mut self,
561 573 path: &HgPath,
562 574 each_ancestor: impl FnMut(&mut Node),
563 575 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
564 576 Self::get_node_mut_inner(
565 577 self.on_disk,
566 578 &mut self.unreachable_bytes,
567 579 &mut self.root,
568 580 path,
569 581 each_ancestor,
570 582 )
571 583 }
572 584
573 585 /// Lower-level version of `get_node_mut`.
574 586 ///
575 587 /// This takes `root` instead of `&mut self` so that callers can mutate
576 588 /// other fields while the returned borrow is still valid.
577 589 ///
578 590 /// `each_ancestor` is a callback that is called for each ancestor node
579 591 /// when descending the tree. It is used to keep the different counters
580 592 /// of the `DirstateMap` up-to-date.
581 593 fn get_node_mut_inner<'tree>(
582 594 on_disk: &'on_disk [u8],
583 595 unreachable_bytes: &mut u32,
584 596 root: &'tree mut ChildNodes<'on_disk>,
585 597 path: &HgPath,
586 598 mut each_ancestor: impl FnMut(&mut Node),
587 599 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
588 600 let mut children = root;
589 601 let mut components = path.components();
590 602 let mut component =
591 603 components.next().expect("expected at least one components");
592 604 loop {
593 605 if let Some(child) = children
594 606 .make_mut(on_disk, unreachable_bytes)?
595 607 .get_mut(component)
596 608 {
597 609 if let Some(next_component) = components.next() {
598 610 each_ancestor(child);
599 611 component = next_component;
600 612 children = &mut child.children;
601 613 } else {
602 614 return Ok(Some(child));
603 615 }
604 616 } else {
605 617 return Ok(None);
606 618 }
607 619 }
608 620 }
609 621
610 622 /// Get a mutable reference to the node at `path`, creating it if it does
611 623 /// not exist.
612 624 ///
613 625 /// `each_ancestor` is a callback that is called for each ancestor node
614 626 /// when descending the tree. It is used to keep the different counters
615 627 /// of the `DirstateMap` up-to-date.
616 628 fn get_or_insert_node<'tree, 'path>(
617 629 &'tree mut self,
618 630 path: &'path HgPath,
619 631 each_ancestor: impl FnMut(&mut Node),
620 632 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
621 633 Self::get_or_insert_node_inner(
622 634 self.on_disk,
623 635 &mut self.unreachable_bytes,
624 636 &mut self.root,
625 637 path,
626 638 WithBasename::to_cow_owned,
627 639 each_ancestor,
628 640 )
629 641 }
630 642
631 643 /// Lower-level version of `get_or_insert_node_inner`, which is used when
632 644 /// parsing disk data to remove allocations for new nodes.
633 645 fn get_or_insert_node_inner<'tree, 'path>(
634 646 on_disk: &'on_disk [u8],
635 647 unreachable_bytes: &mut u32,
636 648 root: &'tree mut ChildNodes<'on_disk>,
637 649 path: &'path HgPath,
638 650 to_cow: impl Fn(
639 651 WithBasename<&'path HgPath>,
640 652 ) -> WithBasename<Cow<'on_disk, HgPath>>,
641 653 mut each_ancestor: impl FnMut(&mut Node),
642 654 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
643 655 let mut child_nodes = root;
644 656 let mut inclusive_ancestor_paths =
645 657 WithBasename::inclusive_ancestors_of(path);
646 658 let mut ancestor_path = inclusive_ancestor_paths
647 659 .next()
648 660 .expect("expected at least one inclusive ancestor");
649 661 loop {
650 662 let (_, child_node) = child_nodes
651 663 .make_mut(on_disk, unreachable_bytes)?
652 664 .raw_entry_mut()
653 665 .from_key(ancestor_path.base_name())
654 666 .or_insert_with(|| (to_cow(ancestor_path), Node::default()));
655 667 if let Some(next) = inclusive_ancestor_paths.next() {
656 668 each_ancestor(child_node);
657 669 ancestor_path = next;
658 670 child_nodes = &mut child_node.children;
659 671 } else {
660 672 return Ok(child_node);
661 673 }
662 674 }
663 675 }
664 676
665 677 fn reset_state(
666 678 &mut self,
667 679 filename: &HgPath,
668 680 old_entry_opt: Option<DirstateEntry>,
669 681 wc_tracked: bool,
670 682 p1_tracked: bool,
671 683 p2_info: bool,
672 684 has_meaningful_mtime: bool,
673 685 parent_file_data_opt: Option<ParentFileData>,
674 686 ) -> Result<(), DirstateError> {
675 687 let (had_entry, was_tracked) = match old_entry_opt {
676 688 Some(old_entry) => (true, old_entry.tracked()),
677 689 None => (false, false),
678 690 };
679 691 let node = self.get_or_insert_node(filename, |ancestor| {
680 692 if !had_entry {
681 693 ancestor.descendants_with_entry_count += 1;
682 694 }
683 695 if was_tracked {
684 696 if !wc_tracked {
685 697 ancestor.tracked_descendants_count = ancestor
686 698 .tracked_descendants_count
687 699 .checked_sub(1)
688 700 .expect("tracked count to be >= 0");
689 701 }
690 702 } else {
691 703 if wc_tracked {
692 704 ancestor.tracked_descendants_count += 1;
693 705 }
694 706 }
695 707 })?;
696 708
697 709 let v2_data = if let Some(parent_file_data) = parent_file_data_opt {
698 710 DirstateV2Data {
699 711 wc_tracked,
700 712 p1_tracked,
701 713 p2_info,
702 714 mode_size: parent_file_data.mode_size,
703 715 mtime: if has_meaningful_mtime {
704 716 parent_file_data.mtime
705 717 } else {
706 718 None
707 719 },
708 720 ..Default::default()
709 721 }
710 722 } else {
711 723 DirstateV2Data {
712 724 wc_tracked,
713 725 p1_tracked,
714 726 p2_info,
715 727 ..Default::default()
716 728 }
717 729 };
718 730 node.data = NodeData::Entry(DirstateEntry::from_v2_data(v2_data));
719 731 if !had_entry {
720 732 self.nodes_with_entry_count += 1;
721 733 }
722 734 Ok(())
723 735 }
724 736
725 737 fn set_tracked(
726 738 &mut self,
727 739 filename: &HgPath,
728 740 old_entry_opt: Option<DirstateEntry>,
729 741 ) -> Result<bool, DirstateV2ParseError> {
730 742 let was_tracked = old_entry_opt.map_or(false, |e| e.tracked());
731 743 let had_entry = old_entry_opt.is_some();
732 744 let tracked_count_increment = if was_tracked { 0 } else { 1 };
733 745 let mut new = false;
734 746
735 747 let node = self.get_or_insert_node(filename, |ancestor| {
736 748 if !had_entry {
737 749 ancestor.descendants_with_entry_count += 1;
738 750 }
739 751
740 752 ancestor.tracked_descendants_count += tracked_count_increment;
741 753 })?;
742 754 if let Some(old_entry) = old_entry_opt {
743 755 let mut e = old_entry.clone();
744 756 if e.tracked() {
745 757 // XXX
746 758 // This is probably overkill for more case, but we need this to
747 759 // fully replace the `normallookup` call with `set_tracked`
748 760 // one. Consider smoothing this in the future.
749 761 e.set_possibly_dirty();
750 762 } else {
751 763 new = true;
752 764 e.set_tracked();
753 765 }
754 766 node.data = NodeData::Entry(e)
755 767 } else {
756 768 node.data = NodeData::Entry(DirstateEntry::new_tracked());
757 769 self.nodes_with_entry_count += 1;
758 770 new = true;
759 771 };
760 772 Ok(new)
761 773 }
762 774
763 775 /// Set a node as untracked in the dirstate.
764 776 ///
765 777 /// It is the responsibility of the caller to remove the copy source and/or
766 778 /// the entry itself if appropriate.
767 779 ///
768 780 /// # Panics
769 781 ///
770 782 /// Panics if the node does not exist.
771 783 fn set_untracked(
772 784 &mut self,
773 785 filename: &HgPath,
774 786 old_entry: DirstateEntry,
775 787 ) -> Result<(), DirstateV2ParseError> {
776 788 let node = self
777 789 .get_node_mut(filename, |ancestor| {
778 790 ancestor.tracked_descendants_count = ancestor
779 791 .tracked_descendants_count
780 792 .checked_sub(1)
781 793 .expect("tracked_descendants_count should be >= 0");
782 794 })?
783 795 .expect("node should exist");
784 796 let mut new_entry = old_entry.clone();
785 797 new_entry.set_untracked();
786 798 node.data = NodeData::Entry(new_entry);
787 799 Ok(())
788 800 }
789 801
790 802 /// Set a node as clean in the dirstate.
791 803 ///
792 804 /// It is the responsibility of the caller to remove the copy source.
793 805 ///
794 806 /// # Panics
795 807 ///
796 808 /// Panics if the node does not exist.
797 809 fn set_clean(
798 810 &mut self,
799 811 filename: &HgPath,
800 812 old_entry: DirstateEntry,
801 813 mode: u32,
802 814 size: u32,
803 815 mtime: TruncatedTimestamp,
804 816 ) -> Result<(), DirstateError> {
805 817 let node = self
806 818 .get_node_mut(filename, |ancestor| {
807 819 if !old_entry.tracked() {
808 820 ancestor.tracked_descendants_count += 1;
809 821 }
810 822 })?
811 823 .expect("node should exist");
812 824 let mut new_entry = old_entry.clone();
813 825 new_entry.set_clean(mode, size, mtime);
814 826 node.data = NodeData::Entry(new_entry);
815 827 Ok(())
816 828 }
817 829
818 830 /// Set a node as possibly dirty in the dirstate.
819 831 ///
820 832 /// # Panics
821 833 ///
822 834 /// Panics if the node does not exist.
823 835 fn set_possibly_dirty(
824 836 &mut self,
825 837 filename: &HgPath,
826 838 ) -> Result<(), DirstateError> {
827 839 let node = self
828 840 .get_node_mut(filename, |_ancestor| {})?
829 841 .expect("node should exist");
830 842 let entry = node.data.as_entry_mut().expect("entry should exist");
831 843 entry.set_possibly_dirty();
832 844 node.data = NodeData::Entry(*entry);
833 845 Ok(())
834 846 }
835 847
836 848 /// Clears the cached mtime for the (potential) folder at `path`.
837 849 pub(super) fn clear_cached_mtime(
838 850 &mut self,
839 851 path: &HgPath,
840 852 ) -> Result<(), DirstateV2ParseError> {
841 853 let node = match self.get_node_mut(path, |_ancestor| {})? {
842 854 Some(node) => node,
843 855 None => return Ok(()),
844 856 };
845 857 if let NodeData::CachedDirectory { .. } = &node.data {
846 858 node.data = NodeData::None
847 859 }
848 860 Ok(())
849 861 }
850 862
851 863 /// Sets the cached mtime for the (potential) folder at `path`.
852 864 pub(super) fn set_cached_mtime(
853 865 &mut self,
854 866 path: &HgPath,
855 867 mtime: TruncatedTimestamp,
856 868 ) -> Result<(), DirstateV2ParseError> {
857 869 let node = match self.get_node_mut(path, |_ancestor| {})? {
858 870 Some(node) => node,
859 871 None => return Ok(()),
860 872 };
861 873 match &node.data {
862 874 NodeData::Entry(_) => {} // Don’t overwrite an entry
863 875 NodeData::CachedDirectory { .. } | NodeData::None => {
864 876 node.data = NodeData::CachedDirectory { mtime }
865 877 }
866 878 }
867 879 Ok(())
868 880 }
869 881
870 882 fn iter_nodes<'tree>(
871 883 &'tree self,
872 884 ) -> impl Iterator<
873 885 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
874 886 > + 'tree {
875 887 // Depth first tree traversal.
876 888 //
877 889 // If we could afford internal iteration and recursion,
878 890 // this would look like:
879 891 //
880 892 // ```
881 893 // fn traverse_children(
882 894 // children: &ChildNodes,
883 895 // each: &mut impl FnMut(&Node),
884 896 // ) {
885 897 // for child in children.values() {
886 898 // traverse_children(&child.children, each);
887 899 // each(child);
888 900 // }
889 901 // }
890 902 // ```
891 903 //
892 904 // However we want an external iterator and therefore can’t use the
893 905 // call stack. Use an explicit stack instead:
894 906 let mut stack = Vec::new();
895 907 let mut iter = self.root.as_ref().iter();
896 908 std::iter::from_fn(move || {
897 909 while let Some(child_node) = iter.next() {
898 910 let children = match child_node.children(self.on_disk) {
899 911 Ok(children) => children,
900 912 Err(error) => return Some(Err(error)),
901 913 };
902 914 // Pseudo-recursion
903 915 let new_iter = children.iter();
904 916 let old_iter = std::mem::replace(&mut iter, new_iter);
905 917 stack.push((child_node, old_iter));
906 918 }
907 919 // Found the end of a `children.iter()` iterator.
908 920 if let Some((child_node, next_iter)) = stack.pop() {
909 921 // "Return" from pseudo-recursion by restoring state from the
910 922 // explicit stack
911 923 iter = next_iter;
912 924
913 925 Some(Ok(child_node))
914 926 } else {
915 927 // Reached the bottom of the stack, we’re done
916 928 None
917 929 }
918 930 })
919 931 }
920 932
921 933 fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
922 934 if let Cow::Borrowed(path) = path {
923 935 *unreachable_bytes += path.len() as u32
924 936 }
925 937 }
938
939 pub(crate) fn set_write_mode(&mut self, write_mode: DirstateMapWriteMode) {
940 self.write_mode = write_mode;
941 }
926 942 }
927 943
928 944 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
929 945 ///
930 946 /// The callback is only called for incoming `Ok` values. Errors are passed
931 947 /// through as-is. In order to let it use the `?` operator the callback is
932 948 /// expected to return a `Result` of `Option`, instead of an `Option` of
933 949 /// `Result`.
934 950 fn filter_map_results<'a, I, F, A, B, E>(
935 951 iter: I,
936 952 f: F,
937 953 ) -> impl Iterator<Item = Result<B, E>> + 'a
938 954 where
939 955 I: Iterator<Item = Result<A, E>> + 'a,
940 956 F: Fn(A) -> Result<Option<B>, E> + 'a,
941 957 {
942 958 iter.filter_map(move |result| match result {
943 959 Ok(node) => f(node).transpose(),
944 960 Err(e) => Some(Err(e)),
945 961 })
946 962 }
947 963
948 964 impl OwningDirstateMap {
949 965 pub fn clear(&mut self) {
950 966 self.with_dmap_mut(|map| {
951 967 map.root = Default::default();
952 968 map.nodes_with_entry_count = 0;
953 969 map.nodes_with_copy_source_count = 0;
954 970 });
955 971 }
956 972
957 973 pub fn set_tracked(
958 974 &mut self,
959 975 filename: &HgPath,
960 976 ) -> Result<bool, DirstateV2ParseError> {
961 977 let old_entry_opt = self.get(filename)?;
962 978 self.with_dmap_mut(|map| map.set_tracked(filename, old_entry_opt))
963 979 }
964 980
965 981 pub fn set_untracked(
966 982 &mut self,
967 983 filename: &HgPath,
968 984 ) -> Result<bool, DirstateError> {
969 985 let old_entry_opt = self.get(filename)?;
970 986 match old_entry_opt {
971 987 None => Ok(false),
972 988 Some(old_entry) => {
973 989 if !old_entry.tracked() {
974 990 // `DirstateMap::set_untracked` is not a noop if
975 991 // already not tracked as it will decrement the
976 992 // tracked counters while going down.
977 993 return Ok(true);
978 994 }
979 995 if old_entry.added() {
980 996 // Untracking an "added" entry will just result in a
981 997 // worthless entry (and other parts of the code will
982 998 // complain about it), just drop it entirely.
983 999 self.drop_entry_and_copy_source(filename)?;
984 1000 return Ok(true);
985 1001 }
986 1002 if !old_entry.p2_info() {
987 1003 self.copy_map_remove(filename)?;
988 1004 }
989 1005
990 1006 self.with_dmap_mut(|map| {
991 1007 map.set_untracked(filename, old_entry)?;
992 1008 Ok(true)
993 1009 })
994 1010 }
995 1011 }
996 1012 }
997 1013
998 1014 pub fn set_clean(
999 1015 &mut self,
1000 1016 filename: &HgPath,
1001 1017 mode: u32,
1002 1018 size: u32,
1003 1019 mtime: TruncatedTimestamp,
1004 1020 ) -> Result<(), DirstateError> {
1005 1021 let old_entry = match self.get(filename)? {
1006 1022 None => {
1007 1023 return Err(
1008 1024 DirstateMapError::PathNotFound(filename.into()).into()
1009 1025 )
1010 1026 }
1011 1027 Some(e) => e,
1012 1028 };
1013 1029 self.copy_map_remove(filename)?;
1014 1030 self.with_dmap_mut(|map| {
1015 1031 map.set_clean(filename, old_entry, mode, size, mtime)
1016 1032 })
1017 1033 }
1018 1034
1019 1035 pub fn set_possibly_dirty(
1020 1036 &mut self,
1021 1037 filename: &HgPath,
1022 1038 ) -> Result<(), DirstateError> {
1023 1039 if self.get(filename)?.is_none() {
1024 1040 return Err(DirstateMapError::PathNotFound(filename.into()).into());
1025 1041 }
1026 1042 self.with_dmap_mut(|map| map.set_possibly_dirty(filename))
1027 1043 }
1028 1044
1029 1045 pub fn reset_state(
1030 1046 &mut self,
1031 1047 filename: &HgPath,
1032 1048 wc_tracked: bool,
1033 1049 p1_tracked: bool,
1034 1050 p2_info: bool,
1035 1051 has_meaningful_mtime: bool,
1036 1052 parent_file_data_opt: Option<ParentFileData>,
1037 1053 ) -> Result<(), DirstateError> {
1038 1054 if !(p1_tracked || p2_info || wc_tracked) {
1039 1055 self.drop_entry_and_copy_source(filename)?;
1040 1056 return Ok(());
1041 1057 }
1042 1058 self.copy_map_remove(filename)?;
1043 1059 let old_entry_opt = self.get(filename)?;
1044 1060 self.with_dmap_mut(|map| {
1045 1061 map.reset_state(
1046 1062 filename,
1047 1063 old_entry_opt,
1048 1064 wc_tracked,
1049 1065 p1_tracked,
1050 1066 p2_info,
1051 1067 has_meaningful_mtime,
1052 1068 parent_file_data_opt,
1053 1069 )
1054 1070 })
1055 1071 }
1056 1072
1057 1073 pub fn drop_entry_and_copy_source(
1058 1074 &mut self,
1059 1075 filename: &HgPath,
1060 1076 ) -> Result<(), DirstateError> {
1061 1077 let was_tracked = self.get(filename)?.map_or(false, |e| e.tracked());
1062 1078 struct Dropped {
1063 1079 was_tracked: bool,
1064 1080 had_entry: bool,
1065 1081 had_copy_source: bool,
1066 1082 }
1067 1083
1068 1084 /// If this returns `Ok(Some((dropped, removed)))`, then
1069 1085 ///
1070 1086 /// * `dropped` is about the leaf node that was at `filename`
1071 1087 /// * `removed` is whether this particular level of recursion just
1072 1088 /// removed a node in `nodes`.
1073 1089 fn recur<'on_disk>(
1074 1090 on_disk: &'on_disk [u8],
1075 1091 unreachable_bytes: &mut u32,
1076 1092 nodes: &mut ChildNodes<'on_disk>,
1077 1093 path: &HgPath,
1078 1094 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
1079 1095 let (first_path_component, rest_of_path) =
1080 1096 path.split_first_component();
1081 1097 let nodes = nodes.make_mut(on_disk, unreachable_bytes)?;
1082 1098 let node = if let Some(node) = nodes.get_mut(first_path_component)
1083 1099 {
1084 1100 node
1085 1101 } else {
1086 1102 return Ok(None);
1087 1103 };
1088 1104 let dropped;
1089 1105 if let Some(rest) = rest_of_path {
1090 1106 if let Some((d, removed)) = recur(
1091 1107 on_disk,
1092 1108 unreachable_bytes,
1093 1109 &mut node.children,
1094 1110 rest,
1095 1111 )? {
1096 1112 dropped = d;
1097 1113 if dropped.had_entry {
1098 1114 node.descendants_with_entry_count = node
1099 1115 .descendants_with_entry_count
1100 1116 .checked_sub(1)
1101 1117 .expect(
1102 1118 "descendants_with_entry_count should be >= 0",
1103 1119 );
1104 1120 }
1105 1121 if dropped.was_tracked {
1106 1122 node.tracked_descendants_count = node
1107 1123 .tracked_descendants_count
1108 1124 .checked_sub(1)
1109 1125 .expect(
1110 1126 "tracked_descendants_count should be >= 0",
1111 1127 );
1112 1128 }
1113 1129
1114 1130 // Directory caches must be invalidated when removing a
1115 1131 // child node
1116 1132 if removed {
1117 1133 if let NodeData::CachedDirectory { .. } = &node.data {
1118 1134 node.data = NodeData::None
1119 1135 }
1120 1136 }
1121 1137 } else {
1122 1138 return Ok(None);
1123 1139 }
1124 1140 } else {
1125 1141 let entry = node.data.as_entry();
1126 1142 let was_tracked = entry.map_or(false, |entry| entry.tracked());
1127 1143 let had_entry = entry.is_some();
1128 1144 if had_entry {
1129 1145 node.data = NodeData::None
1130 1146 }
1131 1147 let mut had_copy_source = false;
1132 1148 if let Some(source) = &node.copy_source {
1133 1149 DirstateMap::count_dropped_path(unreachable_bytes, source);
1134 1150 had_copy_source = true;
1135 1151 node.copy_source = None
1136 1152 }
1137 1153 dropped = Dropped {
1138 1154 was_tracked,
1139 1155 had_entry,
1140 1156 had_copy_source,
1141 1157 };
1142 1158 }
1143 1159 // After recursion, for both leaf (rest_of_path is None) nodes and
1144 1160 // parent nodes, remove a node if it just became empty.
1145 1161 let remove = !node.data.has_entry()
1146 1162 && node.copy_source.is_none()
1147 1163 && node.children.is_empty();
1148 1164 if remove {
1149 1165 let (key, _) =
1150 1166 nodes.remove_entry(first_path_component).unwrap();
1151 1167 DirstateMap::count_dropped_path(
1152 1168 unreachable_bytes,
1153 1169 key.full_path(),
1154 1170 )
1155 1171 }
1156 1172 Ok(Some((dropped, remove)))
1157 1173 }
1158 1174
1159 1175 self.with_dmap_mut(|map| {
1160 1176 if let Some((dropped, _removed)) = recur(
1161 1177 map.on_disk,
1162 1178 &mut map.unreachable_bytes,
1163 1179 &mut map.root,
1164 1180 filename,
1165 1181 )? {
1166 1182 if dropped.had_entry {
1167 1183 map.nodes_with_entry_count = map
1168 1184 .nodes_with_entry_count
1169 1185 .checked_sub(1)
1170 1186 .expect("nodes_with_entry_count should be >= 0");
1171 1187 }
1172 1188 if dropped.had_copy_source {
1173 1189 map.nodes_with_copy_source_count = map
1174 1190 .nodes_with_copy_source_count
1175 1191 .checked_sub(1)
1176 1192 .expect("nodes_with_copy_source_count should be >= 0");
1177 1193 }
1178 1194 } else {
1179 1195 debug_assert!(!was_tracked);
1180 1196 }
1181 1197 Ok(())
1182 1198 })
1183 1199 }
1184 1200
1185 1201 pub fn has_tracked_dir(
1186 1202 &mut self,
1187 1203 directory: &HgPath,
1188 1204 ) -> Result<bool, DirstateError> {
1189 1205 self.with_dmap_mut(|map| {
1190 1206 if let Some(node) = map.get_node(directory)? {
1191 1207 // A node without a `DirstateEntry` was created to hold child
1192 1208 // nodes, and is therefore a directory.
1193 1209 let is_dir = node.entry()?.is_none();
1194 1210 Ok(is_dir && node.tracked_descendants_count() > 0)
1195 1211 } else {
1196 1212 Ok(false)
1197 1213 }
1198 1214 })
1199 1215 }
1200 1216
1201 1217 pub fn has_dir(
1202 1218 &mut self,
1203 1219 directory: &HgPath,
1204 1220 ) -> Result<bool, DirstateError> {
1205 1221 self.with_dmap_mut(|map| {
1206 1222 if let Some(node) = map.get_node(directory)? {
1207 1223 // A node without a `DirstateEntry` was created to hold child
1208 1224 // nodes, and is therefore a directory.
1209 1225 let is_dir = node.entry()?.is_none();
1210 1226 Ok(is_dir && node.descendants_with_entry_count() > 0)
1211 1227 } else {
1212 1228 Ok(false)
1213 1229 }
1214 1230 })
1215 1231 }
1216 1232
1217 1233 #[timed]
1218 1234 pub fn pack_v1(
1219 1235 &self,
1220 1236 parents: DirstateParents,
1221 1237 ) -> Result<Vec<u8>, DirstateError> {
1222 1238 let map = self.get_map();
1223 1239 // Optizimation (to be measured?): pre-compute size to avoid `Vec`
1224 1240 // reallocations
1225 1241 let mut size = parents.as_bytes().len();
1226 1242 for node in map.iter_nodes() {
1227 1243 let node = node?;
1228 1244 if node.entry()?.is_some() {
1229 1245 size += packed_entry_size(
1230 1246 node.full_path(map.on_disk)?,
1231 1247 node.copy_source(map.on_disk)?,
1232 1248 );
1233 1249 }
1234 1250 }
1235 1251
1236 1252 let mut packed = Vec::with_capacity(size);
1237 1253 packed.extend(parents.as_bytes());
1238 1254
1239 1255 for node in map.iter_nodes() {
1240 1256 let node = node?;
1241 1257 if let Some(entry) = node.entry()? {
1242 1258 pack_entry(
1243 1259 node.full_path(map.on_disk)?,
1244 1260 &entry,
1245 1261 node.copy_source(map.on_disk)?,
1246 1262 &mut packed,
1247 1263 );
1248 1264 }
1249 1265 }
1250 1266 Ok(packed)
1251 1267 }
1252 1268
1253 1269 /// Returns new data and metadata together with whether that data should be
1254 1270 /// appended to the existing data file whose content is at
1255 1271 /// `map.on_disk` (true), instead of written to a new data file
1256 1272 /// (false), and the previous size of data on disk.
1257 1273 #[timed]
1258 1274 pub fn pack_v2(
1259 1275 &self,
1260 1276 write_mode: DirstateMapWriteMode,
1261 1277 ) -> Result<(Vec<u8>, on_disk::TreeMetadata, bool, usize), DirstateError>
1262 1278 {
1263 1279 let map = self.get_map();
1264 1280 on_disk::write(map, write_mode)
1265 1281 }
1266 1282
1267 1283 /// `callback` allows the caller to process and do something with the
1268 1284 /// results of the status. This is needed to do so efficiently (i.e.
1269 1285 /// without cloning the `DirstateStatus` object with its paths) because
1270 1286 /// we need to borrow from `Self`.
1271 1287 pub fn with_status<R>(
1272 1288 &mut self,
1273 1289 matcher: &(dyn Matcher + Sync),
1274 1290 root_dir: PathBuf,
1275 1291 ignore_files: Vec<PathBuf>,
1276 1292 options: StatusOptions,
1277 1293 callback: impl for<'r> FnOnce(
1278 1294 Result<(DirstateStatus<'r>, Vec<PatternFileWarning>), StatusError>,
1279 1295 ) -> R,
1280 1296 ) -> R {
1281 1297 self.with_dmap_mut(|map| {
1282 1298 callback(super::status::status(
1283 1299 map,
1284 1300 matcher,
1285 1301 root_dir,
1286 1302 ignore_files,
1287 1303 options,
1288 1304 ))
1289 1305 })
1290 1306 }
1291 1307
1292 1308 pub fn copy_map_len(&self) -> usize {
1293 1309 let map = self.get_map();
1294 1310 map.nodes_with_copy_source_count as usize
1295 1311 }
1296 1312
1297 1313 pub fn copy_map_iter(&self) -> CopyMapIter<'_> {
1298 1314 let map = self.get_map();
1299 1315 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1300 1316 Ok(if let Some(source) = node.copy_source(map.on_disk)? {
1301 1317 Some((node.full_path(map.on_disk)?, source))
1302 1318 } else {
1303 1319 None
1304 1320 })
1305 1321 }))
1306 1322 }
1307 1323
1308 1324 pub fn copy_map_contains_key(
1309 1325 &self,
1310 1326 key: &HgPath,
1311 1327 ) -> Result<bool, DirstateV2ParseError> {
1312 1328 let map = self.get_map();
1313 1329 Ok(if let Some(node) = map.get_node(key)? {
1314 1330 node.has_copy_source()
1315 1331 } else {
1316 1332 false
1317 1333 })
1318 1334 }
1319 1335
1320 1336 pub fn copy_map_get(
1321 1337 &self,
1322 1338 key: &HgPath,
1323 1339 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1324 1340 let map = self.get_map();
1325 1341 if let Some(node) = map.get_node(key)? {
1326 1342 if let Some(source) = node.copy_source(map.on_disk)? {
1327 1343 return Ok(Some(source));
1328 1344 }
1329 1345 }
1330 1346 Ok(None)
1331 1347 }
1332 1348
1333 1349 pub fn copy_map_remove(
1334 1350 &mut self,
1335 1351 key: &HgPath,
1336 1352 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1337 1353 self.with_dmap_mut(|map| {
1338 1354 let count = &mut map.nodes_with_copy_source_count;
1339 1355 let unreachable_bytes = &mut map.unreachable_bytes;
1340 1356 Ok(DirstateMap::get_node_mut_inner(
1341 1357 map.on_disk,
1342 1358 unreachable_bytes,
1343 1359 &mut map.root,
1344 1360 key,
1345 1361 |_ancestor| {},
1346 1362 )?
1347 1363 .and_then(|node| {
1348 1364 if let Some(source) = &node.copy_source {
1349 1365 *count = count
1350 1366 .checked_sub(1)
1351 1367 .expect("nodes_with_copy_source_count should be >= 0");
1352 1368 DirstateMap::count_dropped_path(unreachable_bytes, source);
1353 1369 }
1354 1370 node.copy_source.take().map(Cow::into_owned)
1355 1371 }))
1356 1372 })
1357 1373 }
1358 1374
1359 1375 pub fn copy_map_insert(
1360 1376 &mut self,
1361 1377 key: &HgPath,
1362 1378 value: &HgPath,
1363 1379 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1364 1380 self.with_dmap_mut(|map| {
1365 1381 let node = map.get_or_insert_node(&key, |_ancestor| {})?;
1366 1382 let had_copy_source = node.copy_source.is_none();
1367 1383 let old = node
1368 1384 .copy_source
1369 1385 .replace(value.to_owned().into())
1370 1386 .map(Cow::into_owned);
1371 1387 if had_copy_source {
1372 1388 map.nodes_with_copy_source_count += 1
1373 1389 }
1374 1390 Ok(old)
1375 1391 })
1376 1392 }
1377 1393
1378 1394 pub fn len(&self) -> usize {
1379 1395 let map = self.get_map();
1380 1396 map.nodes_with_entry_count as usize
1381 1397 }
1382 1398
1383 1399 pub fn contains_key(
1384 1400 &self,
1385 1401 key: &HgPath,
1386 1402 ) -> Result<bool, DirstateV2ParseError> {
1387 1403 Ok(self.get(key)?.is_some())
1388 1404 }
1389 1405
1390 1406 pub fn get(
1391 1407 &self,
1392 1408 key: &HgPath,
1393 1409 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1394 1410 let map = self.get_map();
1395 1411 Ok(if let Some(node) = map.get_node(key)? {
1396 1412 node.entry()?
1397 1413 } else {
1398 1414 None
1399 1415 })
1400 1416 }
1401 1417
1402 1418 pub fn iter(&self) -> StateMapIter<'_> {
1403 1419 let map = self.get_map();
1404 1420 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1405 1421 Ok(if let Some(entry) = node.entry()? {
1406 1422 Some((node.full_path(map.on_disk)?, entry))
1407 1423 } else {
1408 1424 None
1409 1425 })
1410 1426 }))
1411 1427 }
1412 1428
1413 1429 pub fn iter_tracked_dirs(
1414 1430 &mut self,
1415 1431 ) -> Result<
1416 1432 Box<
1417 1433 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
1418 1434 + Send
1419 1435 + '_,
1420 1436 >,
1421 1437 DirstateError,
1422 1438 > {
1423 1439 let map = self.get_map();
1424 1440 let on_disk = map.on_disk;
1425 1441 Ok(Box::new(filter_map_results(
1426 1442 map.iter_nodes(),
1427 1443 move |node| {
1428 1444 Ok(if node.tracked_descendants_count() > 0 {
1429 1445 Some(node.full_path(on_disk)?)
1430 1446 } else {
1431 1447 None
1432 1448 })
1433 1449 },
1434 1450 )))
1435 1451 }
1436 1452
1437 1453 /// Only public because it needs to be exposed to the Python layer.
1438 1454 /// It is not the full `setparents` logic, only the parts that mutate the
1439 1455 /// entries.
1440 1456 pub fn setparents_fixup(
1441 1457 &mut self,
1442 1458 ) -> Result<Vec<(HgPathBuf, HgPathBuf)>, DirstateV2ParseError> {
1443 1459 // XXX
1444 1460 // All the copying and re-querying is quite inefficient, but this is
1445 1461 // still a lot better than doing it from Python.
1446 1462 //
1447 1463 // The better solution is to develop a mechanism for `iter_mut`,
1448 1464 // which will be a lot more involved: we're dealing with a lazy,
1449 1465 // append-mostly, tree-like data structure. This will do for now.
1450 1466 let mut copies = vec![];
1451 1467 let mut files_with_p2_info = vec![];
1452 1468 for res in self.iter() {
1453 1469 let (path, entry) = res?;
1454 1470 if entry.p2_info() {
1455 1471 files_with_p2_info.push(path.to_owned())
1456 1472 }
1457 1473 }
1458 1474 self.with_dmap_mut(|map| {
1459 1475 for path in files_with_p2_info.iter() {
1460 1476 let node = map.get_or_insert_node(path, |_| {})?;
1461 1477 let entry =
1462 1478 node.data.as_entry_mut().expect("entry should exist");
1463 1479 entry.drop_merge_data();
1464 1480 if let Some(source) = node.copy_source.take().as_deref() {
1465 1481 copies.push((path.to_owned(), source.to_owned()));
1466 1482 }
1467 1483 }
1468 1484 Ok(copies)
1469 1485 })
1470 1486 }
1471 1487
1472 1488 pub fn debug_iter(
1473 1489 &self,
1474 1490 all: bool,
1475 1491 ) -> Box<
1476 1492 dyn Iterator<
1477 1493 Item = Result<
1478 1494 (&HgPath, (u8, i32, i32, i32)),
1479 1495 DirstateV2ParseError,
1480 1496 >,
1481 1497 > + Send
1482 1498 + '_,
1483 1499 > {
1484 1500 let map = self.get_map();
1485 1501 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1486 1502 let debug_tuple = if let Some(entry) = node.entry()? {
1487 1503 entry.debug_tuple()
1488 1504 } else if !all {
1489 1505 return Ok(None);
1490 1506 } else if let Some(mtime) = node.cached_directory_mtime()? {
1491 1507 (b' ', 0, -1, mtime.truncated_seconds() as i32)
1492 1508 } else {
1493 1509 (b' ', 0, -1, -1)
1494 1510 };
1495 1511 Ok(Some((node.full_path(map.on_disk)?, debug_tuple)))
1496 1512 }))
1497 1513 }
1498 1514 }
1499 1515 #[cfg(test)]
1500 1516 mod tests {
1501 1517 use super::*;
1502 1518
1503 1519 /// Shortcut to return tracked descendants of a path.
1504 1520 /// Panics if the path does not exist.
1505 1521 fn tracked_descendants(map: &OwningDirstateMap, path: &[u8]) -> u32 {
1506 1522 let path = dbg!(HgPath::new(path));
1507 1523 let node = map.get_map().get_node(path);
1508 1524 node.unwrap().unwrap().tracked_descendants_count()
1509 1525 }
1510 1526
1511 1527 /// Shortcut to return descendants with an entry.
1512 1528 /// Panics if the path does not exist.
1513 1529 fn descendants_with_an_entry(map: &OwningDirstateMap, path: &[u8]) -> u32 {
1514 1530 let path = dbg!(HgPath::new(path));
1515 1531 let node = map.get_map().get_node(path);
1516 1532 node.unwrap().unwrap().descendants_with_entry_count()
1517 1533 }
1518 1534
1519 1535 fn assert_does_not_exist(map: &OwningDirstateMap, path: &[u8]) {
1520 1536 let path = dbg!(HgPath::new(path));
1521 1537 let node = map.get_map().get_node(path);
1522 1538 assert!(node.unwrap().is_none());
1523 1539 }
1524 1540
1525 1541 /// Shortcut for path creation in tests
1526 1542 fn p(b: &[u8]) -> &HgPath {
1527 1543 HgPath::new(b)
1528 1544 }
1529 1545
1530 1546 /// Test the very simple case a single tracked file
1531 1547 #[test]
1532 1548 fn test_tracked_descendants_simple() -> Result<(), DirstateError> {
1533 1549 let mut map = OwningDirstateMap::new_empty(vec![]);
1534 1550 assert_eq!(map.len(), 0);
1535 1551
1536 1552 map.set_tracked(p(b"some/nested/path"))?;
1537 1553
1538 1554 assert_eq!(map.len(), 1);
1539 1555 assert_eq!(tracked_descendants(&map, b"some"), 1);
1540 1556 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1541 1557 assert_eq!(tracked_descendants(&map, b"some/nested/path"), 0);
1542 1558
1543 1559 map.set_untracked(p(b"some/nested/path"))?;
1544 1560 assert_eq!(map.len(), 0);
1545 1561 assert!(map.get_map().get_node(p(b"some"))?.is_none());
1546 1562
1547 1563 Ok(())
1548 1564 }
1549 1565
1550 1566 /// Test the simple case of all tracked, but multiple files
1551 1567 #[test]
1552 1568 fn test_tracked_descendants_multiple() -> Result<(), DirstateError> {
1553 1569 let mut map = OwningDirstateMap::new_empty(vec![]);
1554 1570
1555 1571 map.set_tracked(p(b"some/nested/path"))?;
1556 1572 map.set_tracked(p(b"some/nested/file"))?;
1557 1573 // one layer without any files to test deletion cascade
1558 1574 map.set_tracked(p(b"some/other/nested/path"))?;
1559 1575 map.set_tracked(p(b"root_file"))?;
1560 1576 map.set_tracked(p(b"some/file"))?;
1561 1577 map.set_tracked(p(b"some/file2"))?;
1562 1578 map.set_tracked(p(b"some/file3"))?;
1563 1579
1564 1580 assert_eq!(map.len(), 7);
1565 1581 assert_eq!(tracked_descendants(&map, b"some"), 6);
1566 1582 assert_eq!(tracked_descendants(&map, b"some/nested"), 2);
1567 1583 assert_eq!(tracked_descendants(&map, b"some/other"), 1);
1568 1584 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1569 1585 assert_eq!(tracked_descendants(&map, b"some/nested/path"), 0);
1570 1586
1571 1587 map.set_untracked(p(b"some/nested/path"))?;
1572 1588 assert_eq!(map.len(), 6);
1573 1589 assert_eq!(tracked_descendants(&map, b"some"), 5);
1574 1590 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1575 1591 assert_eq!(tracked_descendants(&map, b"some/other"), 1);
1576 1592 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1577 1593
1578 1594 map.set_untracked(p(b"some/nested/file"))?;
1579 1595 assert_eq!(map.len(), 5);
1580 1596 assert_eq!(tracked_descendants(&map, b"some"), 4);
1581 1597 assert_eq!(tracked_descendants(&map, b"some/other"), 1);
1582 1598 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1583 1599 assert_does_not_exist(&map, b"some_nested");
1584 1600
1585 1601 map.set_untracked(p(b"some/other/nested/path"))?;
1586 1602 assert_eq!(map.len(), 4);
1587 1603 assert_eq!(tracked_descendants(&map, b"some"), 3);
1588 1604 assert_does_not_exist(&map, b"some/other");
1589 1605
1590 1606 map.set_untracked(p(b"root_file"))?;
1591 1607 assert_eq!(map.len(), 3);
1592 1608 assert_eq!(tracked_descendants(&map, b"some"), 3);
1593 1609 assert_does_not_exist(&map, b"root_file");
1594 1610
1595 1611 map.set_untracked(p(b"some/file"))?;
1596 1612 assert_eq!(map.len(), 2);
1597 1613 assert_eq!(tracked_descendants(&map, b"some"), 2);
1598 1614 assert_does_not_exist(&map, b"some/file");
1599 1615
1600 1616 map.set_untracked(p(b"some/file2"))?;
1601 1617 assert_eq!(map.len(), 1);
1602 1618 assert_eq!(tracked_descendants(&map, b"some"), 1);
1603 1619 assert_does_not_exist(&map, b"some/file2");
1604 1620
1605 1621 map.set_untracked(p(b"some/file3"))?;
1606 1622 assert_eq!(map.len(), 0);
1607 1623 assert_does_not_exist(&map, b"some/file3");
1608 1624
1609 1625 Ok(())
1610 1626 }
1611 1627
1612 1628 /// Check with a mix of tracked and non-tracked items
1613 1629 #[test]
1614 1630 fn test_tracked_descendants_different() -> Result<(), DirstateError> {
1615 1631 let mut map = OwningDirstateMap::new_empty(vec![]);
1616 1632
1617 1633 // A file that was just added
1618 1634 map.set_tracked(p(b"some/nested/path"))?;
1619 1635 // This has no information, the dirstate should ignore it
1620 1636 map.reset_state(p(b"some/file"), false, false, false, false, None)?;
1621 1637 assert_does_not_exist(&map, b"some/file");
1622 1638
1623 1639 // A file that was removed
1624 1640 map.reset_state(
1625 1641 p(b"some/nested/file"),
1626 1642 false,
1627 1643 true,
1628 1644 false,
1629 1645 false,
1630 1646 None,
1631 1647 )?;
1632 1648 assert!(!map.get(p(b"some/nested/file"))?.unwrap().tracked());
1633 1649 // Only present in p2
1634 1650 map.reset_state(p(b"some/file3"), false, false, true, false, None)?;
1635 1651 assert!(!map.get(p(b"some/file3"))?.unwrap().tracked());
1636 1652 // A file that was merged
1637 1653 map.reset_state(p(b"root_file"), true, true, true, false, None)?;
1638 1654 assert!(map.get(p(b"root_file"))?.unwrap().tracked());
1639 1655 // A file that is added, with info from p2
1640 1656 // XXX is that actually possible?
1641 1657 map.reset_state(p(b"some/file2"), true, false, true, false, None)?;
1642 1658 assert!(map.get(p(b"some/file2"))?.unwrap().tracked());
1643 1659 // A clean file
1644 1660 // One layer without any files to test deletion cascade
1645 1661 map.reset_state(
1646 1662 p(b"some/other/nested/path"),
1647 1663 true,
1648 1664 true,
1649 1665 false,
1650 1666 false,
1651 1667 None,
1652 1668 )?;
1653 1669 assert!(map.get(p(b"some/other/nested/path"))?.unwrap().tracked());
1654 1670
1655 1671 assert_eq!(map.len(), 6);
1656 1672 assert_eq!(tracked_descendants(&map, b"some"), 3);
1657 1673 assert_eq!(descendants_with_an_entry(&map, b"some"), 5);
1658 1674 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1659 1675 assert_eq!(descendants_with_an_entry(&map, b"some/other/nested"), 1);
1660 1676 assert_eq!(tracked_descendants(&map, b"some/other/nested/path"), 0);
1661 1677 assert_eq!(
1662 1678 descendants_with_an_entry(&map, b"some/other/nested/path"),
1663 1679 0
1664 1680 );
1665 1681 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1666 1682 assert_eq!(descendants_with_an_entry(&map, b"some/nested"), 2);
1667 1683
1668 1684 // might as well check this
1669 1685 map.set_untracked(p(b"path/does/not/exist"))?;
1670 1686 assert_eq!(map.len(), 6);
1671 1687
1672 1688 map.set_untracked(p(b"some/other/nested/path"))?;
1673 1689 // It is set untracked but not deleted since it held other information
1674 1690 assert_eq!(map.len(), 6);
1675 1691 assert_eq!(tracked_descendants(&map, b"some"), 2);
1676 1692 assert_eq!(descendants_with_an_entry(&map, b"some"), 5);
1677 1693 assert_eq!(descendants_with_an_entry(&map, b"some/other"), 1);
1678 1694 assert_eq!(descendants_with_an_entry(&map, b"some/other/nested"), 1);
1679 1695 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1680 1696 assert_eq!(descendants_with_an_entry(&map, b"some/nested"), 2);
1681 1697
1682 1698 map.set_untracked(p(b"some/nested/path"))?;
1683 1699 // It is set untracked *and* deleted since it was only added
1684 1700 assert_eq!(map.len(), 5);
1685 1701 assert_eq!(tracked_descendants(&map, b"some"), 1);
1686 1702 assert_eq!(descendants_with_an_entry(&map, b"some"), 4);
1687 1703 assert_eq!(tracked_descendants(&map, b"some/nested"), 0);
1688 1704 assert_eq!(descendants_with_an_entry(&map, b"some/nested"), 1);
1689 1705 assert_does_not_exist(&map, b"some/nested/path");
1690 1706
1691 1707 map.set_untracked(p(b"root_file"))?;
1692 1708 // Untracked but not deleted
1693 1709 assert_eq!(map.len(), 5);
1694 1710 assert!(map.get(p(b"root_file"))?.is_some());
1695 1711
1696 1712 map.set_untracked(p(b"some/file2"))?;
1697 1713 assert_eq!(map.len(), 5);
1698 1714 assert_eq!(tracked_descendants(&map, b"some"), 0);
1699 1715 assert!(map.get(p(b"some/file2"))?.is_some());
1700 1716
1701 1717 map.set_untracked(p(b"some/file3"))?;
1702 1718 assert_eq!(map.len(), 5);
1703 1719 assert_eq!(tracked_descendants(&map, b"some"), 0);
1704 1720 assert!(map.get(p(b"some/file3"))?.is_some());
1705 1721
1706 1722 Ok(())
1707 1723 }
1708 1724
1709 1725 /// Check that copies counter is correctly updated
1710 1726 #[test]
1711 1727 fn test_copy_source() -> Result<(), DirstateError> {
1712 1728 let mut map = OwningDirstateMap::new_empty(vec![]);
1713 1729
1714 1730 // Clean file
1715 1731 map.reset_state(p(b"files/clean"), true, true, false, false, None)?;
1716 1732 // Merged file
1717 1733 map.reset_state(p(b"files/from_p2"), true, true, true, false, None)?;
1718 1734 // Removed file
1719 1735 map.reset_state(p(b"removed"), false, true, false, false, None)?;
1720 1736 // Added file
1721 1737 map.reset_state(p(b"files/added"), true, false, false, false, None)?;
1722 1738 // Add copy
1723 1739 map.copy_map_insert(p(b"files/clean"), p(b"clean_copy_source"))?;
1724 1740 assert_eq!(map.copy_map_len(), 1);
1725 1741
1726 1742 // Copy override
1727 1743 map.copy_map_insert(p(b"files/clean"), p(b"other_clean_copy_source"))?;
1728 1744 assert_eq!(map.copy_map_len(), 1);
1729 1745
1730 1746 // Multiple copies
1731 1747 map.copy_map_insert(p(b"removed"), p(b"removed_copy_source"))?;
1732 1748 assert_eq!(map.copy_map_len(), 2);
1733 1749
1734 1750 map.copy_map_insert(p(b"files/added"), p(b"added_copy_source"))?;
1735 1751 assert_eq!(map.copy_map_len(), 3);
1736 1752
1737 1753 // Added, so the entry is completely removed
1738 1754 map.set_untracked(p(b"files/added"))?;
1739 1755 assert_does_not_exist(&map, b"files/added");
1740 1756 assert_eq!(map.copy_map_len(), 2);
1741 1757
1742 1758 // Removed, so the entry is kept around, so is its copy
1743 1759 map.set_untracked(p(b"removed"))?;
1744 1760 assert!(map.get(p(b"removed"))?.is_some());
1745 1761 assert_eq!(map.copy_map_len(), 2);
1746 1762
1747 1763 // Clean, so the entry is kept around, but not its copy
1748 1764 map.set_untracked(p(b"files/clean"))?;
1749 1765 assert!(map.get(p(b"files/clean"))?.is_some());
1750 1766 assert_eq!(map.copy_map_len(), 1);
1751 1767
1752 1768 map.copy_map_insert(p(b"files/from_p2"), p(b"from_p2_copy_source"))?;
1753 1769 assert_eq!(map.copy_map_len(), 2);
1754 1770
1755 1771 // Info from p2, so its copy source info is kept around
1756 1772 map.set_untracked(p(b"files/from_p2"))?;
1757 1773 assert!(map.get(p(b"files/from_p2"))?.is_some());
1758 1774 assert_eq!(map.copy_map_len(), 2);
1759 1775
1760 1776 Ok(())
1761 1777 }
1762 1778
1763 1779 /// Test with "on disk" data. For the sake of this test, the "on disk" data
1764 1780 /// does not actually come from the disk, but it's opaque to the code being
1765 1781 /// tested.
1766 1782 #[test]
1767 1783 fn test_on_disk() -> Result<(), DirstateError> {
1768 1784 // First let's create some data to put "on disk"
1769 1785 let mut map = OwningDirstateMap::new_empty(vec![]);
1770 1786
1771 1787 // A file that was just added
1772 1788 map.set_tracked(p(b"some/nested/added"))?;
1773 1789 map.copy_map_insert(p(b"some/nested/added"), p(b"added_copy_source"))?;
1774 1790
1775 1791 // A file that was removed
1776 1792 map.reset_state(
1777 1793 p(b"some/nested/removed"),
1778 1794 false,
1779 1795 true,
1780 1796 false,
1781 1797 false,
1782 1798 None,
1783 1799 )?;
1784 1800 // Only present in p2
1785 1801 map.reset_state(
1786 1802 p(b"other/p2_info_only"),
1787 1803 false,
1788 1804 false,
1789 1805 true,
1790 1806 false,
1791 1807 None,
1792 1808 )?;
1793 1809 map.copy_map_insert(
1794 1810 p(b"other/p2_info_only"),
1795 1811 p(b"other/p2_info_copy_source"),
1796 1812 )?;
1797 1813 // A file that was merged
1798 1814 map.reset_state(p(b"merged"), true, true, true, false, None)?;
1799 1815 // A file that is added, with info from p2
1800 1816 // XXX is that actually possible?
1801 1817 map.reset_state(
1802 1818 p(b"other/added_with_p2"),
1803 1819 true,
1804 1820 false,
1805 1821 true,
1806 1822 false,
1807 1823 None,
1808 1824 )?;
1809 1825 // One layer without any files to test deletion cascade
1810 1826 // A clean file
1811 1827 map.reset_state(
1812 1828 p(b"some/other/nested/clean"),
1813 1829 true,
1814 1830 true,
1815 1831 false,
1816 1832 false,
1817 1833 None,
1818 1834 )?;
1819 1835
1820 1836 let (packed, metadata, _should_append, _old_data_size) =
1821 1837 map.pack_v2(DirstateMapWriteMode::ForceNewDataFile)?;
1822 1838 let packed_len = packed.len();
1823 1839 assert!(packed_len > 0);
1824 1840
1825 1841 // Recreate "from disk"
1826 1842 let mut map = OwningDirstateMap::new_v2(
1827 1843 packed,
1828 1844 packed_len,
1829 1845 metadata.as_bytes(),
1830 1846 )?;
1831 1847
1832 1848 // Check that everything is accounted for
1833 1849 assert!(map.contains_key(p(b"some/nested/added"))?);
1834 1850 assert!(map.contains_key(p(b"some/nested/removed"))?);
1835 1851 assert!(map.contains_key(p(b"merged"))?);
1836 1852 assert!(map.contains_key(p(b"other/p2_info_only"))?);
1837 1853 assert!(map.contains_key(p(b"other/added_with_p2"))?);
1838 1854 assert!(map.contains_key(p(b"some/other/nested/clean"))?);
1839 1855 assert_eq!(
1840 1856 map.copy_map_get(p(b"some/nested/added"))?,
1841 1857 Some(p(b"added_copy_source"))
1842 1858 );
1843 1859 assert_eq!(
1844 1860 map.copy_map_get(p(b"other/p2_info_only"))?,
1845 1861 Some(p(b"other/p2_info_copy_source"))
1846 1862 );
1847 1863 assert_eq!(tracked_descendants(&map, b"some"), 2);
1848 1864 assert_eq!(descendants_with_an_entry(&map, b"some"), 3);
1849 1865 assert_eq!(tracked_descendants(&map, b"other"), 1);
1850 1866 assert_eq!(descendants_with_an_entry(&map, b"other"), 2);
1851 1867 assert_eq!(tracked_descendants(&map, b"some/other"), 1);
1852 1868 assert_eq!(descendants_with_an_entry(&map, b"some/other"), 1);
1853 1869 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1854 1870 assert_eq!(descendants_with_an_entry(&map, b"some/other/nested"), 1);
1855 1871 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1856 1872 assert_eq!(descendants_with_an_entry(&map, b"some/nested"), 2);
1857 1873 assert_eq!(map.len(), 6);
1858 1874 assert_eq!(map.get_map().unreachable_bytes, 0);
1859 1875 assert_eq!(map.copy_map_len(), 2);
1860 1876
1861 1877 // Shouldn't change anything since it's already not tracked
1862 1878 map.set_untracked(p(b"some/nested/removed"))?;
1863 1879 assert_eq!(map.get_map().unreachable_bytes, 0);
1864 1880
1865 1881 match map.get_map().root {
1866 1882 ChildNodes::InMemory(_) => {
1867 1883 panic!("root should not have been mutated")
1868 1884 }
1869 1885 _ => (),
1870 1886 }
1871 1887 // We haven't mutated enough (nothing, actually), we should still be in
1872 1888 // the append strategy
1873 1889 assert!(map.get_map().write_should_append());
1874 1890
1875 1891 // But this mutates the structure, so there should be unreachable_bytes
1876 1892 assert!(map.set_untracked(p(b"some/nested/added"))?);
1877 1893 let unreachable_bytes = map.get_map().unreachable_bytes;
1878 1894 assert!(unreachable_bytes > 0);
1879 1895
1880 1896 match map.get_map().root {
1881 1897 ChildNodes::OnDisk(_) => panic!("root should have been mutated"),
1882 1898 _ => (),
1883 1899 }
1884 1900
1885 1901 // This should not mutate the structure either, since `root` has
1886 1902 // already been mutated along with its direct children.
1887 1903 map.set_untracked(p(b"merged"))?;
1888 1904 assert_eq!(map.get_map().unreachable_bytes, unreachable_bytes);
1889 1905
1890 1906 match map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap() {
1891 1907 NodeRef::InMemory(_, _) => {
1892 1908 panic!("'other/added_with_p2' should not have been mutated")
1893 1909 }
1894 1910 _ => (),
1895 1911 }
1896 1912 // But this should, since it's in a different path
1897 1913 // than `<root>some/nested/add`
1898 1914 map.set_untracked(p(b"other/added_with_p2"))?;
1899 1915 assert!(map.get_map().unreachable_bytes > unreachable_bytes);
1900 1916
1901 1917 match map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap() {
1902 1918 NodeRef::OnDisk(_) => {
1903 1919 panic!("'other/added_with_p2' should have been mutated")
1904 1920 }
1905 1921 _ => (),
1906 1922 }
1907 1923
1908 1924 // We have rewritten most of the tree, we should create a new file
1909 1925 assert!(!map.get_map().write_should_append());
1910 1926
1911 1927 Ok(())
1912 1928 }
1913 1929 }
@@ -1,888 +1,890 b''
1 1 //! The "version 2" disk representation of the dirstate
2 2 //!
3 3 //! See `mercurial/helptext/internals/dirstate-v2.txt`
4 4
5 5 use crate::dirstate::{DirstateV2Data, TruncatedTimestamp};
6 6 use crate::dirstate_tree::dirstate_map::DirstateVersion;
7 7 use crate::dirstate_tree::dirstate_map::{
8 8 self, DirstateMap, DirstateMapWriteMode, NodeRef,
9 9 };
10 10 use crate::dirstate_tree::path_with_basename::WithBasename;
11 11 use crate::errors::HgError;
12 12 use crate::utils::hg_path::HgPath;
13 13 use crate::DirstateEntry;
14 14 use crate::DirstateError;
15 15 use crate::DirstateParents;
16 16 use bitflags::bitflags;
17 17 use bytes_cast::unaligned::{U16Be, U32Be};
18 18 use bytes_cast::BytesCast;
19 19 use format_bytes::format_bytes;
20 20 use rand::Rng;
21 21 use std::borrow::Cow;
22 22 use std::convert::{TryFrom, TryInto};
23 23 use std::fmt::Write;
24 24
/// Added at the start of `.hg/dirstate` when the "v2" format is used.
/// This a redundant sanity check more than an actual "magic number" since
/// `.hg/requires` already governs which format should be used.
pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";

/// Keep space for 256-bit hashes
const STORED_NODE_ID_BYTES: usize = 32;

/// … even though only 160 bits are used for now, with SHA-1
const USED_NODE_ID_BYTES: usize = 20;

pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];

/// Must match constants of the same names in `mercurial/dirstateutils/v2.py`
const TREE_METADATA_SIZE: usize = 44;
const NODE_SIZE: usize = 44;

/// Make sure that size-affecting changes are made knowingly
#[allow(unused)]
fn static_assert_size_of() {
    // `transmute` only compiles when the source and target types have the
    // same size, so these bindings fail to build if a struct's size drifts
    // from the declared constant.
    let _ = std::mem::transmute::<TreeMetadata, [u8; TREE_METADATA_SIZE]>;
    let _ = std::mem::transmute::<DocketHeader, [u8; TREE_METADATA_SIZE + 81]>;
    let _ = std::mem::transmute::<Node, [u8; NODE_SIZE]>;
}
50 50
// Must match `HEADER` in `mercurial/dirstateutils/docket.py`
#[derive(BytesCast)]
#[repr(C)]
struct DocketHeader {
    marker: [u8; V2_FORMAT_MARKER.len()],
    parent_1: [u8; STORED_NODE_ID_BYTES],
    parent_2: [u8; STORED_NODE_ID_BYTES],

    metadata: TreeMetadata,

    /// Counted in bytes
    data_size: Size,

    uuid_size: u8,
}

/// A parsed dirstate docket: the fixed-size header plus the UUID that
/// identifies the separate data file (see `data_filename`).
pub struct Docket<'on_disk> {
    header: &'on_disk DocketHeader,
    pub uuid: &'on_disk [u8],
}

/// Fields are documented in the *Tree metadata in the docket file*
/// section of `mercurial/helptext/internals/dirstate-v2.txt`
#[derive(BytesCast)]
#[repr(C)]
pub struct TreeMetadata {
    root_nodes: ChildNodes,
    nodes_with_entry_count: Size,
    nodes_with_copy_source_count: Size,
    unreachable_bytes: Size,
    unused: [u8; 4],

    /// See *Optional hash of ignore patterns* section of
    /// `mercurial/helptext/internals/dirstate-v2.txt`
    ignore_patterns_hash: IgnorePatternsHash,
}

/// Fields are documented in the *The data file format*
/// section of `mercurial/helptext/internals/dirstate-v2.txt`
#[derive(BytesCast, Debug)]
#[repr(C)]
pub(super) struct Node {
    full_path: PathSlice,

    /// In bytes from `self.full_path.start`
    base_name_start: PathSize,

    copy_source: OptPathSlice,
    children: ChildNodes,
    pub(super) descendants_with_entry_count: Size,
    pub(super) tracked_descendants_count: Size,
    flags: U16Be,
    size: U32Be,
    mtime: PackedTruncatedTimestamp,
}

bitflags! {
    /// Bit flags stored in `Node::flags`; decoded in `Node::assume_entry`
    /// and friends.
    #[repr(C)]
    struct Flags: u16 {
        const WDIR_TRACKED = 1 << 0;
        const P1_TRACKED = 1 << 1;
        const P2_INFO = 1 << 2;
        const MODE_EXEC_PERM = 1 << 3;
        const MODE_IS_SYMLINK = 1 << 4;
        const HAS_FALLBACK_EXEC = 1 << 5;
        const FALLBACK_EXEC = 1 << 6;
        const HAS_FALLBACK_SYMLINK = 1 << 7;
        const FALLBACK_SYMLINK = 1 << 8;
        const EXPECTED_STATE_IS_MODIFIED = 1 << 9;
        const HAS_MODE_AND_SIZE = 1 <<10;
        const HAS_MTIME = 1 <<11;
        const MTIME_SECOND_AMBIGUOUS = 1 << 12;
        const DIRECTORY = 1 <<13;
        const ALL_UNKNOWN_RECORDED = 1 <<14;
        const ALL_IGNORED_RECORDED = 1 <<15;
    }
}

/// Duration since the Unix epoch
#[derive(BytesCast, Copy, Clone, Debug)]
#[repr(C)]
struct PackedTruncatedTimestamp {
    truncated_seconds: U32Be,
    nanoseconds: U32Be,
}
136 136
/// Counted in bytes from the start of the file
///
/// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
type Offset = U32Be;

/// Counted in number of items
///
/// NOTE: we choose not to support counting more than 4 billion nodes anywhere.
type Size = U32Be;

/// Counted in bytes
///
/// NOTE: we choose not to support file names/paths longer than 64 KiB.
type PathSize = U16Be;

/// A contiguous sequence of `len` times `Node`, representing the child nodes
/// of either some other node or of the repository root.
///
/// Always sorted by ascending `full_path`, to allow binary search.
/// Since nodes with the same parent nodes also have the same parent path,
/// only the `base_name`s need to be compared during binary search.
#[derive(BytesCast, Copy, Clone, Debug)]
#[repr(C)]
struct ChildNodes {
    /// Byte offset of the first `Node` of the group within the data file
    start: Offset,
    /// Number of sibling `Node`s in the group
    len: Size,
}

/// A `HgPath` of `len` bytes
#[derive(BytesCast, Copy, Clone, Debug)]
#[repr(C)]
struct PathSlice {
    start: Offset,
    len: PathSize,
}

/// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
type OptPathSlice = PathSlice;
175 175
/// Unexpected file format found in `.hg/dirstate` with the "v2" format.
///
/// This should only happen if Mercurial is buggy or a repository is corrupted.
#[derive(Debug)]
pub struct DirstateV2ParseError {
    /// Human-readable description of what failed to parse.
    message: String,
}

impl DirstateV2ParseError {
    /// Build a parse error from any string-convertible message.
    pub fn new<S: Into<String>>(message: S) -> Self {
        Self {
            message: message.into(),
        }
    }
}

impl From<DirstateV2ParseError> for HgError {
    // A parse error always means a corrupted repository (or a bug).
    fn from(e: DirstateV2ParseError) -> Self {
        HgError::corrupted(format!("dirstate-v2 parse error: {}", e.message))
    }
}

impl From<DirstateV2ParseError> for crate::DirstateError {
    fn from(error: DirstateV2ParseError) -> Self {
        HgError::from(error).into()
    }
}

impl TreeMetadata {
    /// View the metadata as its raw on-disk byte representation.
    pub fn as_bytes(&self) -> &[u8] {
        BytesCast::as_bytes(self)
    }
}
209 209
impl<'on_disk> Docket<'on_disk> {
    /// Generate the identifier for a new data file
    ///
    /// TODO: support the `HGTEST_UUIDFILE` environment variable.
    /// See `mercurial/revlogutils/docket.py`
    pub fn new_uid() -> String {
        const ID_LENGTH: usize = 8;
        let mut id = String::with_capacity(ID_LENGTH);
        let mut rng = rand::thread_rng();
        for _ in 0..ID_LENGTH {
            // One random hexadecimal digit.
            // `unwrap` never panics because `impl Write for String`
            // never returns an error.
            write!(&mut id, "{:x}", rng.gen_range(0..16)).unwrap();
        }
        id
    }

    /// Serialize a docket (fixed-size header followed by the uuid bytes)
    /// for writing to `.hg/dirstate`.
    pub fn serialize(
        parents: DirstateParents,
        tree_metadata: TreeMetadata,
        data_size: u64,
        uuid: &[u8],
    ) -> Result<Vec<u8>, std::num::TryFromIntError> {
        let header = DocketHeader {
            marker: *V2_FORMAT_MARKER,
            parent_1: parents.p1.pad_to_256_bits(),
            parent_2: parents.p2.pad_to_256_bits(),
            metadata: tree_metadata,
            data_size: u32::try_from(data_size)?.into(),
            uuid_size: uuid.len().try_into()?,
        };
        let header = header.as_bytes();
        let mut docket = Vec::with_capacity(header.len() + uuid.len());
        docket.extend_from_slice(header);
        docket.extend_from_slice(uuid);
        Ok(docket)
    }

    /// The two working-directory parents recorded in the docket.
    pub fn parents(&self) -> DirstateParents {
        use crate::Node;
        // Only the first USED_NODE_ID_BYTES of each stored 256-bit field
        // are meaningful (SHA-1 for now); the rest is zero padding.
        let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
            .unwrap()
            .clone();
        let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
            .unwrap()
            .clone();
        DirstateParents { p1, p2 }
    }

    /// Raw bytes of the tree metadata embedded in the docket.
    pub fn tree_metadata(&self) -> &[u8] {
        self.header.metadata.as_bytes()
    }

    /// Size in bytes of the separate data file this docket points to.
    pub fn data_size(&self) -> usize {
        // This `unwrap` could only panic on a 16-bit CPU
        self.header.data_size.get().try_into().unwrap()
    }

    /// Name of the data file this docket points to, e.g. `dirstate.0f12ab34`.
    pub fn data_filename(&self) -> String {
        String::from_utf8(format_bytes!(b"dirstate.{}", self.uuid)).unwrap()
    }
}
273 273
274 274 pub fn read_docket(
275 275 on_disk: &[u8],
276 276 ) -> Result<Docket<'_>, DirstateV2ParseError> {
277 277 let (header, uuid) = DocketHeader::from_bytes(on_disk).map_err(|e| {
278 278 DirstateV2ParseError::new(format!("when reading docket, {}", e))
279 279 })?;
280 280 let uuid_size = header.uuid_size as usize;
281 281 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
282 282 Ok(Docket { header, uuid })
283 283 } else {
284 284 Err(DirstateV2ParseError::new(
285 285 "invalid format marker or uuid size",
286 286 ))
287 287 }
288 288 }
289 289
290 290 pub(super) fn read<'on_disk>(
291 291 on_disk: &'on_disk [u8],
292 292 metadata: &[u8],
293 293 ) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
294 294 if on_disk.is_empty() {
295 295 let mut map = DirstateMap::empty(on_disk);
296 296 map.dirstate_version = DirstateVersion::V2;
297 297 return Ok(map);
298 298 }
299 299 let (meta, _) = TreeMetadata::from_bytes(metadata).map_err(|e| {
300 300 DirstateV2ParseError::new(format!("when parsing tree metadata, {}", e))
301 301 })?;
302 302 let dirstate_map = DirstateMap {
303 303 on_disk,
304 304 root: dirstate_map::ChildNodes::OnDisk(
305 305 read_nodes(on_disk, meta.root_nodes).map_err(|mut e| {
306 306 e.message = format!("{}, when reading root notes", e.message);
307 307 e
308 308 })?,
309 309 ),
310 310 nodes_with_entry_count: meta.nodes_with_entry_count.get(),
311 311 nodes_with_copy_source_count: meta.nodes_with_copy_source_count.get(),
312 312 ignore_patterns_hash: meta.ignore_patterns_hash,
313 313 unreachable_bytes: meta.unreachable_bytes.get(),
314 314 old_data_size: on_disk.len(),
315 315 dirstate_version: DirstateVersion::V2,
316 write_mode: DirstateMapWriteMode::Auto,
316 317 };
317 318 Ok(dirstate_map)
318 319 }
319 320
320 321 impl Node {
321 322 pub(super) fn full_path<'on_disk>(
322 323 &self,
323 324 on_disk: &'on_disk [u8],
324 325 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
325 326 read_hg_path(on_disk, self.full_path)
326 327 }
327 328
328 329 pub(super) fn base_name_start<'on_disk>(
329 330 &self,
330 331 ) -> Result<usize, DirstateV2ParseError> {
331 332 let start = self.base_name_start.get();
332 333 if start < self.full_path.len.get() {
333 334 let start = usize::try_from(start)
334 335 // u32 -> usize, could only panic on a 16-bit CPU
335 336 .expect("dirstate-v2 base_name_start out of bounds");
336 337 Ok(start)
337 338 } else {
338 339 Err(DirstateV2ParseError::new("not enough bytes for base name"))
339 340 }
340 341 }
341 342
342 343 pub(super) fn base_name<'on_disk>(
343 344 &self,
344 345 on_disk: &'on_disk [u8],
345 346 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
346 347 let full_path = self.full_path(on_disk)?;
347 348 let base_name_start = self.base_name_start()?;
348 349 Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
349 350 }
350 351
351 352 pub(super) fn path<'on_disk>(
352 353 &self,
353 354 on_disk: &'on_disk [u8],
354 355 ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
355 356 Ok(WithBasename::from_raw_parts(
356 357 Cow::Borrowed(self.full_path(on_disk)?),
357 358 self.base_name_start()?,
358 359 ))
359 360 }
360 361
361 362 pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
362 363 self.copy_source.start.get() != 0
363 364 }
364 365
365 366 pub(super) fn copy_source<'on_disk>(
366 367 &self,
367 368 on_disk: &'on_disk [u8],
368 369 ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
369 370 Ok(if self.has_copy_source() {
370 371 Some(read_hg_path(on_disk, self.copy_source)?)
371 372 } else {
372 373 None
373 374 })
374 375 }
375 376
376 377 fn flags(&self) -> Flags {
377 378 Flags::from_bits_truncate(self.flags.get())
378 379 }
379 380
380 381 fn has_entry(&self) -> bool {
381 382 self.flags().intersects(
382 383 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
383 384 )
384 385 }
385 386
386 387 pub(super) fn node_data(
387 388 &self,
388 389 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
389 390 if self.has_entry() {
390 391 Ok(dirstate_map::NodeData::Entry(self.assume_entry()?))
391 392 } else if let Some(mtime) = self.cached_directory_mtime()? {
392 393 Ok(dirstate_map::NodeData::CachedDirectory { mtime })
393 394 } else {
394 395 Ok(dirstate_map::NodeData::None)
395 396 }
396 397 }
397 398
398 399 pub(super) fn cached_directory_mtime(
399 400 &self,
400 401 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
401 402 // For now we do not have code to handle the absence of
402 403 // ALL_UNKNOWN_RECORDED, so we ignore the mtime if the flag is
403 404 // unset.
404 405 if self.flags().contains(Flags::DIRECTORY)
405 406 && self.flags().contains(Flags::HAS_MTIME)
406 407 && self.flags().contains(Flags::ALL_UNKNOWN_RECORDED)
407 408 {
408 409 Ok(Some(self.mtime()?))
409 410 } else {
410 411 Ok(None)
411 412 }
412 413 }
413 414
414 415 fn synthesize_unix_mode(&self) -> u32 {
415 416 let file_type = if self.flags().contains(Flags::MODE_IS_SYMLINK) {
416 417 libc::S_IFLNK
417 418 } else {
418 419 libc::S_IFREG
419 420 };
420 421 let permisions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
421 422 0o755
422 423 } else {
423 424 0o644
424 425 };
425 426 (file_type | permisions).into()
426 427 }
427 428
428 429 fn mtime(&self) -> Result<TruncatedTimestamp, DirstateV2ParseError> {
429 430 let mut m: TruncatedTimestamp = self.mtime.try_into()?;
430 431 if self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS) {
431 432 m.second_ambiguous = true;
432 433 }
433 434 Ok(m)
434 435 }
435 436
436 437 fn assume_entry(&self) -> Result<DirstateEntry, DirstateV2ParseError> {
437 438 // TODO: convert through raw bits instead?
438 439 let wc_tracked = self.flags().contains(Flags::WDIR_TRACKED);
439 440 let p1_tracked = self.flags().contains(Flags::P1_TRACKED);
440 441 let p2_info = self.flags().contains(Flags::P2_INFO);
441 442 let mode_size = if self.flags().contains(Flags::HAS_MODE_AND_SIZE)
442 443 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
443 444 {
444 445 Some((self.synthesize_unix_mode(), self.size.into()))
445 446 } else {
446 447 None
447 448 };
448 449 let mtime = if self.flags().contains(Flags::HAS_MTIME)
449 450 && !self.flags().contains(Flags::DIRECTORY)
450 451 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
451 452 {
452 453 Some(self.mtime()?)
453 454 } else {
454 455 None
455 456 };
456 457 let fallback_exec = if self.flags().contains(Flags::HAS_FALLBACK_EXEC)
457 458 {
458 459 Some(self.flags().contains(Flags::FALLBACK_EXEC))
459 460 } else {
460 461 None
461 462 };
462 463 let fallback_symlink =
463 464 if self.flags().contains(Flags::HAS_FALLBACK_SYMLINK) {
464 465 Some(self.flags().contains(Flags::FALLBACK_SYMLINK))
465 466 } else {
466 467 None
467 468 };
468 469 Ok(DirstateEntry::from_v2_data(DirstateV2Data {
469 470 wc_tracked,
470 471 p1_tracked,
471 472 p2_info,
472 473 mode_size,
473 474 mtime,
474 475 fallback_exec,
475 476 fallback_symlink,
476 477 }))
477 478 }
478 479
479 480 pub(super) fn entry(
480 481 &self,
481 482 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
482 483 if self.has_entry() {
483 484 Ok(Some(self.assume_entry()?))
484 485 } else {
485 486 Ok(None)
486 487 }
487 488 }
488 489
489 490 pub(super) fn children<'on_disk>(
490 491 &self,
491 492 on_disk: &'on_disk [u8],
492 493 ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
493 494 read_nodes(on_disk, self.children)
494 495 }
495 496
496 497 pub(super) fn to_in_memory_node<'on_disk>(
497 498 &self,
498 499 on_disk: &'on_disk [u8],
499 500 ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
500 501 Ok(dirstate_map::Node {
501 502 children: dirstate_map::ChildNodes::OnDisk(
502 503 self.children(on_disk)?,
503 504 ),
504 505 copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
505 506 data: self.node_data()?,
506 507 descendants_with_entry_count: self
507 508 .descendants_with_entry_count
508 509 .get(),
509 510 tracked_descendants_count: self.tracked_descendants_count.get(),
510 511 })
511 512 }
512 513
513 514 fn from_dirstate_entry(
514 515 entry: &DirstateEntry,
515 516 ) -> (Flags, U32Be, PackedTruncatedTimestamp) {
516 517 let DirstateV2Data {
517 518 wc_tracked,
518 519 p1_tracked,
519 520 p2_info,
520 521 mode_size: mode_size_opt,
521 522 mtime: mtime_opt,
522 523 fallback_exec,
523 524 fallback_symlink,
524 525 } = entry.v2_data();
525 526 // TODO: convert through raw flag bits instead?
526 527 let mut flags = Flags::empty();
527 528 flags.set(Flags::WDIR_TRACKED, wc_tracked);
528 529 flags.set(Flags::P1_TRACKED, p1_tracked);
529 530 flags.set(Flags::P2_INFO, p2_info);
530 531 let size = if let Some((m, s)) = mode_size_opt {
531 532 let exec_perm = m & (libc::S_IXUSR as u32) != 0;
532 533 let is_symlink = m & (libc::S_IFMT as u32) == libc::S_IFLNK as u32;
533 534 flags.set(Flags::MODE_EXEC_PERM, exec_perm);
534 535 flags.set(Flags::MODE_IS_SYMLINK, is_symlink);
535 536 flags.insert(Flags::HAS_MODE_AND_SIZE);
536 537 s.into()
537 538 } else {
538 539 0.into()
539 540 };
540 541 let mtime = if let Some(m) = mtime_opt {
541 542 flags.insert(Flags::HAS_MTIME);
542 543 if m.second_ambiguous {
543 544 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS);
544 545 };
545 546 m.into()
546 547 } else {
547 548 PackedTruncatedTimestamp::null()
548 549 };
549 550 if let Some(f_exec) = fallback_exec {
550 551 flags.insert(Flags::HAS_FALLBACK_EXEC);
551 552 if f_exec {
552 553 flags.insert(Flags::FALLBACK_EXEC);
553 554 }
554 555 }
555 556 if let Some(f_symlink) = fallback_symlink {
556 557 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
557 558 if f_symlink {
558 559 flags.insert(Flags::FALLBACK_SYMLINK);
559 560 }
560 561 }
561 562 (flags, size, mtime)
562 563 }
563 564 }
564 565
565 566 fn read_hg_path(
566 567 on_disk: &[u8],
567 568 slice: PathSlice,
568 569 ) -> Result<&HgPath, DirstateV2ParseError> {
569 570 read_slice(on_disk, slice.start, slice.len.get()).map(HgPath::new)
570 571 }
571 572
/// Resolve a `ChildNodes` descriptor into a borrowed `&[Node]` within
/// `on_disk`.
fn read_nodes(
    on_disk: &[u8],
    slice: ChildNodes,
) -> Result<&[Node], DirstateV2ParseError> {
    read_slice(on_disk, slice.start, slice.len.get())
}
578 579
579 580 fn read_slice<T, Len>(
580 581 on_disk: &[u8],
581 582 start: Offset,
582 583 len: Len,
583 584 ) -> Result<&[T], DirstateV2ParseError>
584 585 where
585 586 T: BytesCast,
586 587 Len: TryInto<usize>,
587 588 {
588 589 // Either `usize::MAX` would result in "out of bounds" error since a single
589 590 // `&[u8]` cannot occupy the entire addess space.
590 591 let start = start.get().try_into().unwrap_or(std::usize::MAX);
591 592 let len = len.try_into().unwrap_or(std::usize::MAX);
592 593 let bytes = match on_disk.get(start..) {
593 594 Some(bytes) => bytes,
594 595 None => {
595 596 return Err(DirstateV2ParseError::new(
596 597 "not enough bytes from disk",
597 598 ))
598 599 }
599 600 };
600 601 T::slice_from_bytes(bytes, len)
601 602 .map_err(|e| {
602 603 DirstateV2ParseError::new(format!("when reading a slice, {}", e))
603 604 })
604 605 .map(|(slice, _rest)| slice)
605 606 }
606 607
/// Call `f` for every tracked path in the on-disk dirstate, walking the
/// node tree depth-first without building an in-memory map.
pub(crate) fn for_each_tracked_path<'on_disk>(
    on_disk: &'on_disk [u8],
    metadata: &[u8],
    mut f: impl FnMut(&'on_disk HgPath),
) -> Result<(), DirstateV2ParseError> {
    let (meta, _) = TreeMetadata::from_bytes(metadata).map_err(|e| {
        DirstateV2ParseError::new(format!("when parsing tree metadata, {}", e))
    })?;
    // Recursive helper: visit one group of sibling nodes, then each
    // node's children.
    fn recur<'on_disk>(
        on_disk: &'on_disk [u8],
        nodes: ChildNodes,
        f: &mut impl FnMut(&'on_disk HgPath),
    ) -> Result<(), DirstateV2ParseError> {
        for node in read_nodes(on_disk, nodes)? {
            if let Some(entry) = node.entry()? {
                if entry.tracked() {
                    f(node.full_path(on_disk)?)
                }
            }
            recur(on_disk, node.children, f)?
        }
        Ok(())
    }
    recur(on_disk, meta.root_nodes, &mut f)
}
632 633
/// Returns new data and metadata, together with whether that data should be
/// appended to the existing data file whose content is at
/// `dirstate_map.on_disk` (true), instead of written to a new data file
/// (false), and the previous size of data on disk.
pub(super) fn write(
    dirstate_map: &DirstateMap,
    write_mode: DirstateMapWriteMode,
) -> Result<(Vec<u8>, TreeMetadata, bool, usize), DirstateError> {
    // `Auto` lets the map decide; the `Force*` variants let callers pin
    // the behavior explicitly.
    let append = match write_mode {
        DirstateMapWriteMode::Auto => dirstate_map.write_should_append(),
        DirstateMapWriteMode::ForceNewDataFile => false,
        DirstateMapWriteMode::ForceAppend => true,
    };
    if append {
        log::trace!("appending to the dirstate data file");
    } else {
        log::trace!("creating new dirstate data file");
    }

    // This ignores the space for paths, and for nodes without an entry.
    // TODO: better estimate? Skip the `Vec` and write to a file directly?
    let size_guess = std::mem::size_of::<Node>()
        * dirstate_map.nodes_with_entry_count as usize;

    let mut writer = Writer {
        dirstate_map,
        append,
        out: Vec::with_capacity(size_guess),
    };

    let root_nodes = writer.write_nodes(dirstate_map.root.as_ref())?;

    // A freshly-written file keeps nothing from the old one, so none of
    // its bytes are unreachable.
    let unreachable_bytes = if append {
        dirstate_map.unreachable_bytes
    } else {
        0
    };
    let meta = TreeMetadata {
        root_nodes,
        nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
        nodes_with_copy_source_count: dirstate_map
            .nodes_with_copy_source_count
            .into(),
        unreachable_bytes: unreachable_bytes.into(),
        unused: [0; 4],
        ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
    };
    Ok((writer.out, meta, append, dirstate_map.old_data_size))
}
681 683
/// Serialization state while writing a dirstate-v2 data file.
struct Writer<'dmap, 'on_disk> {
    dirstate_map: &'dmap DirstateMap<'on_disk>,
    /// When true, nodes and paths already present in `on_disk` are
    /// referenced by offset instead of being copied into `out`.
    append: bool,
    /// Bytes to append to (or to become) the data file.
    out: Vec<u8>,
}
687 689
impl Writer<'_, '_> {
    /// Serialize `nodes` (and, recursively, their children) into
    /// `self.out`, returning the location of the written sibling group.
    fn write_nodes(
        &mut self,
        nodes: dirstate_map::ChildNodesRef,
    ) -> Result<ChildNodes, DirstateError> {
        // Reuse already-written nodes if possible
        if self.append {
            if let dirstate_map::ChildNodesRef::OnDisk(nodes_slice) = nodes {
                let start = self.on_disk_offset_of(nodes_slice).expect(
                    "dirstate-v2 OnDisk nodes not found within on_disk",
                );
                let len = child_nodes_len_from_usize(nodes_slice.len());
                return Ok(ChildNodes { start, len });
            }
        }

        // `dirstate_map::ChildNodes::InMemory` contains a `HashMap` which has
        // undefined iteration order. Sort to enable binary search in the
        // written file.
        let nodes = nodes.sorted();
        let nodes_len = nodes.len();

        // First accumulate serialized nodes in a `Vec`
        let mut on_disk_nodes = Vec::with_capacity(nodes_len);
        for node in nodes {
            // Children are written before their parent so that the parent
            // can reference them by offset.
            let children =
                self.write_nodes(node.children(self.dirstate_map.on_disk)?)?;
            let full_path = node.full_path(self.dirstate_map.on_disk)?;
            let full_path = self.write_path(full_path.as_bytes());
            let copy_source = if let Some(source) =
                node.copy_source(self.dirstate_map.on_disk)?
            {
                self.write_path(source.as_bytes())
            } else {
                PathSlice {
                    start: 0.into(),
                    len: 0.into(),
                }
            };
            on_disk_nodes.push(match node {
                NodeRef::InMemory(path, node) => {
                    let (flags, size, mtime) = match &node.data {
                        dirstate_map::NodeData::Entry(entry) => {
                            Node::from_dirstate_entry(entry)
                        }
                        dirstate_map::NodeData::CachedDirectory { mtime } => {
                            // we currently never set a mtime if unknown file
                            // are present.
                            // So if we have a mtime for a directory, we know
                            // they are no unknown
                            // files and we
                            // blindly set ALL_UNKNOWN_RECORDED.
                            //
                            // We never set ALL_IGNORED_RECORDED since we
                            // don't track that case
                            // currently.
                            let mut flags = Flags::DIRECTORY
                                | Flags::HAS_MTIME
                                | Flags::ALL_UNKNOWN_RECORDED;
                            if mtime.second_ambiguous {
                                flags.insert(Flags::MTIME_SECOND_AMBIGUOUS)
                            }
                            (flags, 0.into(), (*mtime).into())
                        }
                        dirstate_map::NodeData::None => (
                            Flags::DIRECTORY,
                            0.into(),
                            PackedTruncatedTimestamp::null(),
                        ),
                    };
                    Node {
                        children,
                        copy_source,
                        full_path,
                        base_name_start: u16::try_from(path.base_name_start())
                            // Could only panic for paths over 64 KiB
                            .expect("dirstate-v2 path length overflow")
                            .into(),
                        descendants_with_entry_count: node
                            .descendants_with_entry_count
                            .into(),
                        tracked_descendants_count: node
                            .tracked_descendants_count
                            .into(),
                        flags: flags.bits().into(),
                        size,
                        mtime,
                    }
                }
                NodeRef::OnDisk(node) => Node {
                    children,
                    copy_source,
                    full_path,
                    ..*node
                },
            })
        }
        // … so we can write them contiguously, after writing everything else
        // they refer to.
        let start = self.current_offset();
        let len = child_nodes_len_from_usize(nodes_len);
        self.out.extend(on_disk_nodes.as_bytes());
        Ok(ChildNodes { start, len })
    }

    /// If the given slice of items is within `on_disk`, returns its offset
    /// from the start of `on_disk`.
    fn on_disk_offset_of<T>(&self, slice: &[T]) -> Option<Offset>
    where
        T: BytesCast,
    {
        // Compare raw addresses: borrowed on-disk slices point directly
        // into the `on_disk` mmap/buffer.
        fn address_range(slice: &[u8]) -> std::ops::RangeInclusive<usize> {
            let start = slice.as_ptr() as usize;
            let end = start + slice.len();
            start..=end
        }
        let slice_addresses = address_range(slice.as_bytes());
        let on_disk_addresses = address_range(self.dirstate_map.on_disk);
        if on_disk_addresses.contains(slice_addresses.start())
            && on_disk_addresses.contains(slice_addresses.end())
        {
            let offset = slice_addresses.start() - on_disk_addresses.start();
            Some(offset_from_usize(offset))
        } else {
            None
        }
    }

    /// Offset that the next write to `self.out` will have in the final
    /// data file (accounting for existing data when appending).
    fn current_offset(&mut self) -> Offset {
        let mut offset = self.out.len();
        if self.append {
            offset += self.dirstate_map.on_disk.len()
        }
        offset_from_usize(offset)
    }

    /// Write `slice` as a path, reusing identical on-disk bytes when
    /// appending instead of duplicating them.
    fn write_path(&mut self, slice: &[u8]) -> PathSlice {
        let len = path_len_from_usize(slice.len());
        // Reuse an already-written path if possible
        if self.append {
            if let Some(start) = self.on_disk_offset_of(slice) {
                return PathSlice { start, len };
            }
        }
        let start = self.current_offset();
        self.out.extend(slice.as_bytes());
        PathSlice { start, len }
    }
}
837 839
838 840 fn offset_from_usize(x: usize) -> Offset {
839 841 u32::try_from(x)
840 842 // Could only panic for a dirstate file larger than 4 GiB
841 843 .expect("dirstate-v2 offset overflow")
842 844 .into()
843 845 }
844 846
845 847 fn child_nodes_len_from_usize(x: usize) -> Size {
846 848 u32::try_from(x)
847 849 // Could only panic with over 4 billion nodes
848 850 .expect("dirstate-v2 slice length overflow")
849 851 .into()
850 852 }
851 853
852 854 fn path_len_from_usize(x: usize) -> PathSize {
853 855 u16::try_from(x)
854 856 // Could only panic for paths over 64 KiB
855 857 .expect("dirstate-v2 path length overflow")
856 858 .into()
857 859 }
858 860
impl From<TruncatedTimestamp> for PackedTruncatedTimestamp {
    fn from(timestamp: TruncatedTimestamp) -> Self {
        Self {
            truncated_seconds: timestamp.truncated_seconds().into(),
            nanoseconds: timestamp.nanoseconds().into(),
        }
    }
}

impl TryFrom<PackedTruncatedTimestamp> for TruncatedTimestamp {
    type Error = DirstateV2ParseError;

    fn try_from(
        timestamp: PackedTruncatedTimestamp,
    ) -> Result<Self, Self::Error> {
        // `second_ambiguous` is not part of the packed form: it is carried
        // separately via the MTIME_SECOND_AMBIGUOUS flag (see `Node::mtime`),
        // hence `false` here.
        Self::from_already_truncated(
            timestamp.truncated_seconds.get(),
            timestamp.nanoseconds.get(),
            false,
        )
    }
}
impl PackedTruncatedTimestamp {
    /// The all-zero timestamp, used when no mtime is stored.
    fn null() -> Self {
        Self {
            truncated_seconds: 0.into(),
            nanoseconds: 0.into(),
        }
    }
}
@@ -1,582 +1,599 b''
1 1 use crate::changelog::Changelog;
2 2 use crate::config::{Config, ConfigError, ConfigParseError};
3 3 use crate::dirstate::DirstateParents;
4 4 use crate::dirstate_tree::dirstate_map::DirstateMapWriteMode;
5 5 use crate::dirstate_tree::on_disk::Docket as DirstateDocket;
6 6 use crate::dirstate_tree::owning::OwningDirstateMap;
7 7 use crate::errors::HgResultExt;
8 8 use crate::errors::{HgError, IoResultExt};
9 9 use crate::lock::{try_with_lock_no_wait, LockError};
10 10 use crate::manifest::{Manifest, Manifestlog};
11 11 use crate::revlog::filelog::Filelog;
12 12 use crate::revlog::revlog::RevlogError;
13 13 use crate::utils::files::get_path_from_bytes;
14 14 use crate::utils::hg_path::HgPath;
15 15 use crate::utils::SliceExt;
16 16 use crate::vfs::{is_dir, is_file, Vfs};
17 17 use crate::{requirements, NodePrefix};
18 18 use crate::{DirstateError, Revision};
19 19 use std::cell::{Ref, RefCell, RefMut};
20 20 use std::collections::HashSet;
21 21 use std::io::Seek;
22 22 use std::io::SeekFrom;
23 23 use std::io::Write as IoWrite;
24 24 use std::path::{Path, PathBuf};
25 25
/// A repository on disk
pub struct Repo {
    /// Root of the working directory.
    working_directory: PathBuf,
    /// Path to the `.hg` directory.
    dot_hg: PathBuf,
    /// Path to the store (`.hg/store`, possibly shared from another repo).
    store: PathBuf,
    /// Requirements loaded from the `requires` file(s).
    requirements: HashSet<String>,
    /// Effective configuration (user/system combined with repo config).
    config: Config,
    // The fields below are lazily initialized and cached on first access.
    dirstate_parents: LazyCell<DirstateParents>,
    /// Data-file UUID from the dirstate-v2 docket; `None` for an empty
    /// dirstate (only meaningful on dirstate-v2 repositories).
    dirstate_data_file_uuid: LazyCell<Option<Vec<u8>>>,
    dirstate_map: LazyCell<OwningDirstateMap>,
    changelog: LazyCell<Changelog>,
    manifestlog: LazyCell<Manifestlog>,
}
39 39
/// Errors that can occur while locating or opening a repository.
#[derive(Debug, derive_more::From)]
pub enum RepoError {
    /// No `.hg` directory was found at or above `at`.
    NotFound {
        at: PathBuf,
    },
    #[from]
    ConfigParseError(ConfigParseError),
    #[from]
    Other(HgError),
}
50 50
51 51 impl From<ConfigError> for RepoError {
52 52 fn from(error: ConfigError) -> Self {
53 53 match error {
54 54 ConfigError::Parse(error) => error.into(),
55 55 ConfigError::Other(error) => error.into(),
56 56 }
57 57 }
58 58 }
59 59
60 60 impl Repo {
61 61 /// tries to find nearest repository root in current working directory or
62 62 /// its ancestors
63 63 pub fn find_repo_root() -> Result<PathBuf, RepoError> {
64 64 let current_directory = crate::utils::current_dir()?;
65 65 // ancestors() is inclusive: it first yields `current_directory`
66 66 // as-is.
67 67 for ancestor in current_directory.ancestors() {
68 68 if is_dir(ancestor.join(".hg"))? {
69 69 return Ok(ancestor.to_path_buf());
70 70 }
71 71 }
72 72 return Err(RepoError::NotFound {
73 73 at: current_directory,
74 74 });
75 75 }
76 76
77 77 /// Find a repository, either at the given path (which must contain a `.hg`
78 78 /// sub-directory) or by searching the current directory and its
79 79 /// ancestors.
80 80 ///
81 81 /// A method with two very different "modes" like this usually a code smell
82 82 /// to make two methods instead, but in this case an `Option` is what rhg
83 83 /// sub-commands get from Clap for the `-R` / `--repository` CLI argument.
84 84 /// Having two methods would just move that `if` to almost all callers.
85 85 pub fn find(
86 86 config: &Config,
87 87 explicit_path: Option<PathBuf>,
88 88 ) -> Result<Self, RepoError> {
89 89 if let Some(root) = explicit_path {
90 90 if is_dir(root.join(".hg"))? {
91 91 Self::new_at_path(root.to_owned(), config)
92 92 } else if is_file(&root)? {
93 93 Err(HgError::unsupported("bundle repository").into())
94 94 } else {
95 95 Err(RepoError::NotFound {
96 96 at: root.to_owned(),
97 97 })
98 98 }
99 99 } else {
100 100 let root = Self::find_repo_root()?;
101 101 Self::new_at_path(root, config)
102 102 }
103 103 }
104 104
    /// To be called after checking that `.hg` is a sub-directory.
    ///
    /// Resolves share/share-safe indirection, loads requirements and the
    /// per-repository configuration, then builds the `Repo` value with all
    /// lazily-initialized caches empty.
    fn new_at_path(
        working_directory: PathBuf,
        config: &Config,
    ) -> Result<Self, RepoError> {
        let dot_hg = working_directory.join(".hg");

        // Per-repository config files, read in this order.
        let mut repo_config_files = Vec::new();
        repo_config_files.push(dot_hg.join("hgrc"));
        repo_config_files.push(dot_hg.join("hgrc-not-shared"));

        let hg_vfs = Vfs { base: &dot_hg };
        let mut reqs = requirements::load_if_exists(hg_vfs)?;
        let relative =
            reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
        let shared =
            reqs.contains(requirements::SHARED_REQUIREMENT) || relative;

        // From `mercurial/localrepo.py`:
        //
        // if .hg/requires contains the sharesafe requirement, it means
        // there exists a `.hg/store/requires` too and we should read it
        // NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
        // is present. We never write SHARESAFE_REQUIREMENT for a repo if store
        // is not present, refer checkrequirementscompat() for that
        //
        // However, if SHARESAFE_REQUIREMENT is not present, it means that the
        // repository was shared the old way. We check the share source
        // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
        // current repository needs to be reshared
        let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);

        let store_path;
        if !shared {
            store_path = dot_hg.join("store");
        } else {
            // Shared repository: resolve the share source from
            // `.hg/sharedpath` (possibly relative to `.hg`).
            let bytes = hg_vfs.read("sharedpath")?;
            let mut shared_path =
                get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
                    .to_owned();
            if relative {
                shared_path = dot_hg.join(shared_path)
            }
            if !is_dir(&shared_path)? {
                return Err(HgError::corrupted(format!(
                    ".hg/sharedpath points to nonexistent directory {}",
                    shared_path.display()
                ))
                .into());
            }

            store_path = shared_path.join("store");

            // Both sides of a share must agree on share-safe.
            let source_is_share_safe =
                requirements::load(Vfs { base: &shared_path })?
                    .contains(requirements::SHARESAFE_REQUIREMENT);

            if share_safe != source_is_share_safe {
                return Err(HgError::unsupported("share-safe mismatch").into());
            }

            if share_safe {
                // The share source's hgrc is read first so the local repo
                // config can override it.
                repo_config_files.insert(0, shared_path.join("hgrc"))
            }
        }
        if share_safe {
            // Share-safe repositories keep additional requirements in the
            // store.
            reqs.extend(requirements::load(Vfs { base: &store_path })?);
        }

        // HGRCSKIPREPO disables reading any per-repository config file.
        let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
            config.combine_with_repo(&repo_config_files)?
        } else {
            config.clone()
        };

        let repo = Self {
            requirements: reqs,
            working_directory,
            store: store_path,
            dot_hg,
            config: repo_config,
            dirstate_parents: LazyCell::new(),
            dirstate_data_file_uuid: LazyCell::new(),
            dirstate_map: LazyCell::new(),
            changelog: LazyCell::new(),
            manifestlog: LazyCell::new(),
        };

        requirements::check(&repo)?;

        Ok(repo)
    }
197 197
    /// Root of the working directory.
    pub fn working_directory_path(&self) -> &Path {
        &self.working_directory
    }
201 201
    /// Requirements loaded from the `requires` file(s).
    pub fn requirements(&self) -> &HashSet<String> {
        &self.requirements
    }
205 205
    /// The repository's effective configuration.
    pub fn config(&self) -> &Config {
        &self.config
    }
209 209
    /// For accessing repository files (in `.hg`), except for the store
    /// (`.hg/store`).
    pub fn hg_vfs(&self) -> Vfs<'_> {
        Vfs { base: &self.dot_hg }
    }
215 215
    /// For accessing repository store files (in `.hg/store`), which may
    /// live in a shared repository.
    pub fn store_vfs(&self) -> Vfs<'_> {
        Vfs { base: &self.store }
    }
220 220
    /// For accessing files of the working copy itself.
    pub fn working_directory_vfs(&self) -> Vfs<'_> {
        Vfs {
            base: &self.working_directory,
        }
    }
227 227
    /// Run `f` while holding the working-copy lock ("wlock"), failing
    /// immediately (instead of waiting) if the lock is already held.
    pub fn try_with_wlock_no_wait<R>(
        &self,
        f: impl FnOnce() -> R,
    ) -> Result<R, LockError> {
        try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
    }
234 234
    /// Whether this repository uses the dirstate-v2 format.
    pub fn has_dirstate_v2(&self) -> bool {
        self.requirements
            .contains(requirements::DIRSTATE_V2_REQUIREMENT)
    }
239 239
    /// Whether this repository uses the sparse extension's requirement.
    pub fn has_sparse(&self) -> bool {
        self.requirements.contains(requirements::SPARSE_REQUIREMENT)
    }
243 243
    /// Whether this is a narrow clone.
    pub fn has_narrow(&self) -> bool {
        self.requirements.contains(requirements::NARROW_REQUIREMENT)
    }
247 247
    /// Whether this repository uses persistent nodemap files.
    pub fn has_nodemap(&self) -> bool {
        self.requirements
            .contains(requirements::NODEMAP_REQUIREMENT)
    }
252 252
253 253 fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
254 254 Ok(self
255 255 .hg_vfs()
256 256 .read("dirstate")
257 257 .io_not_found_as_none()?
258 258 .unwrap_or(Vec::new()))
259 259 }
260 260
    /// The working copy's parent revisions, read from the dirstate and
    /// cached for subsequent calls.
    pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
        Ok(*self
            .dirstate_parents
            .get_or_init(|| self.read_dirstate_parents())?)
    }
266 266
    /// Read the dirstate parents from disk, opportunistically caching the
    /// dirstate-v2 data-file UUID when the docket makes it available.
    fn read_dirstate_parents(&self) -> Result<DirstateParents, HgError> {
        let dirstate = self.dirstate_file_contents()?;
        let parents = if dirstate.is_empty() {
            if self.has_dirstate_v2() {
                // Empty dirstate: no docket, hence no data file.
                self.dirstate_data_file_uuid.set(None);
            }
            DirstateParents::NULL
        } else if self.has_dirstate_v2() {
            // v2: parents live in the docket, alongside the data-file UUID.
            let docket =
                crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
            self.dirstate_data_file_uuid
                .set(Some(docket.uuid.to_owned()));
            docket.parents()
        } else {
            // v1: parents are at the start of the dirstate file itself.
            crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
                .clone()
        };
        self.dirstate_parents.set(parents);
        Ok(parents)
    }
287 287
    /// Read the dirstate-v2 data-file UUID from the docket, caching the
    /// dirstate parents as a side effect.
    ///
    /// Returns `None` when the dirstate is empty (no docket written yet).
    /// Must only be called on dirstate-v2 repositories (asserted).
    fn read_dirstate_data_file_uuid(
        &self,
    ) -> Result<Option<Vec<u8>>, HgError> {
        assert!(
            self.has_dirstate_v2(),
            "accessing dirstate data file ID without dirstate-v2"
        );
        let dirstate = self.dirstate_file_contents()?;
        if dirstate.is_empty() {
            self.dirstate_parents.set(DirstateParents::NULL);
            Ok(None)
        } else {
            let docket =
                crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
            self.dirstate_parents.set(docket.parents());
            Ok(Some(docket.uuid.to_owned()))
        }
    }
306 306
    /// Load the on-disk dirstate into a new `OwningDirstateMap`, caching
    /// the parents (and, for dirstate-v2, the data-file UUID) as a side
    /// effect, and configuring the v2 write mode from the devel config.
    // NOTE(review): this is the post-change side of the diff hunk shown in
    // the changeset; the deleted pre-change lines are dropped.
    fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
        let dirstate_file_contents = self.dirstate_file_contents()?;
        if dirstate_file_contents.is_empty() {
            self.dirstate_parents.set(DirstateParents::NULL);
            if self.has_dirstate_v2() {
                self.dirstate_data_file_uuid.set(None);
            }
            Ok(OwningDirstateMap::new_empty(Vec::new()))
        } else if self.has_dirstate_v2() {
            let docket = crate::dirstate_tree::on_disk::read_docket(
                &dirstate_file_contents,
            )?;
            self.dirstate_parents.set(docket.parents());
            self.dirstate_data_file_uuid
                .set(Some(docket.uuid.to_owned()));
            let data_size = docket.data_size();
            let metadata = docket.tree_metadata();
            let mut map =
                if crate::vfs::is_on_nfs_mount(docket.data_filename()) {
                    // Don't mmap on NFS to prevent `SIGBUS` error on deletion
                    OwningDirstateMap::new_v2(
                        self.hg_vfs().read(docket.data_filename())?,
                        data_size,
                        metadata,
                    )
                } else if let Some(data_mmap) = self
                    .hg_vfs()
                    .mmap_open(docket.data_filename())
                    .io_not_found_as_none()?
                {
                    OwningDirstateMap::new_v2(data_mmap, data_size, metadata)
                } else {
                    // The data file vanished: treat the map as empty.
                    OwningDirstateMap::new_v2(Vec::new(), data_size, metadata)
                }?;

            // Devel config knob controlling how the v2 data file is written
            // back: "auto" (default), "force-new", or "force-append".
            let write_mode_config = self
                .config()
                .get_str(b"devel", b"dirstate.v2.data_update_mode")
                .unwrap_or(Some("auto"))
                .unwrap_or("auto"); // don't bother for devel options
            let write_mode = match write_mode_config {
                "auto" => DirstateMapWriteMode::Auto,
                "force-new" => DirstateMapWriteMode::ForceNewDataFile,
                "force-append" => DirstateMapWriteMode::ForceAppend,
                // Unknown values silently fall back to the default.
                _ => DirstateMapWriteMode::Auto,
            };

            map.with_dmap_mut(|m| m.set_write_mode(write_mode));

            Ok(map)
        } else {
            // dirstate-v1: a single file holding both parents and entries.
            let (map, parents) =
                OwningDirstateMap::new_v1(dirstate_file_contents)?;
            self.dirstate_parents.set(parents);
            Ok(map)
        }
    }
347 364
    /// Shared access to the (lazily loaded) dirstate map.
    pub fn dirstate_map(
        &self,
    ) -> Result<Ref<OwningDirstateMap>, DirstateError> {
        self.dirstate_map.get_or_init(|| self.new_dirstate_map())
    }
353 370
    /// Mutable access to the (lazily loaded) dirstate map.
    pub fn dirstate_map_mut(
        &self,
    ) -> Result<RefMut<OwningDirstateMap>, DirstateError> {
        self.dirstate_map
            .get_mut_or_init(|| self.new_dirstate_map())
    }
360 377
    /// Open the changelog from the store; initializer for the lazy cell.
    fn new_changelog(&self) -> Result<Changelog, HgError> {
        Changelog::open(&self.store_vfs(), self.has_nodemap())
    }
364 381
    /// Shared access to the (lazily opened) changelog.
    pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
        self.changelog.get_or_init(|| self.new_changelog())
    }
368 385
    /// Mutable access to the (lazily opened) changelog.
    pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> {
        self.changelog.get_mut_or_init(|| self.new_changelog())
    }
372 389
    /// Open the manifest log from the store; initializer for the lazy cell.
    fn new_manifestlog(&self) -> Result<Manifestlog, HgError> {
        Manifestlog::open(&self.store_vfs(), self.has_nodemap())
    }
376 393
    /// Shared access to the (lazily opened) manifest log.
    pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> {
        self.manifestlog.get_or_init(|| self.new_manifestlog())
    }
380 397
    /// Mutable access to the (lazily opened) manifest log.
    pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> {
        self.manifestlog.get_mut_or_init(|| self.new_manifestlog())
    }
384 401
    /// Returns the manifest of the *changeset* with the given node ID
    pub fn manifest_for_node(
        &self,
        node: impl Into<NodePrefix>,
    ) -> Result<Manifest, RevlogError> {
        // The changeset entry records which manifest revision to load.
        self.manifestlog()?.data_for_node(
            self.changelog()?
                .data_for_node(node.into())?
                .manifest_node()?
                .into(),
        )
    }
397 414
    /// Returns the manifest of the *changeset* with the given revision number
    pub fn manifest_for_rev(
        &self,
        revision: Revision,
    ) -> Result<Manifest, RevlogError> {
        // Same as `manifest_for_node`, but looked up by revision number.
        self.manifestlog()?.data_for_node(
            self.changelog()?
                .data_for_rev(revision)?
                .manifest_node()?
                .into(),
        )
    }
410 427
411 428 pub fn has_subrepos(&self) -> Result<bool, DirstateError> {
412 429 if let Some(entry) = self.dirstate_map()?.get(HgPath::new(".hgsub"))? {
413 430 Ok(entry.tracked())
414 431 } else {
415 432 Ok(false)
416 433 }
417 434 }
418 435
    /// Open the filelog (per-file revision log) tracking `path`.
    pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
        Filelog::open(self, path)
    }
422 439
    /// Write to disk any updates that were made through `dirstate_map_mut`.
    ///
    /// The "wlock" must be held while calling this.
    /// See for example `try_with_wlock_no_wait`.
    ///
    /// TODO: have a `WritableRepo` type only accessible while holding the
    /// lock?
    pub fn write_dirstate(&self) -> Result<(), DirstateError> {
        let map = self.dirstate_map()?;
        // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if
        // it’s unset
        let parents = self.dirstate_parents()?;
        let (packed_dirstate, old_uuid_to_remove) = if self.has_dirstate_v2() {
            let uuid_opt = self
                .dirstate_data_file_uuid
                .get_or_init(|| self.read_dirstate_data_file_uuid())?;
            let uuid_opt = uuid_opt.as_ref();
            // With an existing data file appending is possible; without one
            // a new data file must be created.
            let write_mode = if uuid_opt.is_some() {
                DirstateMapWriteMode::Auto
            } else {
                DirstateMapWriteMode::ForceNewDataFile
            };
            let (data, tree_metadata, append, old_data_size) =
                map.pack_v2(write_mode)?;

            // Reuse the uuid, or generate a new one, keeping the old for
            // deletion.
            let (uuid, old_uuid) = match uuid_opt {
                Some(uuid) => {
                    let as_str = std::str::from_utf8(uuid)
                        .map_err(|_| {
                            HgError::corrupted(
                                "non-UTF-8 dirstate data file ID",
                            )
                        })?
                        .to_owned();
                    if append {
                        (as_str, None)
                    } else {
                        (DirstateDocket::new_uid(), Some(as_str))
                    }
                }
                None => (DirstateDocket::new_uid(), None),
            };

            let data_filename = format!("dirstate.{}", uuid);
            let data_filename = self.hg_vfs().join(data_filename);
            let mut options = std::fs::OpenOptions::new();
            options.write(true);

            // Why are we not using the O_APPEND flag when appending?
            //
            // - O_APPEND makes it trickier to deal with garbage at the end of
            //   the file, left by a previous uncommitted transaction. By
            //   starting the write at [old_data_size] we make sure we erase
            //   all such garbage.
            //
            // - O_APPEND requires to special-case 0-byte writes, whereas we
            //   don't need that.
            //
            // - Some OSes have bugs in implementation O_APPEND:
            //   revlog.py talks about a Solaris bug, but we also saw some ZFS
            //   bug: https://github.com/openzfs/zfs/pull/3124,
            //   https://github.com/openzfs/zfs/issues/13370
            //
            if !append {
                log::trace!("creating a new dirstate data file");
                options.create_new(true);
            } else {
                log::trace!("appending to the dirstate data file");
            }

            let data_size = (|| {
                // TODO: loop and try another random ID if !append and this
                // returns `ErrorKind::AlreadyExists`? Collision chance of two
                // random IDs is one in 2**32
                let mut file = options.open(&data_filename)?;
                if append {
                    // Start writing at the committed end, overwriting any
                    // trailing garbage (see O_APPEND note above).
                    file.seek(SeekFrom::Start(old_data_size as u64))?;
                }
                file.write_all(&data)?;
                file.flush()?;
                // Final position == size to record in the docket.
                file.seek(SeekFrom::Current(0))
            })()
            .when_writing_file(&data_filename)?;

            let packed_dirstate = DirstateDocket::serialize(
                parents,
                tree_metadata,
                data_size,
                uuid.as_bytes(),
            )
            .map_err(|_: std::num::TryFromIntError| {
                HgError::corrupted("overflow in dirstate docket serialization")
            })?;

            (packed_dirstate, old_uuid)
        } else {
            (map.pack_v1(parents)?, None)
        };

        let vfs = self.hg_vfs();
        // The docket (or v1 dirstate) write is what makes the new state
        // visible, so it is atomic and happens last.
        vfs.atomic_write("dirstate", &packed_dirstate)?;
        if let Some(uuid) = old_uuid_to_remove {
            // Remove the old data file after the new docket pointing to the
            // new data file was written.
            vfs.remove_file(format!("dirstate.{}", uuid))?;
        }
        Ok(())
    }
533 550 }
534 551
/// Lazily-initialized component of `Repo` with interior mutability
///
/// This differs from `OnceCell` in that the value can still be "deinitialized"
/// later by setting its inner `Option` to `None`. It also takes the
/// initialization function as an argument when the value is requested, not
/// when the instance is created.
struct LazyCell<T> {
    // `None` until first initialized (or after deinitialization).
    value: RefCell<Option<T>>,
}
544 561
545 562 impl<T> LazyCell<T> {
546 563 fn new() -> Self {
547 564 Self {
548 565 value: RefCell::new(None),
549 566 }
550 567 }
551 568
552 569 fn set(&self, value: T) {
553 570 *self.value.borrow_mut() = Some(value)
554 571 }
555 572
556 573 fn get_or_init<E>(
557 574 &self,
558 575 init: impl Fn() -> Result<T, E>,
559 576 ) -> Result<Ref<T>, E> {
560 577 let mut borrowed = self.value.borrow();
561 578 if borrowed.is_none() {
562 579 drop(borrowed);
563 580 // Only use `borrow_mut` if it is really needed to avoid panic in
564 581 // case there is another outstanding borrow but mutation is not
565 582 // needed.
566 583 *self.value.borrow_mut() = Some(init()?);
567 584 borrowed = self.value.borrow()
568 585 }
569 586 Ok(Ref::map(borrowed, |option| option.as_ref().unwrap()))
570 587 }
571 588
572 589 fn get_mut_or_init<E>(
573 590 &self,
574 591 init: impl Fn() -> Result<T, E>,
575 592 ) -> Result<RefMut<T>, E> {
576 593 let mut borrowed = self.value.borrow_mut();
577 594 if borrowed.is_none() {
578 595 *borrowed = Some(init()?);
579 596 }
580 597 Ok(RefMut::map(borrowed, |option| option.as_mut().unwrap()))
581 598 }
582 599 }
@@ -1,550 +1,551 b''
1 1 // dirstate_map.rs
2 2 //
3 3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 4 //
5 5 // This software may be used and distributed according to the terms of the
6 6 // GNU General Public License version 2 or any later version.
7 7
8 8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
9 9 //! `hg-core` package.
10 10
11 11 use std::cell::{RefCell, RefMut};
12 12 use std::convert::TryInto;
13 13
14 14 use cpython::{
15 15 exc, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList, PyNone, PyObject,
16 16 PyResult, Python, PythonObject, ToPyObject, UnsafePyLeaked,
17 17 };
18 18 use hg::dirstate::{ParentFileData, TruncatedTimestamp};
19 19
20 20 use crate::{
21 21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
22 22 dirstate::item::DirstateItem,
23 23 pybytes_deref::PyBytesDeref,
24 24 };
25 25 use hg::{
26 26 dirstate::StateMapIter, dirstate_tree::dirstate_map::DirstateMapWriteMode,
27 27 dirstate_tree::on_disk::DirstateV2ParseError,
28 28 dirstate_tree::owning::OwningDirstateMap, revlog::Node,
29 29 utils::files::normalize_case, utils::hg_path::HgPath, DirstateEntry,
30 30 DirstateError, DirstateParents,
31 31 };
32 32
33 33 // TODO
34 34 // This object needs to share references to multiple members of its Rust
35 35 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
36 36 // Right now `CopyMap` is done, but it needs to have an explicit reference
37 37 // to `RustDirstateMap` which itself needs to have an encapsulation for
38 38 // every method in `CopyMap` (copymapcopy, etc.).
39 39 // This is ugly and hard to maintain.
40 40 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
41 41 // `py_class!` is already implemented and does not mention
42 42 // `RustDirstateMap`, rightfully so.
43 43 // All attributes also have to have a separate refcount data attribute for
44 44 // leaks, with all methods that go along for reference sharing.
45 45 py_class!(pub class DirstateMap |py| {
46 46 @shared data inner: OwningDirstateMap;
47 47
48 48 /// Returns a `(dirstate_map, parents)` tuple
49 49 @staticmethod
50 50 def new_v1(
51 51 on_disk: PyBytes,
52 52 ) -> PyResult<PyObject> {
53 53 let on_disk = PyBytesDeref::new(py, on_disk);
54 54 let (map, parents) = OwningDirstateMap::new_v1(on_disk)
55 55 .map_err(|e| dirstate_error(py, e))?;
56 56 let map = Self::create_instance(py, map)?;
57 57 let p1 = PyBytes::new(py, parents.p1.as_bytes());
58 58 let p2 = PyBytes::new(py, parents.p2.as_bytes());
59 59 let parents = (p1, p2);
60 60 Ok((map, parents).to_py_object(py).into_object())
61 61 }
62 62
63 63 /// Returns a DirstateMap
64 64 @staticmethod
65 65 def new_v2(
66 66 on_disk: PyBytes,
67 67 data_size: usize,
68 68 tree_metadata: PyBytes,
69 69 ) -> PyResult<PyObject> {
70 70 let dirstate_error = |e: DirstateError| {
71 71 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
72 72 };
73 73 let on_disk = PyBytesDeref::new(py, on_disk);
74 74 let map = OwningDirstateMap::new_v2(
75 75 on_disk, data_size, tree_metadata.data(py),
76 76 ).map_err(dirstate_error)?;
77 77 let map = Self::create_instance(py, map)?;
78 78 Ok(map.into_object())
79 79 }
80 80
81 81 def clear(&self) -> PyResult<PyObject> {
82 82 self.inner(py).borrow_mut().clear();
83 83 Ok(py.None())
84 84 }
85 85
86 86 def get(
87 87 &self,
88 88 key: PyObject,
89 89 default: Option<PyObject> = None
90 90 ) -> PyResult<Option<PyObject>> {
91 91 let key = key.extract::<PyBytes>(py)?;
92 92 match self
93 93 .inner(py)
94 94 .borrow()
95 95 .get(HgPath::new(key.data(py)))
96 96 .map_err(|e| v2_error(py, e))?
97 97 {
98 98 Some(entry) => {
99 99 Ok(Some(DirstateItem::new_as_pyobject(py, entry)?))
100 100 },
101 101 None => Ok(default)
102 102 }
103 103 }
104 104
105 105 def set_tracked(&self, f: PyObject) -> PyResult<PyBool> {
106 106 let bytes = f.extract::<PyBytes>(py)?;
107 107 let path = HgPath::new(bytes.data(py));
108 108 let res = self.inner(py).borrow_mut().set_tracked(path);
109 109 let was_tracked = res.or_else(|_| {
110 110 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
111 111 })?;
112 112 Ok(was_tracked.to_py_object(py))
113 113 }
114 114
115 115 def set_untracked(&self, f: PyObject) -> PyResult<PyBool> {
116 116 let bytes = f.extract::<PyBytes>(py)?;
117 117 let path = HgPath::new(bytes.data(py));
118 118 let res = self.inner(py).borrow_mut().set_untracked(path);
119 119 let was_tracked = res.or_else(|_| {
120 120 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
121 121 })?;
122 122 Ok(was_tracked.to_py_object(py))
123 123 }
124 124
125 125 def set_clean(
126 126 &self,
127 127 f: PyObject,
128 128 mode: u32,
129 129 size: u32,
130 130 mtime: (i64, u32, bool)
131 131 ) -> PyResult<PyNone> {
132 132 let (mtime_s, mtime_ns, second_ambiguous) = mtime;
133 133 let timestamp = TruncatedTimestamp::new_truncate(
134 134 mtime_s, mtime_ns, second_ambiguous
135 135 );
136 136 let bytes = f.extract::<PyBytes>(py)?;
137 137 let path = HgPath::new(bytes.data(py));
138 138 let res = self.inner(py).borrow_mut().set_clean(
139 139 path, mode, size, timestamp,
140 140 );
141 141 res.or_else(|_| {
142 142 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
143 143 })?;
144 144 Ok(PyNone)
145 145 }
146 146
147 147 def set_possibly_dirty(&self, f: PyObject) -> PyResult<PyNone> {
148 148 let bytes = f.extract::<PyBytes>(py)?;
149 149 let path = HgPath::new(bytes.data(py));
150 150 let res = self.inner(py).borrow_mut().set_possibly_dirty(path);
151 151 res.or_else(|_| {
152 152 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
153 153 })?;
154 154 Ok(PyNone)
155 155 }
156 156
157 157 def reset_state(
158 158 &self,
159 159 f: PyObject,
160 160 wc_tracked: bool,
161 161 p1_tracked: bool,
162 162 p2_info: bool,
163 163 has_meaningful_mtime: bool,
164 164 parentfiledata: Option<(u32, u32, Option<(i64, u32, bool)>)>,
165 165 ) -> PyResult<PyNone> {
166 166 let mut has_meaningful_mtime = has_meaningful_mtime;
167 167 let parent_file_data = match parentfiledata {
168 168 None => {
169 169 has_meaningful_mtime = false;
170 170 None
171 171 },
172 172 Some(data) => {
173 173 let (mode, size, mtime_info) = data;
174 174 let mtime = if let Some(mtime_info) = mtime_info {
175 175 let (mtime_s, mtime_ns, second_ambiguous) = mtime_info;
176 176 let timestamp = TruncatedTimestamp::new_truncate(
177 177 mtime_s, mtime_ns, second_ambiguous
178 178 );
179 179 Some(timestamp)
180 180 } else {
181 181 has_meaningful_mtime = false;
182 182 None
183 183 };
184 184 Some(ParentFileData {
185 185 mode_size: Some((mode, size)),
186 186 mtime,
187 187 })
188 188 }
189 189 };
190 190 let bytes = f.extract::<PyBytes>(py)?;
191 191 let path = HgPath::new(bytes.data(py));
192 192 let res = self.inner(py).borrow_mut().reset_state(
193 193 path,
194 194 wc_tracked,
195 195 p1_tracked,
196 196 p2_info,
197 197 has_meaningful_mtime,
198 198 parent_file_data,
199 199 );
200 200 res.or_else(|_| {
201 201 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
202 202 })?;
203 203 Ok(PyNone)
204 204 }
205 205
206 206 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
207 207 let d = d.extract::<PyBytes>(py)?;
208 208 Ok(self.inner(py).borrow_mut()
209 209 .has_tracked_dir(HgPath::new(d.data(py)))
210 210 .map_err(|e| {
211 211 PyErr::new::<exc::ValueError, _>(py, e.to_string())
212 212 })?
213 213 .to_py_object(py))
214 214 }
215 215
216 216 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
217 217 let d = d.extract::<PyBytes>(py)?;
218 218 Ok(self.inner(py).borrow_mut()
219 219 .has_dir(HgPath::new(d.data(py)))
220 220 .map_err(|e| {
221 221 PyErr::new::<exc::ValueError, _>(py, e.to_string())
222 222 })?
223 223 .to_py_object(py))
224 224 }
225 225
226 226 def write_v1(
227 227 &self,
228 228 p1: PyObject,
229 229 p2: PyObject,
230 230 ) -> PyResult<PyBytes> {
231 231 let inner = self.inner(py).borrow();
232 232 let parents = DirstateParents {
233 233 p1: extract_node_id(py, &p1)?,
234 234 p2: extract_node_id(py, &p2)?,
235 235 };
236 236 let result = inner.pack_v1(parents);
237 237 match result {
238 238 Ok(packed) => Ok(PyBytes::new(py, &packed)),
239 239 Err(_) => Err(PyErr::new::<exc::OSError, _>(
240 240 py,
241 241 "Dirstate error".to_string(),
242 242 )),
243 243 }
244 244 }
245 245
246 246 /// Returns new data together with whether that data should be appended to
247 247 /// the existing data file whose content is at `self.on_disk` (True),
248 248 /// instead of written to a new data file (False).
249 249 def write_v2(
250 250 &self,
251 251 write_mode: usize,
252 252 ) -> PyResult<PyObject> {
253 253 let inner = self.inner(py).borrow();
254 254 let rust_write_mode = match write_mode {
255 255 0 => DirstateMapWriteMode::Auto,
256 256 1 => DirstateMapWriteMode::ForceNewDataFile,
257 2 => DirstateMapWriteMode::ForceAppend,
257 258 _ => DirstateMapWriteMode::Auto, // XXX should we error out?
258 259 };
259 260 let result = inner.pack_v2(rust_write_mode);
260 261 match result {
261 262 Ok((packed, tree_metadata, append, _old_data_size)) => {
262 263 let packed = PyBytes::new(py, &packed);
263 264 let tree_metadata = PyBytes::new(py, tree_metadata.as_bytes());
264 265 let tuple = (packed, tree_metadata, append);
265 266 Ok(tuple.to_py_object(py).into_object())
266 267 },
267 268 Err(_) => Err(PyErr::new::<exc::OSError, _>(
268 269 py,
269 270 "Dirstate error".to_string(),
270 271 )),
271 272 }
272 273 }
273 274
274 275 def filefoldmapasdict(&self) -> PyResult<PyDict> {
275 276 let dict = PyDict::new(py);
276 277 for item in self.inner(py).borrow_mut().iter() {
277 278 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
278 279 if !entry.removed() {
279 280 let key = normalize_case(path);
280 281 let value = path;
281 282 dict.set_item(
282 283 py,
283 284 PyBytes::new(py, key.as_bytes()).into_object(),
284 285 PyBytes::new(py, value.as_bytes()).into_object(),
285 286 )?;
286 287 }
287 288 }
288 289 Ok(dict)
289 290 }
290 291
291 292 def __len__(&self) -> PyResult<usize> {
292 293 Ok(self.inner(py).borrow().len())
293 294 }
294 295
295 296 def __contains__(&self, key: PyObject) -> PyResult<bool> {
296 297 let key = key.extract::<PyBytes>(py)?;
297 298 self.inner(py)
298 299 .borrow()
299 300 .contains_key(HgPath::new(key.data(py)))
300 301 .map_err(|e| v2_error(py, e))
301 302 }
302 303
303 304 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
304 305 let key = key.extract::<PyBytes>(py)?;
305 306 let key = HgPath::new(key.data(py));
306 307 match self
307 308 .inner(py)
308 309 .borrow()
309 310 .get(key)
310 311 .map_err(|e| v2_error(py, e))?
311 312 {
312 313 Some(entry) => {
313 314 Ok(DirstateItem::new_as_pyobject(py, entry)?)
314 315 },
315 316 None => Err(PyErr::new::<exc::KeyError, _>(
316 317 py,
317 318 String::from_utf8_lossy(key.as_bytes()),
318 319 )),
319 320 }
320 321 }
321 322
322 323 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
323 324 let leaked_ref = self.inner(py).leak_immutable();
324 325 DirstateMapKeysIterator::from_inner(
325 326 py,
326 327 unsafe { leaked_ref.map(py, |o| o.iter()) },
327 328 )
328 329 }
329 330
330 331 def items(&self) -> PyResult<DirstateMapItemsIterator> {
331 332 let leaked_ref = self.inner(py).leak_immutable();
332 333 DirstateMapItemsIterator::from_inner(
333 334 py,
334 335 unsafe { leaked_ref.map(py, |o| o.iter()) },
335 336 )
336 337 }
337 338
338 339 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
339 340 let leaked_ref = self.inner(py).leak_immutable();
340 341 DirstateMapKeysIterator::from_inner(
341 342 py,
342 343 unsafe { leaked_ref.map(py, |o| o.iter()) },
343 344 )
344 345 }
345 346
346 347 // TODO all copymap* methods, see docstring above
347 348 def copymapcopy(&self) -> PyResult<PyDict> {
348 349 let dict = PyDict::new(py);
349 350 for item in self.inner(py).borrow().copy_map_iter() {
350 351 let (key, value) = item.map_err(|e| v2_error(py, e))?;
351 352 dict.set_item(
352 353 py,
353 354 PyBytes::new(py, key.as_bytes()),
354 355 PyBytes::new(py, value.as_bytes()),
355 356 )?;
356 357 }
357 358 Ok(dict)
358 359 }
359 360
360 361 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
361 362 let key = key.extract::<PyBytes>(py)?;
362 363 match self
363 364 .inner(py)
364 365 .borrow()
365 366 .copy_map_get(HgPath::new(key.data(py)))
366 367 .map_err(|e| v2_error(py, e))?
367 368 {
368 369 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
369 370 None => Err(PyErr::new::<exc::KeyError, _>(
370 371 py,
371 372 String::from_utf8_lossy(key.data(py)),
372 373 )),
373 374 }
374 375 }
375 376 def copymap(&self) -> PyResult<CopyMap> {
376 377 CopyMap::from_inner(py, self.clone_ref(py))
377 378 }
378 379
379 380 def copymaplen(&self) -> PyResult<usize> {
380 381 Ok(self.inner(py).borrow().copy_map_len())
381 382 }
382 383 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
383 384 let key = key.extract::<PyBytes>(py)?;
384 385 self.inner(py)
385 386 .borrow()
386 387 .copy_map_contains_key(HgPath::new(key.data(py)))
387 388 .map_err(|e| v2_error(py, e))
388 389 }
389 390 def copymapget(
390 391 &self,
391 392 key: PyObject,
392 393 default: Option<PyObject>
393 394 ) -> PyResult<Option<PyObject>> {
394 395 let key = key.extract::<PyBytes>(py)?;
395 396 match self
396 397 .inner(py)
397 398 .borrow()
398 399 .copy_map_get(HgPath::new(key.data(py)))
399 400 .map_err(|e| v2_error(py, e))?
400 401 {
401 402 Some(copy) => Ok(Some(
402 403 PyBytes::new(py, copy.as_bytes()).into_object(),
403 404 )),
404 405 None => Ok(default),
405 406 }
406 407 }
407 408 def copymapsetitem(
408 409 &self,
409 410 key: PyObject,
410 411 value: PyObject
411 412 ) -> PyResult<PyObject> {
412 413 let key = key.extract::<PyBytes>(py)?;
413 414 let value = value.extract::<PyBytes>(py)?;
414 415 self.inner(py)
415 416 .borrow_mut()
416 417 .copy_map_insert(
417 418 HgPath::new(key.data(py)),
418 419 HgPath::new(value.data(py)),
419 420 )
420 421 .map_err(|e| v2_error(py, e))?;
421 422 Ok(py.None())
422 423 }
423 424 def copymappop(
424 425 &self,
425 426 key: PyObject,
426 427 default: Option<PyObject>
427 428 ) -> PyResult<Option<PyObject>> {
428 429 let key = key.extract::<PyBytes>(py)?;
429 430 match self
430 431 .inner(py)
431 432 .borrow_mut()
432 433 .copy_map_remove(HgPath::new(key.data(py)))
433 434 .map_err(|e| v2_error(py, e))?
434 435 {
435 436 Some(copy) => Ok(Some(
436 437 PyBytes::new(py, copy.as_bytes()).into_object(),
437 438 )),
438 439 None => Ok(default),
439 440 }
440 441 }
441 442
442 443 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
443 444 let leaked_ref = self.inner(py).leak_immutable();
444 445 CopyMapKeysIterator::from_inner(
445 446 py,
446 447 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
447 448 )
448 449 }
449 450
450 451 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
451 452 let leaked_ref = self.inner(py).leak_immutable();
452 453 CopyMapItemsIterator::from_inner(
453 454 py,
454 455 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
455 456 )
456 457 }
457 458
458 459 def tracked_dirs(&self) -> PyResult<PyList> {
459 460 let dirs = PyList::new(py, &[]);
460 461 for path in self.inner(py).borrow_mut().iter_tracked_dirs()
461 462 .map_err(|e |dirstate_error(py, e))?
462 463 {
463 464 let path = path.map_err(|e| v2_error(py, e))?;
464 465 let path = PyBytes::new(py, path.as_bytes());
465 466 dirs.append(py, path.into_object())
466 467 }
467 468 Ok(dirs)
468 469 }
469 470
470 471 def setparents_fixup(&self) -> PyResult<PyDict> {
471 472 let dict = PyDict::new(py);
472 473 let copies = self.inner(py).borrow_mut().setparents_fixup();
473 474 for (key, value) in copies.map_err(|e| v2_error(py, e))? {
474 475 dict.set_item(
475 476 py,
476 477 PyBytes::new(py, key.as_bytes()),
477 478 PyBytes::new(py, value.as_bytes()),
478 479 )?;
479 480 }
480 481 Ok(dict)
481 482 }
482 483
483 484 def debug_iter(&self, all: bool) -> PyResult<PyList> {
484 485 let dirs = PyList::new(py, &[]);
485 486 for item in self.inner(py).borrow().debug_iter(all) {
486 487 let (path, (state, mode, size, mtime)) =
487 488 item.map_err(|e| v2_error(py, e))?;
488 489 let path = PyBytes::new(py, path.as_bytes());
489 490 let item = (path, state, mode, size, mtime);
490 491 dirs.append(py, item.to_py_object(py).into_object())
491 492 }
492 493 Ok(dirs)
493 494 }
494 495 });
495 496
496 497 impl DirstateMap {
497 498 pub fn get_inner_mut<'a>(
498 499 &'a self,
499 500 py: Python<'a>,
500 501 ) -> RefMut<'a, OwningDirstateMap> {
501 502 self.inner(py).borrow_mut()
502 503 }
503 504 fn translate_key(
504 505 py: Python,
505 506 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
506 507 ) -> PyResult<Option<PyBytes>> {
507 508 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
508 509 Ok(Some(PyBytes::new(py, f.as_bytes())))
509 510 }
510 511 fn translate_key_value(
511 512 py: Python,
512 513 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
513 514 ) -> PyResult<Option<(PyBytes, PyObject)>> {
514 515 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
515 516 Ok(Some((
516 517 PyBytes::new(py, f.as_bytes()),
517 518 DirstateItem::new_as_pyobject(py, entry)?,
518 519 )))
519 520 }
520 521 }
521 522
522 523 py_shared_iterator!(
523 524 DirstateMapKeysIterator,
524 525 UnsafePyLeaked<StateMapIter<'static>>,
525 526 DirstateMap::translate_key,
526 527 Option<PyBytes>
527 528 );
528 529
529 530 py_shared_iterator!(
530 531 DirstateMapItemsIterator,
531 532 UnsafePyLeaked<StateMapIter<'static>>,
532 533 DirstateMap::translate_key_value,
533 534 Option<(PyBytes, PyObject)>
534 535 );
535 536
536 537 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
537 538 let bytes = obj.extract::<PyBytes>(py)?;
538 539 match bytes.data(py).try_into() {
539 540 Ok(s) => Ok(s),
540 541 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
541 542 }
542 543 }
543 544
544 545 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
545 546 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
546 547 }
547 548
548 549 fn dirstate_error(py: Python<'_>, e: DirstateError) -> PyErr {
549 550 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
550 551 }
@@ -1,433 +1,433 b''
1 1 =====================================================================
2 2 Check potential race conditions between a status and other operations
3 3 =====================================================================
4 4
5 5 #testcases dirstate-v1 dirstate-v2
6 6
 7 7 The `hg status` command can run without the wlock; however, it might end up
8 8 having to update the on-disk dirstate files, for example to mark ambiguous
9 9 files as clean, or to update directory caches information with dirstate-v2.
10 10
11 11
12 12 If another process updates the dirstate in the meantime we might run into
13 13 trouble. Especially, commands doing semantic changes like `hg add` or
14 14 `hg commit` should not see their update erased by a concurrent status.
15 15
16 16 Unlike commands like `add` or `commit`, `status` only writes the dirstate
17 17 to update caches, no actual information is lost if we fail to write to disk.
18 18
19 19
20 20 This test file is meant to test various cases where such parallel operations
21 21 between a status with reasons to update the dirstate and another semantic
22 22 changes happen.
23 23
24 24
25 25 Setup
26 26 =====
27 27
28 28 $ cat >> $HGRCPATH << EOF
29 29 > [storage]
30 30 > dirstate-v2.slow-path=allow
31 31 > EOF
32 32
33 33 #if dirstate-v2
34 34 $ cat >> $HGRCPATH << EOF
35 35 > [format]
36 36 > use-dirstate-v2=yes
37 37 > EOF
38 38 #else
39 39 $ cat >> $HGRCPATH << EOF
40 40 > [format]
41 41 > use-dirstate-v2=no
42 42 > EOF
43 43 #endif
44 44
45 45 $ directories="dir dir/nested dir2"
46 46 $ first_files="dir/nested/a dir/b dir/c dir/d dir2/e f"
47 47 $ second_files="g dir/nested/h dir/i dir/j dir2/k dir2/l dir/nested/m"
48 48 $ extra_files="dir/n dir/o p q"
49 49
50 50 $ hg init reference-repo
51 51 $ cd reference-repo
52 52 $ mkdir -p dir/nested dir2
53 53 $ touch -t 200001010000 $first_files $directories
54 54 $ hg commit -Aqm "recreate a bunch of files to facilitate dirstate-v2 append"
55 55 $ touch -t 200001010010 $second_files $directories
56 56 $ hg commit -Aqm "more files to have two commits"
57 57 $ hg log -G -v
58 58 @ changeset: 1:c349430a1631
59 59 | tag: tip
60 60 | user: test
61 61 | date: Thu Jan 01 00:00:00 1970 +0000
62 62 | files: dir/i dir/j dir/nested/h dir/nested/m dir2/k dir2/l g
63 63 | description:
64 64 | more files to have two commits
65 65 |
66 66 |
67 67 o changeset: 0:4f23db756b09
68 68 user: test
69 69 date: Thu Jan 01 00:00:00 1970 +0000
70 70 files: dir/b dir/c dir/d dir/nested/a dir2/e f
71 71 description:
72 72 recreate a bunch of files to facilitate dirstate-v2 append
73 73
74 74
75 75 $ hg manifest
76 76 dir/b
77 77 dir/c
78 78 dir/d
79 79 dir/i
80 80 dir/j
81 81 dir/nested/a
82 82 dir/nested/h
83 83 dir/nested/m
84 84 dir2/e
85 85 dir2/k
86 86 dir2/l
87 87 f
88 88 g
89 89
90 90 Add some unknown files and refresh the dirstate
91 91
92 92 $ touch -t 200001010020 $extra_files
93 93 $ hg add dir/o
94 94 $ hg remove dir/nested/m
95 95
96 $ hg st
96 $ hg st --config devel.dirstate.v2.data_update_mode=force-new
97 97 A dir/o
98 98 R dir/nested/m
99 99 ? dir/n
100 100 ? p
101 101 ? q
102 102 $ hg debugstate
103 103 n 644 0 2000-01-01 00:00:00 dir/b
104 104 n 644 0 2000-01-01 00:00:00 dir/c
105 105 n 644 0 2000-01-01 00:00:00 dir/d
106 106 n 644 0 2000-01-01 00:10:00 dir/i
107 107 n 644 0 2000-01-01 00:10:00 dir/j
108 108 n 644 0 2000-01-01 00:00:00 dir/nested/a
109 109 n 644 0 2000-01-01 00:10:00 dir/nested/h
110 110 r ?????????????????????????????????? dir/nested/m (glob)
111 111 a ?????????????????????????????????? dir/o (glob)
112 112 n 644 0 2000-01-01 00:00:00 dir2/e
113 113 n 644 0 2000-01-01 00:10:00 dir2/k
114 114 n 644 0 2000-01-01 00:10:00 dir2/l
115 115 n 644 0 2000-01-01 00:00:00 f
116 116 n 644 0 2000-01-01 00:10:00 g
117 117 $ hg debugstate > ../reference
118 118 $ cd ..
119 119
120 120 Explain / verify the test principles
121 121 ------------------------------------
122 122
123 123 First, we can properly copy the reference
124 124
125 125 $ cp -a reference-repo sanity-check
126 126 $ cd sanity-check
127 127 $ hg debugstate
128 128 n 644 0 2000-01-01 00:00:00 dir/b
129 129 n 644 0 2000-01-01 00:00:00 dir/c
130 130 n 644 0 2000-01-01 00:00:00 dir/d
131 131 n 644 0 2000-01-01 00:10:00 dir/i
132 132 n 644 0 2000-01-01 00:10:00 dir/j
133 133 n 644 0 2000-01-01 00:00:00 dir/nested/a
134 134 n 644 0 2000-01-01 00:10:00 dir/nested/h
135 135 r ?????????????????????????????????? dir/nested/m (glob)
136 136 a ?????????????????????????????????? dir/o (glob)
137 137 n 644 0 2000-01-01 00:00:00 dir2/e
138 138 n 644 0 2000-01-01 00:10:00 dir2/k
139 139 n 644 0 2000-01-01 00:10:00 dir2/l
140 140 n 644 0 2000-01-01 00:00:00 f
141 141 n 644 0 2000-01-01 00:10:00 g
142 142 $ hg debugstate > ../post-copy
143 143 $ diff ../reference ../post-copy
144 144
145 145 And status thinks the cache is in a proper state
146 146
147 147 $ hg st
148 148 A dir/o
149 149 R dir/nested/m
150 150 ? dir/n
151 151 ? p
152 152 ? q
153 153 $ hg debugstate
154 154 n 644 0 2000-01-01 00:00:00 dir/b
155 155 n 644 0 2000-01-01 00:00:00 dir/c
156 156 n 644 0 2000-01-01 00:00:00 dir/d
157 157 n 644 0 2000-01-01 00:10:00 dir/i
158 158 n 644 0 2000-01-01 00:10:00 dir/j
159 159 n 644 0 2000-01-01 00:00:00 dir/nested/a
160 160 n 644 0 2000-01-01 00:10:00 dir/nested/h
161 161 r ?????????????????????????????????? dir/nested/m (glob)
162 162 a ?????????????????????????????????? dir/o (glob)
163 163 n 644 0 2000-01-01 00:00:00 dir2/e
164 164 n 644 0 2000-01-01 00:10:00 dir2/k
165 165 n 644 0 2000-01-01 00:10:00 dir2/l
166 166 n 644 0 2000-01-01 00:00:00 f
167 167 n 644 0 2000-01-01 00:10:00 g
168 168 $ hg debugstate > ../post-status
169 169 $ diff ../reference ../post-status
170 170
171 171 Then we can start a status that:
172 172 - has some update to do (the touch call)
173 173 - will wait AFTER running status, but before updating the cache on disk
174 174
175 175 $ touch -t 200001010001 dir/c
176 176 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
177 177 > --config rhg.on-unsupported=abort \
178 178 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
179 179 > &
180 180 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
181 181
182 182 We check it runs the status first by modifying a file and updating another timestamp
183 183
184 184 $ touch -t 200001010003 dir/i
185 185 $ echo babar > dir/j
186 186 $ touch $TESTTMP/status-race-lock
187 187 $ wait
188 188
189 189 The test process should have reported a status before the change we made,
190 190 and should have missed the timestamp update
191 191
192 192 $ cat $TESTTMP/status-race-lock.out
193 193 A dir/o
194 194 R dir/nested/m
195 195 ? dir/n
196 196 ? p
197 197 ? q
198 198 $ cat $TESTTMP/status-race-lock.log
199 199 $ hg debugstate | grep dir/c
200 200 n 644 0 2000-01-01 00:01:00 dir/c
201 201 $ hg debugstate | grep dir/i
202 202 n 644 0 2000-01-01 00:10:00 dir/i
203 203 $ hg debugstate | grep dir/j
204 204 n 644 0 2000-01-01 00:10:00 dir/j
205 205
206 206 final cleanup
207 207
208 208 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
209 209 $ cd ..
210 210
211 211 Actual Testing
212 212 ==============
213 213
214 214 Race with a `hg add`
215 215 -------------------
216 216
217 217 $ cp -a reference-repo race-with-add
218 218 $ cd race-with-add
219 219
220 220 spin a `hg status` with some caches to update
221 221
222 222 $ touch -t 200001020001 f
223 223 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
224 224 > --config rhg.on-unsupported=abort \
225 225 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
226 226 > &
227 227 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
228 228
229 229 Add a file
230 230
231 231 $ hg add dir/n
232 232 $ touch $TESTTMP/status-race-lock
233 233 $ wait
234 234
 235 235 The file should be in an "added" state
236 236
237 237 $ hg status
238 238 A dir/n (no-rhg !)
239 239 A dir/n (rhg no-dirstate-v1 !)
240 240 A dir/n (missing-correct-output rhg dirstate-v1 !)
241 241 A dir/o
242 242 R dir/nested/m
243 243 ? dir/n (known-bad-output rhg dirstate-v1 !)
244 244 ? p
245 245 ? q
246 246
247 247 The status process should return a consistent result and not crash.
248 248
249 249 $ cat $TESTTMP/status-race-lock.out
250 250 A dir/o
251 251 R dir/nested/m
252 252 ? dir/n
253 253 ? p
254 254 ? q
255 255 $ cat $TESTTMP/status-race-lock.log
256 256 abort: when writing $TESTTMP/race-with-add/.hg/dirstate.*: $ENOENT$ (glob) (known-bad-output rhg dirstate-v2 !)
257 257
258 258 final cleanup
259 259
260 260 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
261 261 $ cd ..
262 262
263 263 Race with a `hg commit`
264 264 ----------------------
265 265
266 266 $ cp -a reference-repo race-with-commit
267 267 $ cd race-with-commit
268 268
269 269 spin a `hg status` with some caches to update
270 270
271 271 $ touch -t 200001020001 dir/j
272 272 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
273 273 > --config rhg.on-unsupported=abort \
274 274 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
275 275 > &
276 276 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
277 277
278 278 Add a file and force the data file rewrite
279 279
280 280 $ hg commit -m created-during-status dir/o
281 281 $ touch $TESTTMP/status-race-lock
282 282 $ wait
283 283
284 284 The parent must change and the status should be clean
285 285
286 286 # XXX rhg misbehaves here
287 287 #if no-rhg
288 288 $ hg summary
289 289 parent: 2:2e3b442a2fd4 tip
290 290 created-during-status
291 291 branch: default
292 292 commit: 1 removed, 3 unknown
293 293 update: (current)
294 294 phases: 3 draft
295 295 $ hg status
296 296 R dir/nested/m
297 297 ? dir/n
298 298 ? p
299 299 ? q
300 300 #else
301 301 $ hg summary
302 302 parent: 1:c349430a1631
303 303 more files to have two commits
304 304 branch: default
305 305 commit: 1 added, 1 removed, 3 unknown (new branch head)
306 306 update: 1 new changesets (update)
307 307 phases: 3 draft
308 308 $ hg status
309 309 A dir/o
310 310 R dir/nested/m
311 311 ? dir/n
312 312 ? p
313 313 ? q
314 314 #endif
315 315
316 316 The status process should return a consistent result and not crash.
317 317
318 318 $ cat $TESTTMP/status-race-lock.out
319 319 A dir/o
320 320 R dir/nested/m
321 321 ? dir/n
322 322 ? p
323 323 ? q
324 324 $ cat $TESTTMP/status-race-lock.log
325 325 abort: when removing $TESTTMP/race-with-commit/.hg/dirstate.*: $ENOENT$ (glob) (known-bad-output rhg dirstate-v2 !)
326 326
327 327 final cleanup
328 328
329 329 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
330 330 $ cd ..
331 331
332 332 Race with a `hg update`
333 333 ----------------------
334 334
335 335 $ cp -a reference-repo race-with-update
336 336 $ cd race-with-update
337 337
338 338 spin a `hg status` with some caches to update
339 339
340 340 $ touch -t 200001020001 dir2/k
341 341 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
342 342 > --config rhg.on-unsupported=abort \
343 343 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
344 344 > &
345 345 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
346 346
347 347 Add a file and force the data file rewrite
348 348
349 349 $ hg update ".~1"
350 350 0 files updated, 0 files merged, 6 files removed, 0 files unresolved
351 351 $ touch $TESTTMP/status-race-lock
352 352 $ wait
353 353
354 354 The parent must change and the status should be clean
355 355
356 356 $ hg summary
357 357 parent: 0:4f23db756b09
358 358 recreate a bunch of files to facilitate dirstate-v2 append
359 359 branch: default
360 360 commit: 1 added, 3 unknown (new branch head)
361 361 update: 1 new changesets (update)
362 362 phases: 2 draft
363 363 $ hg status
364 364 A dir/o
365 365 ? dir/n
366 366 ? p
367 367 ? q
368 368
369 369 The status process should return a consistent result and not crash.
370 370
371 371 $ cat $TESTTMP/status-race-lock.out
372 372 A dir/o
373 373 R dir/nested/m
374 374 ? dir/n
375 375 ? p
376 376 ? q
377 377 $ cat $TESTTMP/status-race-lock.log
378 378 abort: when reading $TESTTMP/race-with-update/dir2/k: $ENOENT$ (known-bad-output rhg !)
379 379
380 380 final cleanup
381 381
382 382 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
383 383 $ cd ..
384 384
385 385 Race with another status
386 386 ------------------------
387 387
388 388 $ cp -a reference-repo race-with-status
389 389 $ cd race-with-status
390 390
391 391 spin a `hg status` with some caches to update
392 392
393 393 $ touch -t 200001010030 dir/nested/h
394 394 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
395 395 > --config rhg.on-unsupported=abort \
396 396 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
397 397 > &
398 398 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
399 399
400 400 touch g
401 401
402 402 $ touch -t 200001010025 g
403 403 $ hg status
404 404 A dir/o
405 405 R dir/nested/m
406 406 ? dir/n
407 407 ? p
408 408 ? q
409 409 $ touch $TESTTMP/status-race-lock
410 410 $ wait
411 411
412 412 the first update should be on disk
413 413
414 414 $ hg debugstate --all | grep "g"
415 415 n 644 0 2000-01-01 00:25:00 g (no-rhg !)
416 416 n 644 0 2000-01-01 00:25:00 g (missing-correct-output rhg !)
417 417 n 644 0 2000-01-01 00:10:00 g (known-bad-output rhg !)
418 418
419 419 The status process should return a consistent result and not crash.
420 420
421 421 $ cat $TESTTMP/status-race-lock.out
422 422 A dir/o
423 423 R dir/nested/m
424 424 ? dir/n
425 425 ? p
426 426 ? q
427 427 $ cat $TESTTMP/status-race-lock.log
428 428 abort: when removing $TESTTMP/race-with-status/.hg/dirstate.*: $ENOENT$ (glob) (known-bad-output rhg dirstate-v2 !)
429 429
430 430 final cleanup
431 431
432 432 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
433 433 $ cd ..
@@ -1,259 +1,474 b''
1 1 #testcases dirstate-v1 dirstate-v2
2 2
3 3 #if dirstate-v2
4 4 $ cat >> $HGRCPATH << EOF
5 5 > [format]
6 6 > use-dirstate-v2=1
7 7 > [storage]
8 8 > dirstate-v2.slow-path=allow
9 9 > EOF
10 10 #endif
11 11
12 12 ------ Test dirstate._dirs refcounting
13 13
14 14 $ hg init t
15 15 $ cd t
16 16 $ mkdir -p a/b/c/d
17 17 $ touch a/b/c/d/x
18 18 $ touch a/b/c/d/y
19 19 $ touch a/b/c/d/z
20 20 $ hg ci -Am m
21 21 adding a/b/c/d/x
22 22 adding a/b/c/d/y
23 23 adding a/b/c/d/z
24 24 $ hg mv a z
25 25 moving a/b/c/d/x to z/b/c/d/x
26 26 moving a/b/c/d/y to z/b/c/d/y
27 27 moving a/b/c/d/z to z/b/c/d/z
28 28
29 29 Test name collisions
30 30
31 31 $ rm z/b/c/d/x
32 32 $ mkdir z/b/c/d/x
33 33 $ touch z/b/c/d/x/y
34 34 $ hg add z/b/c/d/x/y
35 35 abort: file 'z/b/c/d/x' in dirstate clashes with 'z/b/c/d/x/y'
36 36 [255]
37 37 $ rm -rf z/b/c/d
38 38 $ touch z/b/c/d
39 39 $ hg add z/b/c/d
40 40 abort: directory 'z/b/c/d' already in dirstate
41 41 [255]
42 42
43 43 $ cd ..
44 44
45 45 Issue1790: dirstate entry locked into unset if file mtime is set into
46 46 the future
47 47
48 48 Prepare test repo:
49 49
50 50 $ hg init u
51 51 $ cd u
52 52 $ echo a > a
53 53 $ hg add
54 54 adding a
55 55 $ hg ci -m1
56 56
57 57 Set mtime of a into the future:
58 58
59 59 $ touch -t 203101011200 a
60 60
61 61 Status must not set a's entry to unset (issue1790):
62 62
63 63 $ hg status
64 64 $ hg debugstate
65 65 n 644 2 2031-01-01 12:00:00 a
66 66
67 67 Test modulo storage/comparison of absurd dates:
68 68
69 69 #if no-aix
70 70 $ touch -t 195001011200 a
71 71 $ hg st
72 72 $ hg debugstate
73 73 n 644 2 2018-01-19 15:14:08 a
74 74 #endif
75 75
76 76 Verify that exceptions during a dirstate change leave the dirstate
77 77 coherent (issue4353)
78 78
79 79 $ cat > ../dirstateexception.py <<EOF
80 80 > from mercurial import (
81 81 > error,
82 82 > extensions,
83 83 > mergestate as mergestatemod,
84 84 > )
85 85 >
86 86 > def wraprecordupdates(*args):
87 87 > raise error.Abort(b"simulated error while recording dirstateupdates")
88 88 >
89 89 > def reposetup(ui, repo):
90 90 > extensions.wrapfunction(mergestatemod, 'recordupdates',
91 91 > wraprecordupdates)
92 92 > EOF
93 93
94 94 $ hg rm a
95 95 $ hg commit -m 'rm a'
96 96 $ echo "[extensions]" >> .hg/hgrc
97 97 $ echo "dirstateex=../dirstateexception.py" >> .hg/hgrc
98 98 $ hg up 0
99 99 abort: simulated error while recording dirstateupdates
100 100 [255]
101 101 $ hg log -r . -T '{rev}\n'
102 102 1
103 103 $ hg status
104 104 ? a
105 105
106 106 #if dirstate-v2
107 107 Check that folders that are prefixes of others do not throw the packer into an
108 108 infinite loop.
109 109
110 110 $ cd ..
111 111 $ hg init infinite-loop
112 112 $ cd infinite-loop
113 113 $ mkdir hgext3rd hgext
114 114 $ touch hgext3rd/__init__.py hgext/zeroconf.py
115 115 $ hg commit -Aqm0
116 116
117 117 $ hg st -c
118 118 C hgext/zeroconf.py
119 119 C hgext3rd/__init__.py
120 120
121 121 $ cd ..
122 122
123 123 Check that the old dirstate data file is removed correctly and the new one is
124 124 valid.
125 125
126 126 $ dirstate_data_files () {
127 127 > find .hg -maxdepth 1 -name "dirstate.*"
128 128 > }
129 129
130 130 $ find_dirstate_uuid () {
131 131 > hg debugstate --docket | grep uuid | sed 's/.*uuid: \(.*\)/\1/'
132 132 > }
133 133
134 134 $ find_dirstate_data_size () {
135 135 > hg debugstate --docket | grep 'size of dirstate data' | sed 's/.*size of dirstate data: \(.*\)/\1/'
136 136 > }
137 137
138 138 $ dirstate_uuid_has_not_changed () {
139 139 > # Non-Rust always rewrites the whole dirstate
140 140 > if [ $# -eq 1 ] || ([ -n "$HGMODULEPOLICY" ] && [ -z "${HGMODULEPOLICY##*rust*}" ]) || [ -n "$RHG_INSTALLED_AS_HG" ]; then
141 141 > test $current_uid = $(find_dirstate_uuid)
142 142 > else
143 143 > echo "not testing because using Python implementation"
144 144 > fi
145 145 > }
146 146
147 147 $ cd ..
148 148 $ hg init append-mostly
149 149 $ cd append-mostly
150 150 $ mkdir dir dir2
151 151 $ touch -t 200001010000 dir/a dir/b dir/c dir/d dir/e dir2/f dir dir2
152 152 $ hg commit -Aqm initial
153 153 $ hg st
154 154 $ dirstate_data_files | wc -l
155 155 *1 (re)
156 156 $ current_uid=$(find_dirstate_uuid)
157 157
158 158 Nothing changes here
159 159
160 160 $ hg st
161 161 $ dirstate_data_files | wc -l
162 162 *1 (re)
163 163 $ dirstate_uuid_has_not_changed
164 164 not testing because using Python implementation (no-rust no-rhg !)
165 165
166 166 Trigger an append with a small change to directory mtime
167 167
168 168 $ current_data_size=$(find_dirstate_data_size)
169 169 $ touch -t 201001010000 dir2
170 170 $ hg st
171 171 $ dirstate_data_files | wc -l
172 172 *1 (re)
173 173 $ dirstate_uuid_has_not_changed
174 174 not testing because using Python implementation (no-rust no-rhg !)
175 175 $ new_data_size=$(find_dirstate_data_size)
176 176 $ [ "$current_data_size" -eq "$new_data_size" ]; echo $?
177 177 0 (no-rust no-rhg !)
178 178 1 (rust !)
179 179 1 (no-rust rhg !)
180 180
181 181 Unused bytes counter is non-0 when appending
182 182 $ touch file
183 183 $ hg add file
184 184 $ current_uid=$(find_dirstate_uuid)
185 185
186 186 Trigger a rust/rhg run which updates the unused bytes value
187 187 $ hg st
188 188 A file
189 189 $ dirstate_data_files | wc -l
190 190 *1 (re)
191 191 $ dirstate_uuid_has_not_changed
192 192 not testing because using Python implementation (no-rust no-rhg !)
193 193
194 194 $ hg debugstate --docket | grep unused
195 195 number of unused bytes: 0 (no-rust no-rhg !)
196 196 number of unused bytes: [1-9]\d* (re) (rhg no-rust !)
197 197 number of unused bytes: [1-9]\d* (re) (rust no-rhg !)
198 198 number of unused bytes: [1-9]\d* (re) (rust rhg !)
199 199
200 200 Delete most of the dirstate to trigger a non-append
201 201 $ hg rm dir/a dir/b dir/c dir/d
202 202 $ dirstate_data_files | wc -l
203 203 *1 (re)
204 204 $ dirstate_uuid_has_not_changed also-if-python
205 205 [1]
206 206
207 207 Check that unused bytes counter is reset when creating a new docket
208 208
209 209 $ hg debugstate --docket | grep unused
210 210 number of unused bytes: 0
211 211
212 212 #endif
213 213
214 (non-Rust always rewrites)
215
216 Test the devel option to control write behavior
217 ==============================================
218
219 Sometimes, debugging or testing the dirstate requires making sure that we have
220 done a complete rewrite of the data file and have no unreachable data around,
221 sometimes it requires we ensure we don't.
222
223 We test the option to force this rewrite by creating the situation where an
224 append would happen and check that it doesn't happen.
225
226 $ cd ..
227 $ hg init force-base
228 $ cd force-base
229 $ mkdir -p dir/nested dir2
230 $ touch -t 200001010000 f dir/nested/a dir/b dir/c dir/d dir2/e dir/nested dir dir2
231 $ hg commit -Aqm "recreate a bunch of files to facilitate append"
232 $ hg st --config devel.dirstate.v2.data_update_mode=force-new
233 $ cd ..
234
235 #if dirstate-v2
236 $ hg -R force-base debugstate --docket | grep unused
237 number of unused bytes: 0
238
239 Check with the option in "auto" mode
240 ------------------------------------
241 $ cp -a force-base append-mostly-no-force-rewrite
242 $ cd append-mostly-no-force-rewrite
243 $ current_uid=$(find_dirstate_uuid)
244
245 Change mtime of dir on disk which will be recorded, causing a small enough change
246 to warrant only an append
247
248 $ touch -t 202212010000 dir2
249 $ hg st \
250 > --config rhg.on-unsupported=abort \
251 > --config devel.dirstate.v2.data_update_mode=auto
252
253 UUID hasn't changed and a non-zero number of unused bytes means we've appended
254
255 $ dirstate_uuid_has_not_changed
256 not testing because using Python implementation (no-rust no-rhg !)
257
258 #if no-rust no-rhg
259 The pure python implementation never appends at the time this is written.
260 $ hg debugstate --docket | grep unused
261 number of unused bytes: 0 (known-bad-output !)
262 #else
263 $ hg debugstate --docket | grep unused
264 number of unused bytes: [1-9]\d* (re)
265 #endif
266 $ cd ..
267
268 Check the same scenario with the option set to "force-new"
269 ---------------------------------------------------------
270
271 $ cp -a force-base append-mostly-force-rewrite
272 $ cd append-mostly-force-rewrite
273 $ current_uid=$(find_dirstate_uuid)
274
275 Change mtime of dir on disk which will be recorded, causing a small enough change
276 to warrant only an append, but we force the rewrite
277
278 $ touch -t 202212010000 dir2
279 $ hg st \
280 > --config rhg.on-unsupported=abort \
281 > --config devel.dirstate.v2.data_update_mode=force-new
282
283 UUID has changed and zero unused bytes means a full-rewrite happened
284
285
286 #if no-rust no-rhg
287 $ dirstate_uuid_has_not_changed
288 not testing because using Python implementation
289 #else
290 $ dirstate_uuid_has_not_changed
291 [1]
292 #endif
293 $ hg debugstate --docket | grep unused
294 number of unused bytes: 0
295 $ cd ..
296
297
298 Check the same scenario with the option set to "force-append"
299 -------------------------------------------------------------
300
301 (should behave the same as "auto" here)
302
303 $ cp -a force-base append-mostly-force-append
304 $ cd append-mostly-force-append
305 $ current_uid=$(find_dirstate_uuid)
306
307 Change mtime of dir on disk which will be recorded, causing a small enough change
308 to warrant only an append, which we are forcing here anyway.
309
310 $ touch -t 202212010000 dir2
311 $ hg st \
312 > --config rhg.on-unsupported=abort \
313 > --config devel.dirstate.v2.data_update_mode=force-append
314
315 UUID has not changed and some unused bytes exist in the data file
316
317 $ dirstate_uuid_has_not_changed
318 not testing because using Python implementation (no-rust no-rhg !)
319
320 #if no-rust no-rhg
321 The pure python implementation never appends at the time this is written.
322 $ hg debugstate --docket | grep unused
323 number of unused bytes: 0 (known-bad-output !)
324 #else
325 $ hg debugstate --docket | grep unused
326 number of unused bytes: [1-9]\d* (re)
327 #endif
328 $ cd ..
329
330 Check with the option in "auto" mode
331 ------------------------------------
332 $ cp -a force-base append-mostly-no-force-rewrite
333 $ cd append-mostly-no-force-rewrite
334 $ current_uid=$(find_dirstate_uuid)
335
336 Change mtime of everything on disk causing a full rewrite
337
338 $ touch -t 202212010005 `hg files`
339 $ hg st \
340 > --config rhg.on-unsupported=abort \
341 > --config devel.dirstate.v2.data_update_mode=auto
342
343 UUID has changed and zero unused bytes means we've rewritten.
344
345 #if no-rust no-rhg
346 $ dirstate_uuid_has_not_changed
347 not testing because using Python implementation
348 #else
349 $ dirstate_uuid_has_not_changed
350 [1]
351 #endif
352
353 $ hg debugstate --docket | grep unused
354 number of unused bytes: 0 (known-bad-output !)
355 $ cd ..
356
357 Check the same scenario with the option set to "force-new"
358 ---------------------------------------------------------
359
360 (should be the same as auto)
361
362 $ cp -a force-base append-mostly-force-rewrite
363 $ cd append-mostly-force-rewrite
364 $ current_uid=$(find_dirstate_uuid)
365
366 Change mtime of everything on disk causing a full rewrite
367
368 $ touch -t 202212010005 `hg files`
369 $ hg st \
370 > --config rhg.on-unsupported=abort \
371 > --config devel.dirstate.v2.data_update_mode=force-new
372
373 UUID has changed and zero unused bytes means we've rewritten.
374
375
376 #if no-rust no-rhg
377 $ dirstate_uuid_has_not_changed
378 not testing because using Python implementation
379 #else
380 $ dirstate_uuid_has_not_changed
381 [1]
382 #endif
383 $ hg debugstate --docket | grep unused
384 number of unused bytes: 0
385 $ cd ..
386
387
388 Check the same scenario with the option set to "force-append"
389 -------------------------------------------------------------
390
391 Should append even if "auto" did not
392
393 $ cp -a force-base append-mostly-force-append
394 $ cd append-mostly-force-append
395 $ current_uid=$(find_dirstate_uuid)
396
397 Change mtime of everything on disk causing a full rewrite
398
399 $ touch -t 202212010005 `hg files`
400 $ hg st \
401 > --config rhg.on-unsupported=abort \
402 > --config devel.dirstate.v2.data_update_mode=force-append
403
404 UUID has not changed and some unused bytes exist in the data file
405
406 $ dirstate_uuid_has_not_changed
407 not testing because using Python implementation (no-rust no-rhg !)
408
409 #if no-rust no-rhg
411 The pure python implementation never appends at the time this is written.
411 $ hg debugstate --docket | grep unused
412 number of unused bytes: 0 (known-bad-output !)
413 #else
414 $ hg debugstate --docket | grep unused
415 number of unused bytes: [1-9]\d* (re)
416 #endif
417 $ cd ..
418
419
420
421 Get back into a state suitable for the rest of the test file.
422
423 $ cd ./append-mostly
424
425 #else
426 $ cd ./u
427 #endif
428
214 429 Transaction compatibility
215 -------------------------
430 =========================
216 431
217 432 The transaction preserves the dirstate.
218 433 We should make sure all of it (docket + data) is preserved
219 434
220 435 #if dirstate-v2
221 436 $ hg commit -m 'bli'
222 437 #endif
223 438
224 439 $ hg update --quiet
225 440 $ hg revert --all --quiet
226 441 $ rm -f a
227 442 $ echo foo > foo
228 443 $ hg add foo
229 444 $ hg commit -m foo
230 445
231 446 #if dirstate-v2
232 447 $ uid=$(find_dirstate_uuid)
233 448 $ touch bar
234 449 $ while [ "$uid" = "$(find_dirstate_uuid)" ]; do
235 450 > hg add bar;
236 451 > hg remove bar;
237 452 > done;
238 453 $ rm bar
239 454 #endif
240 455 $ hg rollback
241 456 repository tip rolled back to revision 1 (undo commit)
242 457 working directory now based on revision 1
243 458
244 459 $ hg status
245 460 A foo
246 461 $ cd ..
247 462
248 463 Check dirstate ordering
249 464 (e.g. `src/dirstate/` and `src/dirstate.rs` shouldn't cause issues)
250 465
251 466 $ hg init repro
252 467 $ cd repro
253 468 $ mkdir src
254 469 $ mkdir src/dirstate
255 470 $ touch src/dirstate/file1 src/dirstate/file2 src/dirstate.rs
256 471 $ touch file1 file2
257 472 $ hg commit -Aqm1
258 473 $ hg st
259 474 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now