nodemap: add a new mode option, with an optional "warn" value...
marmoute -
r45292:6493f0a5 default
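This change registers a new experimental.exp-persistent-nodemap.mode option (default 'compat'); per the commit message it also accepts a "warn" value. A minimal sketch of how a user might opt in from an hgrc, assuming the option and value named in the commit message:

    [experimental]
    exp-persistent-nodemap = yes
    exp-persistent-nodemap.mode = warn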
@@ -1,1579 +1,1582
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
19 19 def loadconfigtable(ui, extname, configtable):
20 20 """update config item known to the ui with the extension ones"""
21 21 for section, items in sorted(configtable.items()):
22 22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 23 knownkeys = set(knownitems)
24 24 newkeys = set(items)
25 25 for key in sorted(knownkeys & newkeys):
26 26 msg = b"extension '%s' overwrite config item '%s.%s'"
27 27 msg %= (extname, section, key)
28 28 ui.develwarn(msg, config=b'warn-config')
29 29
30 30 knownitems.update(items)
31 31
32 32
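# Illustrative sketch (not part of this file): the typical shape of the
# extension-side configtable consumed by loadconfigtable() above, using the
# standard registrar helper (extension and option names are hypothetical).
#
# from mercurial import registrar
# configtable = {}
# configitem = registrar.configitem(configtable)
# configitem(b'myext', b'some-option', default=False)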
33 33 class configitem(object):
34 34 """represent a known config item
35 35
36 36 :section: the official config section where to find this item,
37 37 :name: the official name within the section,
38 38 :default: default value for this item,
39 39 :alias: optional list of tuples as alternatives,
40 40 :generic: this is a generic definition; names are matched using a regular expression.
41 41 """
42 42
43 43 def __init__(
44 44 self,
45 45 section,
46 46 name,
47 47 default=None,
48 48 alias=(),
49 49 generic=False,
50 50 priority=0,
51 51 experimental=False,
52 52 ):
53 53 self.section = section
54 54 self.name = name
55 55 self.default = default
56 56 self.alias = list(alias)
57 57 self.generic = generic
58 58 self.priority = priority
59 59 self.experimental = experimental
60 60 self._re = None
61 61 if generic:
62 62 self._re = re.compile(self.name)
63 63
64 64
65 65 class itemregister(dict):
66 66 """A specialized dictionary that can handle wild-card selection"""
67 67
68 68 def __init__(self):
69 69 super(itemregister, self).__init__()
70 70 self._generics = set()
71 71
72 72 def update(self, other):
73 73 super(itemregister, self).update(other)
74 74 self._generics.update(other._generics)
75 75
76 76 def __setitem__(self, key, item):
77 77 super(itemregister, self).__setitem__(key, item)
78 78 if item.generic:
79 79 self._generics.add(item)
80 80
81 81 def get(self, key):
82 82 baseitem = super(itemregister, self).get(key)
83 83 if baseitem is not None and not baseitem.generic:
84 84 return baseitem
85 85
86 86 # search for a matching generic item
87 87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 88 for item in generics:
89 89 # we use 'match' instead of 'search' to make the matching simpler
90 90 # for people unfamiliar with regular expressions. Having the match
91 91 # rooted to the start of the string produces less surprising
92 92 # results for users writing simple regexes for sub-attributes.
93 93 #
94 94 # For example, using "color\..*" with match produces an unsurprising
95 95 # result, while using search could suddenly match apparently
96 96 # unrelated configuration that happens to contain "color."
97 97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 98 # some matches to avoid the need to prefix most patterns with "^".
99 99 # The "^" seems more error prone.
100 100 if item._re.match(key):
101 101 return item
102 102
103 103 return None
104 104
105 105
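# Illustrative sketch (not part of this file): the rooted matching described
# above, in plain re terms. A generic pattern such as "color\..*" only
# matches keys starting with "color.", while re.search would also hit
# unrelated keys that merely contain "color." somewhere.
#
# >>> import re
# >>> bool(re.match(br'color\..*', b'color.mode'))
# True
# >>> bool(re.match(br'color\..*', b'web.color.mode'))
# False
# >>> bool(re.search(br'color\..*', b'web.color.mode'))
# True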
106 106 coreitems = {}
107 107
108 108
109 109 def _register(configtable, *args, **kwargs):
110 110 item = configitem(*args, **kwargs)
111 111 section = configtable.setdefault(item.section, itemregister())
112 112 if item.name in section:
113 113 msg = b"duplicated config item registration for '%s.%s'"
114 114 raise error.ProgrammingError(msg % (item.section, item.name))
115 115 section[item.name] = item
116 116
117 117
118 118 # special value for the case where the default is derived from other values
119 119 dynamicdefault = object()
120 120
121 121 # Registering actual config items
122 122
123 123
124 124 def getitemregister(configtable):
125 125 f = functools.partial(_register, configtable)
126 126 # export pseudo enum as configitem.*
127 127 f.dynamicdefault = dynamicdefault
128 128 return f
129 129
130 130
131 131 coreconfigitem = getitemregister(coreitems)
132 132
133 133
134 134 def _registerdiffopts(section, configprefix=b''):
135 135 coreconfigitem(
136 136 section, configprefix + b'nodates', default=False,
137 137 )
138 138 coreconfigitem(
139 139 section, configprefix + b'showfunc', default=False,
140 140 )
141 141 coreconfigitem(
142 142 section, configprefix + b'unified', default=None,
143 143 )
144 144 coreconfigitem(
145 145 section, configprefix + b'git', default=False,
146 146 )
147 147 coreconfigitem(
148 148 section, configprefix + b'ignorews', default=False,
149 149 )
150 150 coreconfigitem(
151 151 section, configprefix + b'ignorewsamount', default=False,
152 152 )
153 153 coreconfigitem(
154 154 section, configprefix + b'ignoreblanklines', default=False,
155 155 )
156 156 coreconfigitem(
157 157 section, configprefix + b'ignorewseol', default=False,
158 158 )
159 159 coreconfigitem(
160 160 section, configprefix + b'nobinary', default=False,
161 161 )
162 162 coreconfigitem(
163 163 section, configprefix + b'noprefix', default=False,
164 164 )
165 165 coreconfigitem(
166 166 section, configprefix + b'word-diff', default=False,
167 167 )
168 168
169 169
170 170 coreconfigitem(
171 171 b'alias', b'.*', default=dynamicdefault, generic=True,
172 172 )
173 173 coreconfigitem(
174 174 b'auth', b'cookiefile', default=None,
175 175 )
176 176 _registerdiffopts(section=b'annotate')
177 177 # bookmarks.pushing: internal hack for discovery
178 178 coreconfigitem(
179 179 b'bookmarks', b'pushing', default=list,
180 180 )
181 181 # bundle.mainreporoot: internal hack for bundlerepo
182 182 coreconfigitem(
183 183 b'bundle', b'mainreporoot', default=b'',
184 184 )
185 185 coreconfigitem(
186 186 b'censor', b'policy', default=b'abort', experimental=True,
187 187 )
188 188 coreconfigitem(
189 189 b'chgserver', b'idletimeout', default=3600,
190 190 )
191 191 coreconfigitem(
192 192 b'chgserver', b'skiphash', default=False,
193 193 )
194 194 coreconfigitem(
195 195 b'cmdserver', b'log', default=None,
196 196 )
197 197 coreconfigitem(
198 198 b'cmdserver', b'max-log-files', default=7,
199 199 )
200 200 coreconfigitem(
201 201 b'cmdserver', b'max-log-size', default=b'1 MB',
202 202 )
203 203 coreconfigitem(
204 204 b'cmdserver', b'max-repo-cache', default=0, experimental=True,
205 205 )
206 206 coreconfigitem(
207 207 b'cmdserver', b'message-encodings', default=list, experimental=True,
208 208 )
209 209 coreconfigitem(
210 210 b'cmdserver',
211 211 b'track-log',
212 212 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
213 213 )
214 214 coreconfigitem(
215 215 b'color', b'.*', default=None, generic=True,
216 216 )
217 217 coreconfigitem(
218 218 b'color', b'mode', default=b'auto',
219 219 )
220 220 coreconfigitem(
221 221 b'color', b'pagermode', default=dynamicdefault,
222 222 )
223 223 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
224 224 coreconfigitem(
225 225 b'commands', b'commit.post-status', default=False,
226 226 )
227 227 coreconfigitem(
228 228 b'commands', b'grep.all-files', default=False, experimental=True,
229 229 )
230 230 coreconfigitem(
231 231 b'commands', b'merge.require-rev', default=False,
232 232 )
233 233 coreconfigitem(
234 234 b'commands', b'push.require-revs', default=False,
235 235 )
236 236 coreconfigitem(
237 237 b'commands', b'resolve.confirm', default=False,
238 238 )
239 239 coreconfigitem(
240 240 b'commands', b'resolve.explicit-re-merge', default=False,
241 241 )
242 242 coreconfigitem(
243 243 b'commands', b'resolve.mark-check', default=b'none',
244 244 )
245 245 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
246 246 coreconfigitem(
247 247 b'commands', b'show.aliasprefix', default=list,
248 248 )
249 249 coreconfigitem(
250 250 b'commands', b'status.relative', default=False,
251 251 )
252 252 coreconfigitem(
253 253 b'commands', b'status.skipstates', default=[], experimental=True,
254 254 )
255 255 coreconfigitem(
256 256 b'commands', b'status.terse', default=b'',
257 257 )
258 258 coreconfigitem(
259 259 b'commands', b'status.verbose', default=False,
260 260 )
261 261 coreconfigitem(
262 262 b'commands', b'update.check', default=None,
263 263 )
264 264 coreconfigitem(
265 265 b'commands', b'update.requiredest', default=False,
266 266 )
267 267 coreconfigitem(
268 268 b'committemplate', b'.*', default=None, generic=True,
269 269 )
270 270 coreconfigitem(
271 271 b'convert', b'bzr.saverev', default=True,
272 272 )
273 273 coreconfigitem(
274 274 b'convert', b'cvsps.cache', default=True,
275 275 )
276 276 coreconfigitem(
277 277 b'convert', b'cvsps.fuzz', default=60,
278 278 )
279 279 coreconfigitem(
280 280 b'convert', b'cvsps.logencoding', default=None,
281 281 )
282 282 coreconfigitem(
283 283 b'convert', b'cvsps.mergefrom', default=None,
284 284 )
285 285 coreconfigitem(
286 286 b'convert', b'cvsps.mergeto', default=None,
287 287 )
288 288 coreconfigitem(
289 289 b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
290 290 )
291 291 coreconfigitem(
292 292 b'convert', b'git.extrakeys', default=list,
293 293 )
294 294 coreconfigitem(
295 295 b'convert', b'git.findcopiesharder', default=False,
296 296 )
297 297 coreconfigitem(
298 298 b'convert', b'git.remoteprefix', default=b'remote',
299 299 )
300 300 coreconfigitem(
301 301 b'convert', b'git.renamelimit', default=400,
302 302 )
303 303 coreconfigitem(
304 304 b'convert', b'git.saverev', default=True,
305 305 )
306 306 coreconfigitem(
307 307 b'convert', b'git.similarity', default=50,
308 308 )
309 309 coreconfigitem(
310 310 b'convert', b'git.skipsubmodules', default=False,
311 311 )
312 312 coreconfigitem(
313 313 b'convert', b'hg.clonebranches', default=False,
314 314 )
315 315 coreconfigitem(
316 316 b'convert', b'hg.ignoreerrors', default=False,
317 317 )
318 318 coreconfigitem(
319 319 b'convert', b'hg.preserve-hash', default=False,
320 320 )
321 321 coreconfigitem(
322 322 b'convert', b'hg.revs', default=None,
323 323 )
324 324 coreconfigitem(
325 325 b'convert', b'hg.saverev', default=False,
326 326 )
327 327 coreconfigitem(
328 328 b'convert', b'hg.sourcename', default=None,
329 329 )
330 330 coreconfigitem(
331 331 b'convert', b'hg.startrev', default=None,
332 332 )
333 333 coreconfigitem(
334 334 b'convert', b'hg.tagsbranch', default=b'default',
335 335 )
336 336 coreconfigitem(
337 337 b'convert', b'hg.usebranchnames', default=True,
338 338 )
339 339 coreconfigitem(
340 340 b'convert', b'ignoreancestorcheck', default=False, experimental=True,
341 341 )
342 342 coreconfigitem(
343 343 b'convert', b'localtimezone', default=False,
344 344 )
345 345 coreconfigitem(
346 346 b'convert', b'p4.encoding', default=dynamicdefault,
347 347 )
348 348 coreconfigitem(
349 349 b'convert', b'p4.startrev', default=0,
350 350 )
351 351 coreconfigitem(
352 352 b'convert', b'skiptags', default=False,
353 353 )
354 354 coreconfigitem(
355 355 b'convert', b'svn.debugsvnlog', default=True,
356 356 )
357 357 coreconfigitem(
358 358 b'convert', b'svn.trunk', default=None,
359 359 )
360 360 coreconfigitem(
361 361 b'convert', b'svn.tags', default=None,
362 362 )
363 363 coreconfigitem(
364 364 b'convert', b'svn.branches', default=None,
365 365 )
366 366 coreconfigitem(
367 367 b'convert', b'svn.startrev', default=0,
368 368 )
369 369 coreconfigitem(
370 370 b'debug', b'dirstate.delaywrite', default=0,
371 371 )
372 372 coreconfigitem(
373 373 b'defaults', b'.*', default=None, generic=True,
374 374 )
375 375 coreconfigitem(
376 376 b'devel', b'all-warnings', default=False,
377 377 )
378 378 coreconfigitem(
379 379 b'devel', b'bundle2.debug', default=False,
380 380 )
381 381 coreconfigitem(
382 382 b'devel', b'bundle.delta', default=b'',
383 383 )
384 384 coreconfigitem(
385 385 b'devel', b'cache-vfs', default=None,
386 386 )
387 387 coreconfigitem(
388 388 b'devel', b'check-locks', default=False,
389 389 )
390 390 coreconfigitem(
391 391 b'devel', b'check-relroot', default=False,
392 392 )
393 393 coreconfigitem(
394 394 b'devel', b'default-date', default=None,
395 395 )
396 396 coreconfigitem(
397 397 b'devel', b'deprec-warn', default=False,
398 398 )
399 399 coreconfigitem(
400 400 b'devel', b'disableloaddefaultcerts', default=False,
401 401 )
402 402 coreconfigitem(
403 403 b'devel', b'warn-empty-changegroup', default=False,
404 404 )
405 405 coreconfigitem(
406 406 b'devel', b'legacy.exchange', default=list,
407 407 )
408 408 # TODO before getting `persistent-nodemap` out of experimental
409 409 #
410 410 # * decide for a "status" of the persistent nodemap and associated location
411 411 # - part of the store next the revlog itself (new requirements)
412 412 # - part of the cache directory
413 413 # - part of an `index` directory
414 414 # (https://www.mercurial-scm.org/wiki/ComputedIndexPlan)
415 415 # * do we want to use this for more than just changelog? if so we need:
416 416 # - simpler "pending" logic for them
417 417 # - double check the memory story (we don't want to keep all revlogs in memory)
418 418 # - think about the naming scheme if we are in "cache"
419 419 # * increment the version format to "1" and freeze it.
420 420 coreconfigitem(
421 421 b'devel', b'persistent-nodemap', default=False,
422 422 )
423 423 coreconfigitem(
424 424 b'devel', b'servercafile', default=b'',
425 425 )
426 426 coreconfigitem(
427 427 b'devel', b'serverexactprotocol', default=b'',
428 428 )
429 429 coreconfigitem(
430 430 b'devel', b'serverrequirecert', default=False,
431 431 )
432 432 coreconfigitem(
433 433 b'devel', b'strip-obsmarkers', default=True,
434 434 )
435 435 coreconfigitem(
436 436 b'devel', b'warn-config', default=None,
437 437 )
438 438 coreconfigitem(
439 439 b'devel', b'warn-config-default', default=None,
440 440 )
441 441 coreconfigitem(
442 442 b'devel', b'user.obsmarker', default=None,
443 443 )
444 444 coreconfigitem(
445 445 b'devel', b'warn-config-unknown', default=None,
446 446 )
447 447 coreconfigitem(
448 448 b'devel', b'debug.copies', default=False,
449 449 )
450 450 coreconfigitem(
451 451 b'devel', b'debug.extensions', default=False,
452 452 )
453 453 coreconfigitem(
454 454 b'devel', b'debug.repo-filters', default=False,
455 455 )
456 456 coreconfigitem(
457 457 b'devel', b'debug.peer-request', default=False,
458 458 )
459 459 coreconfigitem(
460 460 b'devel', b'discovery.randomize', default=True,
461 461 )
462 462 _registerdiffopts(section=b'diff')
463 463 coreconfigitem(
464 464 b'email', b'bcc', default=None,
465 465 )
466 466 coreconfigitem(
467 467 b'email', b'cc', default=None,
468 468 )
469 469 coreconfigitem(
470 470 b'email', b'charsets', default=list,
471 471 )
472 472 coreconfigitem(
473 473 b'email', b'from', default=None,
474 474 )
475 475 coreconfigitem(
476 476 b'email', b'method', default=b'smtp',
477 477 )
478 478 coreconfigitem(
479 479 b'email', b'reply-to', default=None,
480 480 )
481 481 coreconfigitem(
482 482 b'email', b'to', default=None,
483 483 )
484 484 coreconfigitem(
485 485 b'experimental', b'archivemetatemplate', default=dynamicdefault,
486 486 )
487 487 coreconfigitem(
488 488 b'experimental', b'auto-publish', default=b'publish',
489 489 )
490 490 coreconfigitem(
491 491 b'experimental', b'bundle-phases', default=False,
492 492 )
493 493 coreconfigitem(
494 494 b'experimental', b'bundle2-advertise', default=True,
495 495 )
496 496 coreconfigitem(
497 497 b'experimental', b'bundle2-output-capture', default=False,
498 498 )
499 499 coreconfigitem(
500 500 b'experimental', b'bundle2.pushback', default=False,
501 501 )
502 502 coreconfigitem(
503 503 b'experimental', b'bundle2lazylocking', default=False,
504 504 )
505 505 coreconfigitem(
506 506 b'experimental', b'bundlecomplevel', default=None,
507 507 )
508 508 coreconfigitem(
509 509 b'experimental', b'bundlecomplevel.bzip2', default=None,
510 510 )
511 511 coreconfigitem(
512 512 b'experimental', b'bundlecomplevel.gzip', default=None,
513 513 )
514 514 coreconfigitem(
515 515 b'experimental', b'bundlecomplevel.none', default=None,
516 516 )
517 517 coreconfigitem(
518 518 b'experimental', b'bundlecomplevel.zstd', default=None,
519 519 )
520 520 coreconfigitem(
521 521 b'experimental', b'changegroup3', default=False,
522 522 )
523 523 coreconfigitem(
524 524 b'experimental', b'cleanup-as-archived', default=False,
525 525 )
526 526 coreconfigitem(
527 527 b'experimental', b'clientcompressionengines', default=list,
528 528 )
529 529 coreconfigitem(
530 530 b'experimental', b'copytrace', default=b'on',
531 531 )
532 532 coreconfigitem(
533 533 b'experimental', b'copytrace.movecandidateslimit', default=100,
534 534 )
535 535 coreconfigitem(
536 536 b'experimental', b'copytrace.sourcecommitlimit', default=100,
537 537 )
538 538 coreconfigitem(
539 539 b'experimental', b'copies.read-from', default=b"filelog-only",
540 540 )
541 541 coreconfigitem(
542 542 b'experimental', b'copies.write-to', default=b'filelog-only',
543 543 )
544 544 coreconfigitem(
545 545 b'experimental', b'crecordtest', default=None,
546 546 )
547 547 coreconfigitem(
548 548 b'experimental', b'directaccess', default=False,
549 549 )
550 550 coreconfigitem(
551 551 b'experimental', b'directaccess.revnums', default=False,
552 552 )
553 553 coreconfigitem(
554 554 b'experimental', b'editortmpinhg', default=False,
555 555 )
556 556 coreconfigitem(
557 557 b'experimental', b'evolution', default=list,
558 558 )
559 559 coreconfigitem(
560 560 b'experimental',
561 561 b'evolution.allowdivergence',
562 562 default=False,
563 563 alias=[(b'experimental', b'allowdivergence')],
564 564 )
565 565 coreconfigitem(
566 566 b'experimental', b'evolution.allowunstable', default=None,
567 567 )
568 568 coreconfigitem(
569 569 b'experimental', b'evolution.createmarkers', default=None,
570 570 )
571 571 coreconfigitem(
572 572 b'experimental',
573 573 b'evolution.effect-flags',
574 574 default=True,
575 575 alias=[(b'experimental', b'effect-flags')],
576 576 )
577 577 coreconfigitem(
578 578 b'experimental', b'evolution.exchange', default=None,
579 579 )
580 580 coreconfigitem(
581 581 b'experimental', b'evolution.bundle-obsmarker', default=False,
582 582 )
583 583 coreconfigitem(
584 584 b'experimental', b'log.topo', default=False,
585 585 )
586 586 coreconfigitem(
587 587 b'experimental', b'evolution.report-instabilities', default=True,
588 588 )
589 589 coreconfigitem(
590 590 b'experimental', b'evolution.track-operation', default=True,
591 591 )
592 592 # repo-level config to exclude a revset visibility
593 593 #
594 594 # The target use case is to use `share` to expose different subsets of the same
595 595 # repository, especially server side. See also `server.view`.
596 596 coreconfigitem(
597 597 b'experimental', b'extra-filter-revs', default=None,
598 598 )
599 599 coreconfigitem(
600 600 b'experimental', b'maxdeltachainspan', default=-1,
601 601 )
602 602 coreconfigitem(
603 603 b'experimental', b'mergetempdirprefix', default=None,
604 604 )
605 605 coreconfigitem(
606 606 b'experimental', b'mmapindexthreshold', default=None,
607 607 )
608 608 coreconfigitem(
609 609 b'experimental', b'narrow', default=False,
610 610 )
611 611 coreconfigitem(
612 612 b'experimental', b'nonnormalparanoidcheck', default=False,
613 613 )
614 614 coreconfigitem(
615 615 b'experimental', b'exportableenviron', default=list,
616 616 )
617 617 coreconfigitem(
618 618 b'experimental', b'extendedheader.index', default=None,
619 619 )
620 620 coreconfigitem(
621 621 b'experimental', b'extendedheader.similarity', default=False,
622 622 )
623 623 coreconfigitem(
624 624 b'experimental', b'graphshorten', default=False,
625 625 )
626 626 coreconfigitem(
627 627 b'experimental', b'graphstyle.parent', default=dynamicdefault,
628 628 )
629 629 coreconfigitem(
630 630 b'experimental', b'graphstyle.missing', default=dynamicdefault,
631 631 )
632 632 coreconfigitem(
633 633 b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
634 634 )
635 635 coreconfigitem(
636 636 b'experimental', b'hook-track-tags', default=False,
637 637 )
638 638 coreconfigitem(
639 639 b'experimental', b'httppeer.advertise-v2', default=False,
640 640 )
641 641 coreconfigitem(
642 642 b'experimental', b'httppeer.v2-encoder-order', default=None,
643 643 )
644 644 coreconfigitem(
645 645 b'experimental', b'httppostargs', default=False,
646 646 )
647 647 coreconfigitem(
648 648 b'experimental', b'mergedriver', default=None,
649 649 )
650 650 coreconfigitem(b'experimental', b'nointerrupt', default=False)
651 651 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
652 652
653 653 coreconfigitem(
654 654 b'experimental', b'obsmarkers-exchange-debug', default=False,
655 655 )
656 656 coreconfigitem(
657 657 b'experimental', b'remotenames', default=False,
658 658 )
659 659 coreconfigitem(
660 660 b'experimental', b'removeemptydirs', default=True,
661 661 )
662 662 coreconfigitem(
663 663 b'experimental', b'revert.interactive.select-to-keep', default=False,
664 664 )
665 665 coreconfigitem(
666 666 b'experimental', b'revisions.prefixhexnode', default=False,
667 667 )
668 668 coreconfigitem(
669 669 b'experimental', b'revlogv2', default=None,
670 670 )
671 671 coreconfigitem(
672 672 b'experimental', b'revisions.disambiguatewithin', default=None,
673 673 )
674 674 coreconfigitem(
675 675 b'experimental', b'rust.index', default=False,
676 676 )
677 677 coreconfigitem(
678 678 b'experimental', b'exp-persistent-nodemap', default=False,
679 679 )
680 680 coreconfigitem(
681 681 b'experimental', b'exp-persistent-nodemap.mmap', default=True,
682 682 )
683 683 coreconfigitem(
684 b'experimental', b'exp-persistent-nodemap.mode', default=b'compat',
685 )
686 coreconfigitem(
684 687 b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
685 688 )
686 689 coreconfigitem(
687 690 b'experimental',
688 691 b'server.manifestdata.recommended-batch-size',
689 692 default=100000,
690 693 )
691 694 coreconfigitem(
692 695 b'experimental', b'server.stream-narrow-clones', default=False,
693 696 )
694 697 coreconfigitem(
695 698 b'experimental', b'single-head-per-branch', default=False,
696 699 )
697 700 coreconfigitem(
698 701 b'experimental',
699 702 b'single-head-per-branch:account-closed-heads',
700 703 default=False,
701 704 )
702 705 coreconfigitem(
703 706 b'experimental', b'sshserver.support-v2', default=False,
704 707 )
705 708 coreconfigitem(
706 709 b'experimental', b'sparse-read', default=False,
707 710 )
708 711 coreconfigitem(
709 712 b'experimental', b'sparse-read.density-threshold', default=0.50,
710 713 )
711 714 coreconfigitem(
712 715 b'experimental', b'sparse-read.min-gap-size', default=b'65K',
713 716 )
714 717 coreconfigitem(
715 718 b'experimental', b'treemanifest', default=False,
716 719 )
717 720 coreconfigitem(
718 721 b'experimental', b'update.atomic-file', default=False,
719 722 )
720 723 coreconfigitem(
721 724 b'experimental', b'sshpeer.advertise-v2', default=False,
722 725 )
723 726 coreconfigitem(
724 727 b'experimental', b'web.apiserver', default=False,
725 728 )
726 729 coreconfigitem(
727 730 b'experimental', b'web.api.http-v2', default=False,
728 731 )
729 732 coreconfigitem(
730 733 b'experimental', b'web.api.debugreflect', default=False,
731 734 )
732 735 coreconfigitem(
733 736 b'experimental', b'worker.wdir-get-thread-safe', default=False,
734 737 )
735 738 coreconfigitem(
736 739 b'experimental', b'worker.repository-upgrade', default=False,
737 740 )
738 741 coreconfigitem(
739 742 b'experimental', b'xdiff', default=False,
740 743 )
741 744 coreconfigitem(
742 745 b'extensions', b'.*', default=None, generic=True,
743 746 )
744 747 coreconfigitem(
745 748 b'extdata', b'.*', default=None, generic=True,
746 749 )
747 750 coreconfigitem(
748 751 b'format', b'bookmarks-in-store', default=False,
749 752 )
750 753 coreconfigitem(
751 754 b'format', b'chunkcachesize', default=None, experimental=True,
752 755 )
753 756 coreconfigitem(
754 757 b'format', b'dotencode', default=True,
755 758 )
756 759 coreconfigitem(
757 760 b'format', b'generaldelta', default=False, experimental=True,
758 761 )
759 762 coreconfigitem(
760 763 b'format', b'manifestcachesize', default=None, experimental=True,
761 764 )
762 765 coreconfigitem(
763 766 b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
764 767 )
765 768 coreconfigitem(
766 769 b'format', b'obsstore-version', default=None,
767 770 )
768 771 coreconfigitem(
769 772 b'format', b'sparse-revlog', default=True,
770 773 )
771 774 coreconfigitem(
772 775 b'format',
773 776 b'revlog-compression',
774 777 default=lambda: [b'zlib'],
775 778 alias=[(b'experimental', b'format.compression')],
776 779 )
777 780 coreconfigitem(
778 781 b'format', b'usefncache', default=True,
779 782 )
780 783 coreconfigitem(
781 784 b'format', b'usegeneraldelta', default=True,
782 785 )
783 786 coreconfigitem(
784 787 b'format', b'usestore', default=True,
785 788 )
786 789 coreconfigitem(
787 790 b'format',
788 791 b'exp-use-copies-side-data-changeset',
789 792 default=False,
790 793 experimental=True,
791 794 )
792 795 coreconfigitem(
793 796 b'format', b'exp-use-side-data', default=False, experimental=True,
794 797 )
795 798 coreconfigitem(
796 799 b'format', b'internal-phase', default=False, experimental=True,
797 800 )
798 801 coreconfigitem(
799 802 b'fsmonitor', b'warn_when_unused', default=True,
800 803 )
801 804 coreconfigitem(
802 805 b'fsmonitor', b'warn_update_file_count', default=50000,
803 806 )
804 807 coreconfigitem(
805 808 b'help', br'hidden-command\..*', default=False, generic=True,
806 809 )
807 810 coreconfigitem(
808 811 b'help', br'hidden-topic\..*', default=False, generic=True,
809 812 )
810 813 coreconfigitem(
811 814 b'hooks', b'.*', default=dynamicdefault, generic=True,
812 815 )
813 816 coreconfigitem(
814 817 b'hgweb-paths', b'.*', default=list, generic=True,
815 818 )
816 819 coreconfigitem(
817 820 b'hostfingerprints', b'.*', default=list, generic=True,
818 821 )
819 822 coreconfigitem(
820 823 b'hostsecurity', b'ciphers', default=None,
821 824 )
822 825 coreconfigitem(
823 826 b'hostsecurity', b'disabletls10warning', default=False,
824 827 )
825 828 coreconfigitem(
826 829 b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
827 830 )
828 831 coreconfigitem(
829 832 b'hostsecurity',
830 833 b'.*:minimumprotocol$',
831 834 default=dynamicdefault,
832 835 generic=True,
833 836 )
834 837 coreconfigitem(
835 838 b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
836 839 )
837 840 coreconfigitem(
838 841 b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
839 842 )
840 843 coreconfigitem(
841 844 b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
842 845 )
843 846
844 847 coreconfigitem(
845 848 b'http_proxy', b'always', default=False,
846 849 )
847 850 coreconfigitem(
848 851 b'http_proxy', b'host', default=None,
849 852 )
850 853 coreconfigitem(
851 854 b'http_proxy', b'no', default=list,
852 855 )
853 856 coreconfigitem(
854 857 b'http_proxy', b'passwd', default=None,
855 858 )
856 859 coreconfigitem(
857 860 b'http_proxy', b'user', default=None,
858 861 )
859 862
860 863 coreconfigitem(
861 864 b'http', b'timeout', default=None,
862 865 )
863 866
864 867 coreconfigitem(
865 868 b'logtoprocess', b'commandexception', default=None,
866 869 )
867 870 coreconfigitem(
868 871 b'logtoprocess', b'commandfinish', default=None,
869 872 )
870 873 coreconfigitem(
871 874 b'logtoprocess', b'command', default=None,
872 875 )
873 876 coreconfigitem(
874 877 b'logtoprocess', b'develwarn', default=None,
875 878 )
876 879 coreconfigitem(
877 880 b'logtoprocess', b'uiblocked', default=None,
878 881 )
879 882 coreconfigitem(
880 883 b'merge', b'checkunknown', default=b'abort',
881 884 )
882 885 coreconfigitem(
883 886 b'merge', b'checkignored', default=b'abort',
884 887 )
885 888 coreconfigitem(
886 889 b'experimental', b'merge.checkpathconflicts', default=False,
887 890 )
888 891 coreconfigitem(
889 892 b'merge', b'followcopies', default=True,
890 893 )
891 894 coreconfigitem(
892 895 b'merge', b'on-failure', default=b'continue',
893 896 )
894 897 coreconfigitem(
895 898 b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
896 899 )
897 900 coreconfigitem(
898 901 b'merge', b'strict-capability-check', default=False,
899 902 )
900 903 coreconfigitem(
901 904 b'merge-tools', b'.*', default=None, generic=True,
902 905 )
903 906 coreconfigitem(
904 907 b'merge-tools',
905 908 br'.*\.args$',
906 909 default=b"$local $base $other",
907 910 generic=True,
908 911 priority=-1,
909 912 )
910 913 coreconfigitem(
911 914 b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
912 915 )
913 916 coreconfigitem(
914 917 b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
915 918 )
916 919 coreconfigitem(
917 920 b'merge-tools',
918 921 br'.*\.checkchanged$',
919 922 default=False,
920 923 generic=True,
921 924 priority=-1,
922 925 )
923 926 coreconfigitem(
924 927 b'merge-tools',
925 928 br'.*\.executable$',
926 929 default=dynamicdefault,
927 930 generic=True,
928 931 priority=-1,
929 932 )
930 933 coreconfigitem(
931 934 b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
932 935 )
933 936 coreconfigitem(
934 937 b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
935 938 )
936 939 coreconfigitem(
937 940 b'merge-tools',
938 941 br'.*\.mergemarkers$',
939 942 default=b'basic',
940 943 generic=True,
941 944 priority=-1,
942 945 )
943 946 coreconfigitem(
944 947 b'merge-tools',
945 948 br'.*\.mergemarkertemplate$',
946 949 default=dynamicdefault, # take from ui.mergemarkertemplate
947 950 generic=True,
948 951 priority=-1,
949 952 )
950 953 coreconfigitem(
951 954 b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
952 955 )
953 956 coreconfigitem(
954 957 b'merge-tools',
955 958 br'.*\.premerge$',
956 959 default=dynamicdefault,
957 960 generic=True,
958 961 priority=-1,
959 962 )
960 963 coreconfigitem(
961 964 b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
962 965 )
963 966 coreconfigitem(
964 967 b'pager', b'attend-.*', default=dynamicdefault, generic=True,
965 968 )
966 969 coreconfigitem(
967 970 b'pager', b'ignore', default=list,
968 971 )
969 972 coreconfigitem(
970 973 b'pager', b'pager', default=dynamicdefault,
971 974 )
972 975 coreconfigitem(
973 976 b'patch', b'eol', default=b'strict',
974 977 )
975 978 coreconfigitem(
976 979 b'patch', b'fuzz', default=2,
977 980 )
978 981 coreconfigitem(
979 982 b'paths', b'default', default=None,
980 983 )
981 984 coreconfigitem(
982 985 b'paths', b'default-push', default=None,
983 986 )
984 987 coreconfigitem(
985 988 b'paths', b'.*', default=None, generic=True,
986 989 )
987 990 coreconfigitem(
988 991 b'phases', b'checksubrepos', default=b'follow',
989 992 )
990 993 coreconfigitem(
991 994 b'phases', b'new-commit', default=b'draft',
992 995 )
993 996 coreconfigitem(
994 997 b'phases', b'publish', default=True,
995 998 )
996 999 coreconfigitem(
997 1000 b'profiling', b'enabled', default=False,
998 1001 )
999 1002 coreconfigitem(
1000 1003 b'profiling', b'format', default=b'text',
1001 1004 )
1002 1005 coreconfigitem(
1003 1006 b'profiling', b'freq', default=1000,
1004 1007 )
1005 1008 coreconfigitem(
1006 1009 b'profiling', b'limit', default=30,
1007 1010 )
1008 1011 coreconfigitem(
1009 1012 b'profiling', b'nested', default=0,
1010 1013 )
1011 1014 coreconfigitem(
1012 1015 b'profiling', b'output', default=None,
1013 1016 )
1014 1017 coreconfigitem(
1015 1018 b'profiling', b'showmax', default=0.999,
1016 1019 )
1017 1020 coreconfigitem(
1018 1021 b'profiling', b'showmin', default=dynamicdefault,
1019 1022 )
1020 1023 coreconfigitem(
1021 1024 b'profiling', b'showtime', default=True,
1022 1025 )
1023 1026 coreconfigitem(
1024 1027 b'profiling', b'sort', default=b'inlinetime',
1025 1028 )
1026 1029 coreconfigitem(
1027 1030 b'profiling', b'statformat', default=b'hotpath',
1028 1031 )
1029 1032 coreconfigitem(
1030 1033 b'profiling', b'time-track', default=dynamicdefault,
1031 1034 )
1032 1035 coreconfigitem(
1033 1036 b'profiling', b'type', default=b'stat',
1034 1037 )
1035 1038 coreconfigitem(
1036 1039 b'progress', b'assume-tty', default=False,
1037 1040 )
1038 1041 coreconfigitem(
1039 1042 b'progress', b'changedelay', default=1,
1040 1043 )
1041 1044 coreconfigitem(
1042 1045 b'progress', b'clear-complete', default=True,
1043 1046 )
1044 1047 coreconfigitem(
1045 1048 b'progress', b'debug', default=False,
1046 1049 )
1047 1050 coreconfigitem(
1048 1051 b'progress', b'delay', default=3,
1049 1052 )
1050 1053 coreconfigitem(
1051 1054 b'progress', b'disable', default=False,
1052 1055 )
1053 1056 coreconfigitem(
1054 1057 b'progress', b'estimateinterval', default=60.0,
1055 1058 )
1056 1059 coreconfigitem(
1057 1060 b'progress',
1058 1061 b'format',
1059 1062 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1060 1063 )
1061 1064 coreconfigitem(
1062 1065 b'progress', b'refresh', default=0.1,
1063 1066 )
1064 1067 coreconfigitem(
1065 1068 b'progress', b'width', default=dynamicdefault,
1066 1069 )
1067 1070 coreconfigitem(
1068 1071 b'pull', b'confirm', default=False,
1069 1072 )
1070 1073 coreconfigitem(
1071 1074 b'push', b'pushvars.server', default=False,
1072 1075 )
1073 1076 coreconfigitem(
1074 1077 b'rewrite',
1075 1078 b'backup-bundle',
1076 1079 default=True,
1077 1080 alias=[(b'ui', b'history-editing-backup')],
1078 1081 )
1079 1082 coreconfigitem(
1080 1083 b'rewrite', b'update-timestamp', default=False,
1081 1084 )
1082 1085 coreconfigitem(
1083 1086 b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
1084 1087 )
1085 1088 coreconfigitem(
1086 1089 b'storage',
1087 1090 b'revlog.optimize-delta-parent-choice',
1088 1091 default=True,
1089 1092 alias=[(b'format', b'aggressivemergedeltas')],
1090 1093 )
1091 1094 coreconfigitem(
1092 1095 b'storage', b'revlog.reuse-external-delta', default=True,
1093 1096 )
1094 1097 coreconfigitem(
1095 1098 b'storage', b'revlog.reuse-external-delta-parent', default=None,
1096 1099 )
1097 1100 coreconfigitem(
1098 1101 b'storage', b'revlog.zlib.level', default=None,
1099 1102 )
1100 1103 coreconfigitem(
1101 1104 b'storage', b'revlog.zstd.level', default=None,
1102 1105 )
1103 1106 coreconfigitem(
1104 1107 b'server', b'bookmarks-pushkey-compat', default=True,
1105 1108 )
1106 1109 coreconfigitem(
1107 1110 b'server', b'bundle1', default=True,
1108 1111 )
1109 1112 coreconfigitem(
1110 1113 b'server', b'bundle1gd', default=None,
1111 1114 )
1112 1115 coreconfigitem(
1113 1116 b'server', b'bundle1.pull', default=None,
1114 1117 )
1115 1118 coreconfigitem(
1116 1119 b'server', b'bundle1gd.pull', default=None,
1117 1120 )
1118 1121 coreconfigitem(
1119 1122 b'server', b'bundle1.push', default=None,
1120 1123 )
1121 1124 coreconfigitem(
1122 1125 b'server', b'bundle1gd.push', default=None,
1123 1126 )
1124 1127 coreconfigitem(
1125 1128 b'server',
1126 1129 b'bundle2.stream',
1127 1130 default=True,
1128 1131 alias=[(b'experimental', b'bundle2.stream')],
1129 1132 )
1130 1133 coreconfigitem(
1131 1134 b'server', b'compressionengines', default=list,
1132 1135 )
1133 1136 coreconfigitem(
1134 1137 b'server', b'concurrent-push-mode', default=b'check-related',
1135 1138 )
1136 1139 coreconfigitem(
1137 1140 b'server', b'disablefullbundle', default=False,
1138 1141 )
1139 1142 coreconfigitem(
1140 1143 b'server', b'maxhttpheaderlen', default=1024,
1141 1144 )
1142 1145 coreconfigitem(
1143 1146 b'server', b'pullbundle', default=False,
1144 1147 )
1145 1148 coreconfigitem(
1146 1149 b'server', b'preferuncompressed', default=False,
1147 1150 )
1148 1151 coreconfigitem(
1149 1152 b'server', b'streamunbundle', default=False,
1150 1153 )
1151 1154 coreconfigitem(
1152 1155 b'server', b'uncompressed', default=True,
1153 1156 )
1154 1157 coreconfigitem(
1155 1158 b'server', b'uncompressedallowsecret', default=False,
1156 1159 )
1157 1160 coreconfigitem(
1158 1161 b'server', b'view', default=b'served',
1159 1162 )
1160 1163 coreconfigitem(
1161 1164 b'server', b'validate', default=False,
1162 1165 )
1163 1166 coreconfigitem(
1164 1167 b'server', b'zliblevel', default=-1,
1165 1168 )
1166 1169 coreconfigitem(
1167 1170 b'server', b'zstdlevel', default=3,
1168 1171 )
1169 1172 coreconfigitem(
1170 1173 b'share', b'pool', default=None,
1171 1174 )
1172 1175 coreconfigitem(
1173 1176 b'share', b'poolnaming', default=b'identity',
1174 1177 )
1175 1178 coreconfigitem(
1176 1179 b'shelve', b'maxbackups', default=10,
1177 1180 )
1178 1181 coreconfigitem(
1179 1182 b'smtp', b'host', default=None,
1180 1183 )
1181 1184 coreconfigitem(
1182 1185 b'smtp', b'local_hostname', default=None,
1183 1186 )
1184 1187 coreconfigitem(
1185 1188 b'smtp', b'password', default=None,
1186 1189 )
1187 1190 coreconfigitem(
1188 1191 b'smtp', b'port', default=dynamicdefault,
1189 1192 )
1190 1193 coreconfigitem(
1191 1194 b'smtp', b'tls', default=b'none',
1192 1195 )
1193 1196 coreconfigitem(
1194 1197 b'smtp', b'username', default=None,
1195 1198 )
1196 1199 coreconfigitem(
1197 1200 b'sparse', b'missingwarning', default=True, experimental=True,
1198 1201 )
1199 1202 coreconfigitem(
1200 1203 b'subrepos',
1201 1204 b'allowed',
1202 1205 default=dynamicdefault, # to make backporting simpler
1203 1206 )
1204 1207 coreconfigitem(
1205 1208 b'subrepos', b'hg:allowed', default=dynamicdefault,
1206 1209 )
1207 1210 coreconfigitem(
1208 1211 b'subrepos', b'git:allowed', default=dynamicdefault,
1209 1212 )
1210 1213 coreconfigitem(
1211 1214 b'subrepos', b'svn:allowed', default=dynamicdefault,
1212 1215 )
1213 1216 coreconfigitem(
1214 1217 b'templates', b'.*', default=None, generic=True,
1215 1218 )
1216 1219 coreconfigitem(
1217 1220 b'templateconfig', b'.*', default=dynamicdefault, generic=True,
1218 1221 )
1219 1222 coreconfigitem(
1220 1223 b'trusted', b'groups', default=list,
1221 1224 )
1222 1225 coreconfigitem(
1223 1226 b'trusted', b'users', default=list,
1224 1227 )
1225 1228 coreconfigitem(
1226 1229 b'ui', b'_usedassubrepo', default=False,
1227 1230 )
1228 1231 coreconfigitem(
1229 1232 b'ui', b'allowemptycommit', default=False,
1230 1233 )
1231 1234 coreconfigitem(
1232 1235 b'ui', b'archivemeta', default=True,
1233 1236 )
1234 1237 coreconfigitem(
1235 1238 b'ui', b'askusername', default=False,
1236 1239 )
1237 1240 coreconfigitem(
1238 1241 b'ui', b'clonebundlefallback', default=False,
1239 1242 )
1240 1243 coreconfigitem(
1241 1244 b'ui', b'clonebundleprefers', default=list,
1242 1245 )
1243 1246 coreconfigitem(
1244 1247 b'ui', b'clonebundles', default=True,
1245 1248 )
1246 1249 coreconfigitem(
1247 1250 b'ui', b'color', default=b'auto',
1248 1251 )
1249 1252 coreconfigitem(
1250 1253 b'ui', b'commitsubrepos', default=False,
1251 1254 )
1252 1255 coreconfigitem(
1253 1256 b'ui', b'debug', default=False,
1254 1257 )
1255 1258 coreconfigitem(
1256 1259 b'ui', b'debugger', default=None,
1257 1260 )
1258 1261 coreconfigitem(
1259 1262 b'ui', b'editor', default=dynamicdefault,
1260 1263 )
1261 1264 coreconfigitem(
1262 1265 b'ui', b'fallbackencoding', default=None,
1263 1266 )
1264 1267 coreconfigitem(
1265 1268 b'ui', b'forcecwd', default=None,
1266 1269 )
1267 1270 coreconfigitem(
1268 1271 b'ui', b'forcemerge', default=None,
1269 1272 )
1270 1273 coreconfigitem(
1271 1274 b'ui', b'formatdebug', default=False,
1272 1275 )
1273 1276 coreconfigitem(
1274 1277 b'ui', b'formatjson', default=False,
1275 1278 )
1276 1279 coreconfigitem(
1277 1280 b'ui', b'formatted', default=None,
1278 1281 )
1279 1282 coreconfigitem(
1280 1283 b'ui', b'graphnodetemplate', default=None,
1281 1284 )
1282 1285 coreconfigitem(
1283 1286 b'ui', b'interactive', default=None,
1284 1287 )
1285 1288 coreconfigitem(
1286 1289 b'ui', b'interface', default=None,
1287 1290 )
1288 1291 coreconfigitem(
1289 1292 b'ui', b'interface.chunkselector', default=None,
1290 1293 )
1291 1294 coreconfigitem(
1292 1295 b'ui', b'large-file-limit', default=10000000,
1293 1296 )
1294 1297 coreconfigitem(
1295 1298 b'ui', b'logblockedtimes', default=False,
1296 1299 )
1297 1300 coreconfigitem(
1298 1301 b'ui', b'logtemplate', default=None,
1299 1302 )
1300 1303 coreconfigitem(
1301 1304 b'ui', b'merge', default=None,
1302 1305 )
1303 1306 coreconfigitem(
1304 1307 b'ui', b'mergemarkers', default=b'basic',
1305 1308 )
1306 1309 coreconfigitem(
1307 1310 b'ui',
1308 1311 b'mergemarkertemplate',
1309 1312 default=(
1310 1313 b'{node|short} '
1311 1314 b'{ifeq(tags, "tip", "", '
1312 1315 b'ifeq(tags, "", "", "{tags} "))}'
1313 1316 b'{if(bookmarks, "{bookmarks} ")}'
1314 1317 b'{ifeq(branch, "default", "", "{branch} ")}'
1315 1318 b'- {author|user}: {desc|firstline}'
1316 1319 ),
1317 1320 )
1318 1321 coreconfigitem(
1319 1322 b'ui', b'message-output', default=b'stdio',
1320 1323 )
1321 1324 coreconfigitem(
1322 1325 b'ui', b'nontty', default=False,
1323 1326 )
1324 1327 coreconfigitem(
1325 1328 b'ui', b'origbackuppath', default=None,
1326 1329 )
1327 1330 coreconfigitem(
1328 1331 b'ui', b'paginate', default=True,
1329 1332 )
1330 1333 coreconfigitem(
1331 1334 b'ui', b'patch', default=None,
1332 1335 )
1333 1336 coreconfigitem(
1334 1337 b'ui', b'pre-merge-tool-output-template', default=None,
1335 1338 )
1336 1339 coreconfigitem(
1337 1340 b'ui', b'portablefilenames', default=b'warn',
1338 1341 )
1339 1342 coreconfigitem(
1340 1343 b'ui', b'promptecho', default=False,
1341 1344 )
1342 1345 coreconfigitem(
1343 1346 b'ui', b'quiet', default=False,
1344 1347 )
1345 1348 coreconfigitem(
1346 1349 b'ui', b'quietbookmarkmove', default=False,
1347 1350 )
1348 1351 coreconfigitem(
1349 1352 b'ui', b'relative-paths', default=b'legacy',
1350 1353 )
1351 1354 coreconfigitem(
1352 1355 b'ui', b'remotecmd', default=b'hg',
1353 1356 )
1354 1357 coreconfigitem(
1355 1358 b'ui', b'report_untrusted', default=True,
1356 1359 )
1357 1360 coreconfigitem(
1358 1361 b'ui', b'rollback', default=True,
1359 1362 )
1360 1363 coreconfigitem(
1361 1364 b'ui', b'signal-safe-lock', default=True,
1362 1365 )
1363 1366 coreconfigitem(
1364 1367 b'ui', b'slash', default=False,
1365 1368 )
1366 1369 coreconfigitem(
1367 1370 b'ui', b'ssh', default=b'ssh',
1368 1371 )
1369 1372 coreconfigitem(
1370 1373 b'ui', b'ssherrorhint', default=None,
1371 1374 )
1372 1375 coreconfigitem(
1373 1376 b'ui', b'statuscopies', default=False,
1374 1377 )
1375 1378 coreconfigitem(
1376 1379 b'ui', b'strict', default=False,
1377 1380 )
1378 1381 coreconfigitem(
1379 1382 b'ui', b'style', default=b'',
1380 1383 )
1381 1384 coreconfigitem(
1382 1385 b'ui', b'supportcontact', default=None,
1383 1386 )
1384 1387 coreconfigitem(
1385 1388 b'ui', b'textwidth', default=78,
1386 1389 )
1387 1390 coreconfigitem(
1388 1391 b'ui', b'timeout', default=b'600',
1389 1392 )
1390 1393 coreconfigitem(
1391 1394 b'ui', b'timeout.warn', default=0,
1392 1395 )
1393 1396 coreconfigitem(
1394 1397 b'ui', b'traceback', default=False,
1395 1398 )
1396 1399 coreconfigitem(
1397 1400 b'ui', b'tweakdefaults', default=False,
1398 1401 )
1399 1402 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
1400 1403 coreconfigitem(
1401 1404 b'ui', b'verbose', default=False,
1402 1405 )
1403 1406 coreconfigitem(
1404 1407 b'verify', b'skipflags', default=None,
1405 1408 )
1406 1409 coreconfigitem(
1407 1410 b'web', b'allowbz2', default=False,
1408 1411 )
1409 1412 coreconfigitem(
1410 1413 b'web', b'allowgz', default=False,
1411 1414 )
1412 1415 coreconfigitem(
1413 1416 b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
1414 1417 )
1415 1418 coreconfigitem(
1416 1419 b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
1417 1420 )
1418 1421 coreconfigitem(
1419 1422 b'web', b'allowzip', default=False,
1420 1423 )
1421 1424 coreconfigitem(
1422 1425 b'web', b'archivesubrepos', default=False,
1423 1426 )
1424 1427 coreconfigitem(
1425 1428 b'web', b'cache', default=True,
1426 1429 )
1427 1430 coreconfigitem(
1428 1431 b'web', b'comparisoncontext', default=5,
1429 1432 )
1430 1433 coreconfigitem(
1431 1434 b'web', b'contact', default=None,
1432 1435 )
1433 1436 coreconfigitem(
1434 1437 b'web', b'deny_push', default=list,
1435 1438 )
1436 1439 coreconfigitem(
1437 1440 b'web', b'guessmime', default=False,
1438 1441 )
1439 1442 coreconfigitem(
1440 1443 b'web', b'hidden', default=False,
1441 1444 )
1442 1445 coreconfigitem(
1443 1446 b'web', b'labels', default=list,
1444 1447 )
1445 1448 coreconfigitem(
1446 1449 b'web', b'logoimg', default=b'hglogo.png',
1447 1450 )
1448 1451 coreconfigitem(
1449 1452 b'web', b'logourl', default=b'https://mercurial-scm.org/',
1450 1453 )
1451 1454 coreconfigitem(
1452 1455 b'web', b'accesslog', default=b'-',
1453 1456 )
1454 1457 coreconfigitem(
1455 1458 b'web', b'address', default=b'',
1456 1459 )
1457 1460 coreconfigitem(
1458 1461 b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
1459 1462 )
1460 1463 coreconfigitem(
1461 1464 b'web', b'allow_read', default=list,
1462 1465 )
1463 1466 coreconfigitem(
1464 1467 b'web', b'baseurl', default=None,
1465 1468 )
1466 1469 coreconfigitem(
1467 1470 b'web', b'cacerts', default=None,
1468 1471 )
1469 1472 coreconfigitem(
1470 1473 b'web', b'certificate', default=None,
1471 1474 )
1472 1475 coreconfigitem(
1473 1476 b'web', b'collapse', default=False,
1474 1477 )
1475 1478 coreconfigitem(
1476 1479 b'web', b'csp', default=None,
1477 1480 )
1478 1481 coreconfigitem(
1479 1482 b'web', b'deny_read', default=list,
1480 1483 )
1481 1484 coreconfigitem(
1482 1485 b'web', b'descend', default=True,
1483 1486 )
1484 1487 coreconfigitem(
1485 1488 b'web', b'description', default=b"",
1486 1489 )
1487 1490 coreconfigitem(
1488 1491 b'web', b'encoding', default=lambda: encoding.encoding,
1489 1492 )
1490 1493 coreconfigitem(
1491 1494 b'web', b'errorlog', default=b'-',
1492 1495 )
1493 1496 coreconfigitem(
1494 1497 b'web', b'ipv6', default=False,
1495 1498 )
1496 1499 coreconfigitem(
1497 1500 b'web', b'maxchanges', default=10,
1498 1501 )
1499 1502 coreconfigitem(
1500 1503 b'web', b'maxfiles', default=10,
1501 1504 )
1502 1505 coreconfigitem(
1503 1506 b'web', b'maxshortchanges', default=60,
1504 1507 )
1505 1508 coreconfigitem(
1506 1509 b'web', b'motd', default=b'',
1507 1510 )
1508 1511 coreconfigitem(
1509 1512 b'web', b'name', default=dynamicdefault,
1510 1513 )
1511 1514 coreconfigitem(
1512 1515 b'web', b'port', default=8000,
1513 1516 )
1514 1517 coreconfigitem(
1515 1518 b'web', b'prefix', default=b'',
1516 1519 )
1517 1520 coreconfigitem(
1518 1521 b'web', b'push_ssl', default=True,
1519 1522 )
1520 1523 coreconfigitem(
1521 1524 b'web', b'refreshinterval', default=20,
1522 1525 )
1523 1526 coreconfigitem(
1524 1527 b'web', b'server-header', default=None,
1525 1528 )
1526 1529 coreconfigitem(
1527 1530 b'web', b'static', default=None,
1528 1531 )
1529 1532 coreconfigitem(
1530 1533 b'web', b'staticurl', default=None,
1531 1534 )
1532 1535 coreconfigitem(
1533 1536 b'web', b'stripes', default=1,
1534 1537 )
1535 1538 coreconfigitem(
1536 1539 b'web', b'style', default=b'paper',
1537 1540 )
1538 1541 coreconfigitem(
1539 1542 b'web', b'templates', default=None,
1540 1543 )
1541 1544 coreconfigitem(
1542 1545 b'web', b'view', default=b'served', experimental=True,
1543 1546 )
1544 1547 coreconfigitem(
1545 1548 b'worker', b'backgroundclose', default=dynamicdefault,
1546 1549 )
1547 1550 # Windows defaults to a limit of 512 open files. A buffer of 128
1548 1551 # should give us enough headway.
1549 1552 coreconfigitem(
1550 1553 b'worker', b'backgroundclosemaxqueue', default=384,
1551 1554 )
1552 1555 coreconfigitem(
1553 1556 b'worker', b'backgroundcloseminfilecount', default=2048,
1554 1557 )
1555 1558 coreconfigitem(
1556 1559 b'worker', b'backgroundclosethreadcount', default=4,
1557 1560 )
1558 1561 coreconfigitem(
1559 1562 b'worker', b'enabled', default=True,
1560 1563 )
1561 1564 coreconfigitem(
1562 1565 b'worker', b'numcpus', default=None,
1563 1566 )
1564 1567
1565 1568 # Rebase-related configuration moved to core because other extensions are doing
1566 1569 # strange things. For example, shelve imports the extension to reuse some bits
1567 1570 # without formally loading it.
1568 1571 coreconfigitem(
1569 1572 b'commands', b'rebase.requiredest', default=False,
1570 1573 )
1571 1574 coreconfigitem(
1572 1575 b'experimental', b'rebaseskipobsolete', default=True,
1573 1576 )
1574 1577 coreconfigitem(
1575 1578 b'rebase', b'singletransaction', default=False,
1576 1579 )
1577 1580 coreconfigitem(
1578 1581 b'rebase', b'experimental.inmemory', default=False,
1579 1582 )
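The registrations above only declare defaults and metadata; they take effect when an option is read through the ui config accessors. A minimal sketch of the consuming side, assuming the standard ui.config/ui.configbool helpers and a repo object obtained elsewhere:

    # the registered default is returned when nothing is set in any hgrc
    use_nodemap = repo.ui.configbool(b'experimental', b'exp-persistent-nodemap')
    mode = repo.ui.config(b'experimental', b'exp-persistent-nodemap.mode')
    # on an untouched setup: use_nodemap is False and mode is b'compat'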
@@ -1,3819 +1,3821
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 )
25 25 from .pycompat import (
26 26 delattr,
27 27 getattr,
28 28 )
29 29 from . import (
30 30 bookmarks,
31 31 branchmap,
32 32 bundle2,
33 33 changegroup,
34 34 color,
35 35 context,
36 36 dirstate,
37 37 dirstateguard,
38 38 discovery,
39 39 encoding,
40 40 error,
41 41 exchange,
42 42 extensions,
43 43 filelog,
44 44 hook,
45 45 lock as lockmod,
46 46 match as matchmod,
47 47 merge as mergemod,
48 48 mergeutil,
49 49 namespaces,
50 50 narrowspec,
51 51 obsolete,
52 52 pathutil,
53 53 phases,
54 54 pushkey,
55 55 pycompat,
56 56 rcutil,
57 57 repoview,
58 58 revset,
59 59 revsetlang,
60 60 scmutil,
61 61 sparse,
62 62 store as storemod,
63 63 subrepoutil,
64 64 tags as tagsmod,
65 65 transaction,
66 66 txnutil,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70
71 71 from .interfaces import (
72 72 repository,
73 73 util as interfaceutil,
74 74 )
75 75
76 76 from .utils import (
77 77 hashutil,
78 78 procutil,
79 79 stringutil,
80 80 )
81 81
82 82 from .revlogutils import constants as revlogconst
83 83
84 84 release = lockmod.release
85 85 urlerr = util.urlerr
86 86 urlreq = util.urlreq
87 87
88 88 # set of (path, vfs-location) tuples. vfs-location is:
89 89 # - 'plain' for vfs relative paths
90 90 # - '' for svfs relative paths
91 91 _cachedfiles = set()
92 92
93 93
94 94 class _basefilecache(scmutil.filecache):
95 95 """All filecache usage on repo are done for logic that should be unfiltered
96 96 """
97 97
98 98 def __get__(self, repo, type=None):
99 99 if repo is None:
100 100 return self
101 101 # proxy to unfiltered __dict__ since filtered repo has no entry
102 102 unfi = repo.unfiltered()
103 103 try:
104 104 return unfi.__dict__[self.sname]
105 105 except KeyError:
106 106 pass
107 107 return super(_basefilecache, self).__get__(unfi, type)
108 108
109 109 def set(self, repo, value):
110 110 return super(_basefilecache, self).set(repo.unfiltered(), value)
111 111
112 112
113 113 class repofilecache(_basefilecache):
114 114 """filecache for files in .hg but outside of .hg/store"""
115 115
116 116 def __init__(self, *paths):
117 117 super(repofilecache, self).__init__(*paths)
118 118 for path in paths:
119 119 _cachedfiles.add((path, b'plain'))
120 120
121 121 def join(self, obj, fname):
122 122 return obj.vfs.join(fname)
123 123
124 124
125 125 class storecache(_basefilecache):
126 126 """filecache for files in the store"""
127 127
128 128 def __init__(self, *paths):
129 129 super(storecache, self).__init__(*paths)
130 130 for path in paths:
131 131 _cachedfiles.add((path, b''))
132 132
133 133 def join(self, obj, fname):
134 134 return obj.sjoin(fname)
135 135
136 136
137 137 class mixedrepostorecache(_basefilecache):
138 138 """filecache for a mix files in .hg/store and outside"""
139 139
140 140 def __init__(self, *pathsandlocations):
141 141 # scmutil.filecache only uses the path for passing back into our
142 142 # join(), so we can safely pass a list of paths and locations
143 143 super(mixedrepostorecache, self).__init__(*pathsandlocations)
144 144 _cachedfiles.update(pathsandlocations)
145 145
146 146 def join(self, obj, fnameandlocation):
147 147 fname, location = fnameandlocation
148 148 if location == b'plain':
149 149 return obj.vfs.join(fname)
150 150 else:
151 151 if location != b'':
152 152 raise error.ProgrammingError(
153 153 b'unexpected location: %s' % location
154 154 )
155 155 return obj.sjoin(fname)
156 156
157 157
158 158 def isfilecached(repo, name):
159 159 """check if a repo has already cached "name" filecache-ed property
160 160
161 161 This returns (cachedobj-or-None, iscached) tuple.
162 162 """
163 163 cacheentry = repo.unfiltered()._filecache.get(name, None)
164 164 if not cacheentry:
165 165 return None, False
166 166 return cacheentry.obj, True
167 167
168 168
169 169 class unfilteredpropertycache(util.propertycache):
170 170 """propertycache that apply to unfiltered repo only"""
171 171
172 172 def __get__(self, repo, type=None):
173 173 unfi = repo.unfiltered()
174 174 if unfi is repo:
175 175 return super(unfilteredpropertycache, self).__get__(unfi)
176 176 return getattr(unfi, self.name)
177 177
178 178
179 179 class filteredpropertycache(util.propertycache):
180 180 """propertycache that must take filtering in account"""
181 181
182 182 def cachevalue(self, obj, value):
183 183 object.__setattr__(obj, self.name, value)
184 184
185 185
186 186 def hasunfilteredcache(repo, name):
187 187 """check if a repo has an unfilteredpropertycache value for <name>"""
188 188 return name in vars(repo.unfiltered())
189 189
190 190
191 191 def unfilteredmethod(orig):
192 192 """decorate method that always need to be run on unfiltered version"""
193 193
194 194 def wrapper(repo, *args, **kwargs):
195 195 return orig(repo.unfiltered(), *args, **kwargs)
196 196
197 197 return wrapper
198 198
199 199
200 200 moderncaps = {
201 201 b'lookup',
202 202 b'branchmap',
203 203 b'pushkey',
204 204 b'known',
205 205 b'getbundle',
206 206 b'unbundle',
207 207 }
208 208 legacycaps = moderncaps.union({b'changegroupsubset'})
209 209
210 210
211 211 @interfaceutil.implementer(repository.ipeercommandexecutor)
212 212 class localcommandexecutor(object):
213 213 def __init__(self, peer):
214 214 self._peer = peer
215 215 self._sent = False
216 216 self._closed = False
217 217
218 218 def __enter__(self):
219 219 return self
220 220
221 221 def __exit__(self, exctype, excvalue, exctb):
222 222 self.close()
223 223
224 224 def callcommand(self, command, args):
225 225 if self._sent:
226 226 raise error.ProgrammingError(
227 227 b'callcommand() cannot be used after sendcommands()'
228 228 )
229 229
230 230 if self._closed:
231 231 raise error.ProgrammingError(
232 232 b'callcommand() cannot be used after close()'
233 233 )
234 234
235 235 # We don't need to support anything fancy. Just call the named
236 236 # method on the peer and return a resolved future.
237 237 fn = getattr(self._peer, pycompat.sysstr(command))
238 238
239 239 f = pycompat.futures.Future()
240 240
241 241 try:
242 242 result = fn(**pycompat.strkwargs(args))
243 243 except Exception:
244 244 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
245 245 else:
246 246 f.set_result(result)
247 247
248 248 return f
249 249
250 250 def sendcommands(self):
251 251 self._sent = True
252 252
253 253 def close(self):
254 254 self._closed = True
255 255
256 256
257 257 @interfaceutil.implementer(repository.ipeercommands)
258 258 class localpeer(repository.peer):
259 259 '''peer for a local repo; reflects only the most recent API'''
260 260
261 261 def __init__(self, repo, caps=None):
262 262 super(localpeer, self).__init__()
263 263
264 264 if caps is None:
265 265 caps = moderncaps.copy()
266 266 self._repo = repo.filtered(b'served')
267 267 self.ui = repo.ui
268 268 self._caps = repo._restrictcapabilities(caps)
269 269
270 270 # Begin of _basepeer interface.
271 271
272 272 def url(self):
273 273 return self._repo.url()
274 274
275 275 def local(self):
276 276 return self._repo
277 277
278 278 def peer(self):
279 279 return self
280 280
281 281 def canpush(self):
282 282 return True
283 283
284 284 def close(self):
285 285 self._repo.close()
286 286
287 287 # End of _basepeer interface.
288 288
289 289 # Begin of _basewirecommands interface.
290 290
291 291 def branchmap(self):
292 292 return self._repo.branchmap()
293 293
294 294 def capabilities(self):
295 295 return self._caps
296 296
297 297 def clonebundles(self):
298 298 return self._repo.tryread(b'clonebundles.manifest')
299 299
300 300 def debugwireargs(self, one, two, three=None, four=None, five=None):
301 301 """Used to test argument passing over the wire"""
302 302 return b"%s %s %s %s %s" % (
303 303 one,
304 304 two,
305 305 pycompat.bytestr(three),
306 306 pycompat.bytestr(four),
307 307 pycompat.bytestr(five),
308 308 )
309 309
310 310 def getbundle(
311 311 self, source, heads=None, common=None, bundlecaps=None, **kwargs
312 312 ):
313 313 chunks = exchange.getbundlechunks(
314 314 self._repo,
315 315 source,
316 316 heads=heads,
317 317 common=common,
318 318 bundlecaps=bundlecaps,
319 319 **kwargs
320 320 )[1]
321 321 cb = util.chunkbuffer(chunks)
322 322
323 323 if exchange.bundle2requested(bundlecaps):
324 324 # When requesting a bundle2, getbundle returns a stream to make the
325 325 # wire level function happier. We need to build a proper object
326 326 # from it in local peer.
327 327 return bundle2.getunbundler(self.ui, cb)
328 328 else:
329 329 return changegroup.getunbundler(b'01', cb, None)
330 330
331 331 def heads(self):
332 332 return self._repo.heads()
333 333
334 334 def known(self, nodes):
335 335 return self._repo.known(nodes)
336 336
337 337 def listkeys(self, namespace):
338 338 return self._repo.listkeys(namespace)
339 339
340 340 def lookup(self, key):
341 341 return self._repo.lookup(key)
342 342
343 343 def pushkey(self, namespace, key, old, new):
344 344 return self._repo.pushkey(namespace, key, old, new)
345 345
346 346 def stream_out(self):
347 347 raise error.Abort(_(b'cannot perform stream clone against local peer'))
348 348
349 349 def unbundle(self, bundle, heads, url):
350 350 """apply a bundle on a repo
351 351
352 352 This function handles the repo locking itself."""
353 353 try:
354 354 try:
355 355 bundle = exchange.readbundle(self.ui, bundle, None)
356 356 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
357 357 if util.safehasattr(ret, b'getchunks'):
358 358 # This is a bundle20 object, turn it into an unbundler.
359 359 # This little dance should be dropped eventually when the
360 360 # API is finally improved.
361 361 stream = util.chunkbuffer(ret.getchunks())
362 362 ret = bundle2.getunbundler(self.ui, stream)
363 363 return ret
364 364 except Exception as exc:
365 365 # If the exception contains output salvaged from a bundle2
366 366 # reply, we need to make sure it is printed before continuing
367 367 # to fail. So we build a bundle2 with such output and consume
368 368 # it directly.
369 369 #
370 370 # This is not very elegant but allows a "simple" solution for
371 371 # issue4594
372 372 output = getattr(exc, '_bundle2salvagedoutput', ())
373 373 if output:
374 374 bundler = bundle2.bundle20(self._repo.ui)
375 375 for out in output:
376 376 bundler.addpart(out)
377 377 stream = util.chunkbuffer(bundler.getchunks())
378 378 b = bundle2.getunbundler(self.ui, stream)
379 379 bundle2.processbundle(self._repo, b)
380 380 raise
381 381 except error.PushRaced as exc:
382 382 raise error.ResponseError(
383 383 _(b'push failed:'), stringutil.forcebytestr(exc)
384 384 )
385 385
386 386 # End of _basewirecommands interface.
387 387
388 388 # Begin of peer interface.
389 389
390 390 def commandexecutor(self):
391 391 return localcommandexecutor(self)
392 392
393 393 # End of peer interface.
394 394
395 395
396 396 @interfaceutil.implementer(repository.ipeerlegacycommands)
397 397 class locallegacypeer(localpeer):
398 398 '''peer extension which implements legacy methods too; used for tests with
399 399 restricted capabilities'''
400 400
401 401 def __init__(self, repo):
402 402 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
403 403
404 404 # Begin of baselegacywirecommands interface.
405 405
406 406 def between(self, pairs):
407 407 return self._repo.between(pairs)
408 408
409 409 def branches(self, nodes):
410 410 return self._repo.branches(nodes)
411 411
412 412 def changegroup(self, nodes, source):
413 413 outgoing = discovery.outgoing(
414 414 self._repo, missingroots=nodes, missingheads=self._repo.heads()
415 415 )
416 416 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
417 417
418 418 def changegroupsubset(self, bases, heads, source):
419 419 outgoing = discovery.outgoing(
420 420 self._repo, missingroots=bases, missingheads=heads
421 421 )
422 422 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
423 423
424 424 # End of baselegacywirecommands interface.
425 425
426 426
427 427 # Increment the sub-version when the revlog v2 format changes to lock out old
428 428 # clients.
429 429 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
430 430
431 431 # A repository with the sparserevlog feature will have delta chains that
432 432 # can spread over a larger span. Sparse reading cuts these large spans into
433 433 # pieces, so that each piece isn't too big.
434 434 # Without the sparserevlog capability, reading from the repository could use
435 435 # huge amounts of memory, because the whole span would be read at once,
436 436 # including all the intermediate revisions that aren't pertinent for the chain.
437 437 # This is why once a repository has enabled sparse-read, it becomes required.
438 438 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
439 439
440 440 # A repository with the sidedataflag requirement will allow storing extra
441 441 # information for revisions without altering their original hashes.
442 442 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
443 443
444 444 # A repository with the copies-sidedata-changeset requirement will store
445 445 # copies related information in changeset's sidedata.
446 446 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
447 447
448 448 # Functions receiving (ui, features) that extensions can register to impact
449 449 # the ability to load repositories with custom requirements. Only
450 450 # functions defined in loaded extensions are called.
451 451 #
452 452 # The function receives a set of requirement strings that the repository
453 453 # is capable of opening. Functions will typically add elements to the
454 454 # set to reflect that the extension knows how to handle those requirements.
455 455 featuresetupfuncs = set()
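# Illustrative sketch (assumed extension code, not part of this file): an
# extension would typically register its callback like
#
#   def featuresetup(ui, supported):
#       supported |= {b'exp-myfeature'}   # hypothetical requirement name
#
#   localrepo.featuresetupfuncs.add(featuresetup)
#
# so repositories carrying that requirement can still be opened.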
456 456
457 457
458 458 def makelocalrepository(baseui, path, intents=None):
459 459 """Create a local repository object.
460 460
461 461 Given arguments needed to construct a local repository, this function
462 462 performs various early repository loading functionality (such as
463 463 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
464 464 the repository can be opened, derives a type suitable for representing
465 465 that repository, and returns an instance of it.
466 466
467 467 The returned object conforms to the ``repository.completelocalrepository``
468 468 interface.
469 469
470 470 The repository type is derived by calling a series of factory functions
471 471 for each aspect/interface of the final repository. These are defined by
472 472 ``REPO_INTERFACES``.
473 473
474 474 Each factory function is called to produce a type implementing a specific
475 475 interface. The cumulative list of returned types will be combined into a
476 476 new type and that type will be instantiated to represent the local
477 477 repository.
478 478
479 479 The factory functions each receive various state that may be consulted
480 480 as part of deriving a type.
481 481
482 482 Extensions should wrap these factory functions to customize repository type
483 483 creation. Note that an extension's wrapped function may be called even if
484 484 that extension is not loaded for the repo being constructed. Extensions
485 485 should check if their ``__name__`` appears in the
486 486 ``extensionmodulenames`` set passed to the factory function and no-op if
487 487 not.
488 488 """
489 489 ui = baseui.copy()
490 490 # Prevent copying repo configuration.
491 491 ui.copy = baseui.copy
492 492
493 493 # Working directory VFS rooted at repository root.
494 494 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
495 495
496 496 # Main VFS for .hg/ directory.
497 497 hgpath = wdirvfs.join(b'.hg')
498 498 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
499 499
500 500 # The .hg/ path should exist and should be a directory. All other
501 501 # cases are errors.
502 502 if not hgvfs.isdir():
503 503 try:
504 504 hgvfs.stat()
505 505 except OSError as e:
506 506 if e.errno != errno.ENOENT:
507 507 raise
508 508
509 509 raise error.RepoError(_(b'repository %s not found') % path)
510 510
511 511 # .hg/requires file contains a newline-delimited list of
512 512 # features/capabilities the opener (us) must have in order to use
513 513 # the repository. This file was introduced in Mercurial 0.9.2,
514 514 # which means very old repositories may not have one. We assume
515 515 # a missing file translates to no requirements.
516 516 try:
517 517 requirements = set(hgvfs.read(b'requires').splitlines())
518 518 except IOError as e:
519 519 if e.errno != errno.ENOENT:
520 520 raise
521 521 requirements = set()
522 522
523 523 # The .hg/hgrc file may load extensions or contain config options
524 524 # that influence repository construction. Attempt to load it and
525 525 # process any new extensions that it may have pulled in.
526 526 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
527 527 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
528 528 extensions.loadall(ui)
529 529 extensions.populateui(ui)
530 530
531 531 # Set of module names of extensions loaded for this repository.
532 532 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
533 533
534 534 supportedrequirements = gathersupportedrequirements(ui)
535 535
536 536 # We first validate the requirements are known.
537 537 ensurerequirementsrecognized(requirements, supportedrequirements)
538 538
539 539 # Then we validate that the known set is reasonable to use together.
540 540 ensurerequirementscompatible(ui, requirements)
541 541
542 542 # TODO there are unhandled edge cases related to opening repositories with
543 543 # shared storage. If storage is shared, we should also test for requirements
544 544 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
545 545 # that repo, as that repo may load extensions needed to open it. This is a
546 546 # bit complicated because we don't want the other hgrc to overwrite settings
547 547 # in this hgrc.
548 548 #
549 549 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
550 550 # file when sharing repos. But if a requirement is added after the share is
551 551 # performed, thereby introducing a new requirement for the opener, we may
552 552 # not see that and could encounter a run-time error interacting with
553 553 # that shared store since it has an unknown-to-us requirement.
554 554
555 555 # At this point, we know we should be capable of opening the repository.
556 556 # Now get on with doing that.
557 557
558 558 features = set()
559 559
560 560 # The "store" part of the repository holds versioned data. How it is
561 561 # accessed is determined by various requirements. The ``shared`` or
562 562 # ``relshared`` requirements indicate the store lives in the path contained
563 563 # in the ``.hg/sharedpath`` file. This is an absolute path for
564 564 # ``shared`` and relative to ``.hg/`` for ``relshared``.
565 565 if b'shared' in requirements or b'relshared' in requirements:
566 566 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
567 567 if b'relshared' in requirements:
568 568 sharedpath = hgvfs.join(sharedpath)
569 569
570 570 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
571 571
572 572 if not sharedvfs.exists():
573 573 raise error.RepoError(
574 574 _(b'.hg/sharedpath points to nonexistent directory %s')
575 575 % sharedvfs.base
576 576 )
577 577
578 578 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
579 579
580 580 storebasepath = sharedvfs.base
581 581 cachepath = sharedvfs.join(b'cache')
582 582 else:
583 583 storebasepath = hgvfs.base
584 584 cachepath = hgvfs.join(b'cache')
585 585 wcachepath = hgvfs.join(b'wcache')
586 586
587 587 # The store has changed over time and the exact layout is dictated by
588 588 # requirements. The store interface abstracts differences across all
589 589 # of them.
590 590 store = makestore(
591 591 requirements,
592 592 storebasepath,
593 593 lambda base: vfsmod.vfs(base, cacheaudited=True),
594 594 )
595 595 hgvfs.createmode = store.createmode
596 596
597 597 storevfs = store.vfs
598 598 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
599 599
600 600 # The cache vfs is used to manage cache files.
601 601 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
602 602 cachevfs.createmode = store.createmode
603 603 # The cache vfs is used to manage cache files related to the working copy
604 604 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
605 605 wcachevfs.createmode = store.createmode
606 606
607 607 # Now resolve the type for the repository object. We do this by repeatedly
608 608 # calling a factory function to produce types for specific aspects of the
609 609 # repo's operation. The aggregate returned types are used as base classes
610 610 # for a dynamically-derived type, which will represent our new repository.
611 611
612 612 bases = []
613 613 extrastate = {}
614 614
615 615 for iface, fn in REPO_INTERFACES:
616 616 # We pass all potentially useful state to give extensions tons of
617 617 # flexibility.
618 618 typ = fn()(
619 619 ui=ui,
620 620 intents=intents,
621 621 requirements=requirements,
622 622 features=features,
623 623 wdirvfs=wdirvfs,
624 624 hgvfs=hgvfs,
625 625 store=store,
626 626 storevfs=storevfs,
627 627 storeoptions=storevfs.options,
628 628 cachevfs=cachevfs,
629 629 wcachevfs=wcachevfs,
630 630 extensionmodulenames=extensionmodulenames,
631 631 extrastate=extrastate,
632 632 baseclasses=bases,
633 633 )
634 634
635 635 if not isinstance(typ, type):
636 636 raise error.ProgrammingError(
637 637 b'unable to construct type for %s' % iface
638 638 )
639 639
640 640 bases.append(typ)
641 641
642 642 # type() allows you to use characters in type names that wouldn't be
643 643 # recognized as Python symbols in source code. We abuse that to add
644 644 # rich information about our constructed repo.
645 645 name = pycompat.sysstr(
646 646 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
647 647 )
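# For example (illustrative values only), a repository at /srv/repo with a
# typical requirements set would get a type name roughly like
# "derivedrepo:/srv/repo<dotencode,fncache,generaldelta,revlogv1,store>".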
648 648
649 649 cls = type(name, tuple(bases), {})
650 650
651 651 return cls(
652 652 baseui=baseui,
653 653 ui=ui,
654 654 origroot=path,
655 655 wdirvfs=wdirvfs,
656 656 hgvfs=hgvfs,
657 657 requirements=requirements,
658 658 supportedrequirements=supportedrequirements,
659 659 sharedpath=storebasepath,
660 660 store=store,
661 661 cachevfs=cachevfs,
662 662 wcachevfs=wcachevfs,
663 663 features=features,
664 664 intents=intents,
665 665 )
666 666
667 667
668 668 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
669 669 """Load hgrc files/content into a ui instance.
670 670
671 671 This is called during repository opening to load any additional
672 672 config files or settings relevant to the current repository.
673 673
674 674 Returns a bool indicating whether any additional configs were loaded.
675 675
676 676 Extensions should monkeypatch this function to modify how per-repo
677 677 configs are loaded. For example, an extension may wish to pull in
678 678 configs from alternate files or sources.
679 679 """
680 680 if not rcutil.use_repo_hgrc():
681 681 return False
682 682 try:
683 683 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
684 684 return True
685 685 except IOError:
686 686 return False
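# Illustrative sketch (assumption, not part of the original file): an
# extension that wants to pull per-repo config from another source could
# wrap this function, for instance:
#
#   def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#       loaded = orig(ui, wdirvfs, hgvfs, requirements)
#       # b'hgrc-extra' is a made-up file name for the example
#       ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#       return True
#
#   extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)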
687 687
688 688
689 689 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
690 690 """Perform additional actions after .hg/hgrc is loaded.
691 691
692 692 This function is called during repository loading immediately after
693 693 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
694 694
695 695 The function can be used to validate configs, automatically add
696 696 options (including extensions) based on requirements, etc.
697 697 """
698 698
699 699 # Map of requirements to list of extensions to load automatically when
700 700 # requirement is present.
701 701 autoextensions = {
702 702 b'git': [b'git'],
703 703 b'largefiles': [b'largefiles'],
704 704 b'lfs': [b'lfs'],
705 705 }
706 706
707 707 for requirement, names in sorted(autoextensions.items()):
708 708 if requirement not in requirements:
709 709 continue
710 710
711 711 for name in names:
712 712 if not ui.hasconfig(b'extensions', name):
713 713 ui.setconfig(b'extensions', name, b'', source=b'autoload')
714 714
715 715
716 716 def gathersupportedrequirements(ui):
717 717 """Determine the complete set of recognized requirements."""
718 718 # Start with all requirements supported by this file.
719 719 supported = set(localrepository._basesupported)
720 720
721 721 # Execute ``featuresetupfuncs`` entries if they belong to an extension
722 722 # relevant to this ui instance.
723 723 modules = {m.__name__ for n, m in extensions.extensions(ui)}
724 724
725 725 for fn in featuresetupfuncs:
726 726 if fn.__module__ in modules:
727 727 fn(ui, supported)
728 728
729 729 # Add derived requirements from registered compression engines.
730 730 for name in util.compengines:
731 731 engine = util.compengines[name]
732 732 if engine.available() and engine.revlogheader():
733 733 supported.add(b'exp-compression-%s' % name)
734 734 if engine.name() == b'zstd':
735 735 supported.add(b'revlog-compression-zstd')
736 736
737 737 return supported
738 738
739 739
740 740 def ensurerequirementsrecognized(requirements, supported):
741 741 """Validate that a set of local requirements is recognized.
742 742
743 743 Receives a set of requirements. Raises an ``error.RepoError`` if there
744 744 exists any requirement in that set that currently loaded code doesn't
745 745 recognize.
746 746
747 747 Returns a set of supported requirements.
748 748 """
749 749 missing = set()
750 750
751 751 for requirement in requirements:
752 752 if requirement in supported:
753 753 continue
754 754
755 755 if not requirement or not requirement[0:1].isalnum():
756 756 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
757 757
758 758 missing.add(requirement)
759 759
760 760 if missing:
761 761 raise error.RequirementError(
762 762 _(b'repository requires features unknown to this Mercurial: %s')
763 763 % b' '.join(sorted(missing)),
764 764 hint=_(
765 765 b'see https://mercurial-scm.org/wiki/MissingRequirement '
766 766 b'for more information'
767 767 ),
768 768 )
769 769
770 770
771 771 def ensurerequirementscompatible(ui, requirements):
772 772 """Validates that a set of recognized requirements is mutually compatible.
773 773
774 774 Some requirements may not be compatible with others or require
775 775 config options that aren't enabled. This function is called during
776 776 repository opening to ensure that the set of requirements needed
777 777 to open a repository is sane and compatible with config options.
778 778
779 779 Extensions can monkeypatch this function to perform additional
780 780 checking.
781 781
782 782 ``error.RepoError`` should be raised on failure.
783 783 """
784 784 if b'exp-sparse' in requirements and not sparse.enabled:
785 785 raise error.RepoError(
786 786 _(
787 787 b'repository is using sparse feature but '
788 788 b'sparse is not enabled; enable the '
789 789 b'"sparse" extensions to access'
790 790 )
791 791 )
792 792
793 793
794 794 def makestore(requirements, path, vfstype):
795 795 """Construct a storage object for a repository."""
796 796 if b'store' in requirements:
797 797 if b'fncache' in requirements:
798 798 return storemod.fncachestore(
799 799 path, vfstype, b'dotencode' in requirements
800 800 )
801 801
802 802 return storemod.encodedstore(path, vfstype)
803 803
804 804 return storemod.basicstore(path, vfstype)
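# For instance, a repository whose requirements include b'store', b'fncache'
# and b'dotencode' gets a ``fncachestore`` with dot-encoding enabled, while a
# requirements file listing none of them falls back to ``basicstore``.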
805 805
806 806
807 807 def resolvestorevfsoptions(ui, requirements, features):
808 808 """Resolve the options to pass to the store vfs opener.
809 809
810 810 The returned dict is used to influence behavior of the storage layer.
811 811 """
812 812 options = {}
813 813
814 814 if b'treemanifest' in requirements:
815 815 options[b'treemanifest'] = True
816 816
817 817 # experimental config: format.manifestcachesize
818 818 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
819 819 if manifestcachesize is not None:
820 820 options[b'manifestcachesize'] = manifestcachesize
821 821
822 822 # In the absence of another requirement superseding a revlog-related
823 823 # requirement, we have to assume the repo is using revlog version 0.
824 824 # This revlog format is super old and we don't bother trying to parse
825 825 # opener options for it because those options wouldn't do anything
826 826 # meaningful on such old repos.
827 827 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
828 828 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
829 829 else: # explicitly mark repo as using revlogv0
830 830 options[b'revlogv0'] = True
831 831
832 832 if COPIESSDC_REQUIREMENT in requirements:
833 833 options[b'copies-storage'] = b'changeset-sidedata'
834 834 else:
835 835 writecopiesto = ui.config(b'experimental', b'copies.write-to')
836 836 copiesextramode = (b'changeset-only', b'compatibility')
837 837 if writecopiesto in copiesextramode:
838 838 options[b'copies-storage'] = b'extra'
839 839
840 840 return options
841 841
842 842
843 843 def resolverevlogstorevfsoptions(ui, requirements, features):
844 844 """Resolve opener options specific to revlogs."""
845 845
846 846 options = {}
847 847 options[b'flagprocessors'] = {}
848 848
849 849 if b'revlogv1' in requirements:
850 850 options[b'revlogv1'] = True
851 851 if REVLOGV2_REQUIREMENT in requirements:
852 852 options[b'revlogv2'] = True
853 853
854 854 if b'generaldelta' in requirements:
855 855 options[b'generaldelta'] = True
856 856
857 857 # experimental config: format.chunkcachesize
858 858 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
859 859 if chunkcachesize is not None:
860 860 options[b'chunkcachesize'] = chunkcachesize
861 861
862 862 deltabothparents = ui.configbool(
863 863 b'storage', b'revlog.optimize-delta-parent-choice'
864 864 )
865 865 options[b'deltabothparents'] = deltabothparents
866 866
867 867 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
868 868 lazydeltabase = False
869 869 if lazydelta:
870 870 lazydeltabase = ui.configbool(
871 871 b'storage', b'revlog.reuse-external-delta-parent'
872 872 )
873 873 if lazydeltabase is None:
874 874 lazydeltabase = not scmutil.gddeltaconfig(ui)
875 875 options[b'lazydelta'] = lazydelta
876 876 options[b'lazydeltabase'] = lazydeltabase
877 877
878 878 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
879 879 if 0 <= chainspan:
880 880 options[b'maxdeltachainspan'] = chainspan
881 881
882 882 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
883 883 if mmapindexthreshold is not None:
884 884 options[b'mmapindexthreshold'] = mmapindexthreshold
885 885
886 886 withsparseread = ui.configbool(b'experimental', b'sparse-read')
887 887 srdensitythres = float(
888 888 ui.config(b'experimental', b'sparse-read.density-threshold')
889 889 )
890 890 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
891 891 options[b'with-sparse-read'] = withsparseread
892 892 options[b'sparse-read-density-threshold'] = srdensitythres
893 893 options[b'sparse-read-min-gap-size'] = srmingapsize
894 894
895 895 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
896 896 options[b'sparse-revlog'] = sparserevlog
897 897 if sparserevlog:
898 898 options[b'generaldelta'] = True
899 899
900 900 sidedata = SIDEDATA_REQUIREMENT in requirements
901 901 options[b'side-data'] = sidedata
902 902
903 903 maxchainlen = None
904 904 if sparserevlog:
905 905 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
906 906 # experimental config: format.maxchainlen
907 907 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
908 908 if maxchainlen is not None:
909 909 options[b'maxchainlen'] = maxchainlen
910 910
911 911 for r in requirements:
912 912 # we allow multiple compression engine requirements to co-exist because,
913 913 # strictly speaking, revlogs seem to support mixed compression styles.
914 914 #
915 915 # The compression used for new entries will be "the last one"
916 916 prefix = r.startswith
917 917 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
918 918 options[b'compengine'] = r.split(b'-', 2)[2]
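# e.g. both b'revlog-compression-zstd' and b'exp-compression-zstd'
# resolve to the engine name b'zstd' after the split above.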
919 919
920 920 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
921 921 if options[b'zlib.level'] is not None:
922 922 if not (0 <= options[b'zlib.level'] <= 9):
923 923 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
924 924 raise error.Abort(msg % options[b'zlib.level'])
925 925 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
926 926 if options[b'zstd.level'] is not None:
927 927 if not (0 <= options[b'zstd.level'] <= 22):
928 928 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
929 929 raise error.Abort(msg % options[b'zstd.level'])
930 930
931 931 if repository.NARROW_REQUIREMENT in requirements:
932 932 options[b'enableellipsis'] = True
933 933
934 934 if ui.configbool(b'experimental', b'rust.index'):
935 935 options[b'rust.index'] = True
936 936 if ui.configbool(b'experimental', b'exp-persistent-nodemap'):
937 937 options[b'exp-persistent-nodemap'] = True
938 938 if ui.configbool(b'experimental', b'exp-persistent-nodemap.mmap'):
939 939 options[b'exp-persistent-nodemap.mmap'] = True
940 epnm = ui.config(b'experimental', b'exp-persistent-nodemap.mode')
941 options[b'exp-persistent-nodemap.mode'] = epnm
940 942 if ui.configbool(b'devel', b'persistent-nodemap'):
941 943 options[b'devel-force-nodemap'] = True
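# Illustrative hgrc snippet (assumed values) matching the config keys read
# above:
#
#   [experimental]
#   exp-persistent-nodemap = yes
#   exp-persistent-nodemap.mmap = yes
#   exp-persistent-nodemap.mode = warn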
942 944
943 945 return options
944 946
945 947
946 948 def makemain(**kwargs):
947 949 """Produce a type conforming to ``ilocalrepositorymain``."""
948 950 return localrepository
949 951
950 952
951 953 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
952 954 class revlogfilestorage(object):
953 955 """File storage when using revlogs."""
954 956
955 957 def file(self, path):
956 958 if path[0] == b'/':
957 959 path = path[1:]
958 960
959 961 return filelog.filelog(self.svfs, path)
960 962
961 963
962 964 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
963 965 class revlognarrowfilestorage(object):
964 966 """File storage when using revlogs and narrow files."""
965 967
966 968 def file(self, path):
967 969 if path[0] == b'/':
968 970 path = path[1:]
969 971
970 972 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
971 973
972 974
973 975 def makefilestorage(requirements, features, **kwargs):
974 976 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
975 977 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
976 978 features.add(repository.REPO_FEATURE_STREAM_CLONE)
977 979
978 980 if repository.NARROW_REQUIREMENT in requirements:
979 981 return revlognarrowfilestorage
980 982 else:
981 983 return revlogfilestorage
982 984
983 985
984 986 # List of repository interfaces and factory functions for them. Each
985 987 # will be called in order during ``makelocalrepository()`` to iteratively
986 988 # derive the final type for a local repository instance. We capture the
987 989 # function as a lambda so we don't hold a reference and the module-level
988 990 # functions can be wrapped.
989 991 REPO_INTERFACES = [
990 992 (repository.ilocalrepositorymain, lambda: makemain),
991 993 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
992 994 ]
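# Illustrative sketch (assumed extension code, not part of this file): since
# the module-level factories are resolved lazily through the lambdas above,
# an extension can influence the derived repository type by wrapping them:
#
#   def _makefilestorage(orig, requirements, features, **kwargs):
#       cls = orig(requirements, features, **kwargs)
#       # return a hypothetical subclass when a made-up requirement is present
#       return mystorage if b'exp-myfeature' in requirements else cls
#
#   extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)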
993 995
994 996
995 997 @interfaceutil.implementer(repository.ilocalrepositorymain)
996 998 class localrepository(object):
997 999 """Main class for representing local repositories.
998 1000
999 1001 All local repositories are instances of this class.
1000 1002
1001 1003 Constructed on its own, instances of this class are not usable as
1002 1004 repository objects. To obtain a usable repository object, call
1003 1005 ``hg.repository()``, ``localrepo.instance()``, or
1004 1006 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1005 1007 ``instance()`` adds support for creating new repositories.
1006 1008 ``hg.repository()`` adds more extension integration, including calling
1007 1009 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1008 1010 used.
1009 1011 """
1010 1012
1011 1013 # obsolete experimental requirements:
1012 1014 # - manifestv2: An experimental new manifest format that allowed
1013 1015 # for stem compression of long paths. Experiment ended up not
1014 1016 # being successful (repository sizes went up due to worse delta
1015 1017 # chains), and the code was deleted in 4.6.
1016 1018 supportedformats = {
1017 1019 b'revlogv1',
1018 1020 b'generaldelta',
1019 1021 b'treemanifest',
1020 1022 COPIESSDC_REQUIREMENT,
1021 1023 REVLOGV2_REQUIREMENT,
1022 1024 SIDEDATA_REQUIREMENT,
1023 1025 SPARSEREVLOG_REQUIREMENT,
1024 1026 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1025 1027 }
1026 1028 _basesupported = supportedformats | {
1027 1029 b'store',
1028 1030 b'fncache',
1029 1031 b'shared',
1030 1032 b'relshared',
1031 1033 b'dotencode',
1032 1034 b'exp-sparse',
1033 1035 b'internal-phase',
1034 1036 }
1035 1037
1036 1038 # list of prefixes for files which can be written without 'wlock'
1037 1039 # Extensions should extend this list when needed
1038 1040 _wlockfreeprefix = {
1039 1041 # We might consider requiring 'wlock' for the next
1040 1042 # two, but pretty much all the existing code assumes
1041 1043 # wlock is not needed so we keep them excluded for
1042 1044 # now.
1043 1045 b'hgrc',
1044 1046 b'requires',
1045 1047 # XXX cache is a complicated business; someone
1046 1048 # should investigate this in depth at some point
1047 1049 b'cache/',
1048 1050 # XXX shouldn't dirstate be covered by the wlock?
1049 1051 b'dirstate',
1050 1052 # XXX bisect was still a bit too messy at the time
1051 1053 # this changeset was introduced. Someone should fix
1052 1054 # the remaining bit and drop this line
1053 1055 b'bisect.state',
1054 1056 }
1055 1057
1056 1058 def __init__(
1057 1059 self,
1058 1060 baseui,
1059 1061 ui,
1060 1062 origroot,
1061 1063 wdirvfs,
1062 1064 hgvfs,
1063 1065 requirements,
1064 1066 supportedrequirements,
1065 1067 sharedpath,
1066 1068 store,
1067 1069 cachevfs,
1068 1070 wcachevfs,
1069 1071 features,
1070 1072 intents=None,
1071 1073 ):
1072 1074 """Create a new local repository instance.
1073 1075
1074 1076 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1075 1077 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1076 1078 object.
1077 1079
1078 1080 Arguments:
1079 1081
1080 1082 baseui
1081 1083 ``ui.ui`` instance that ``ui`` argument was based off of.
1082 1084
1083 1085 ui
1084 1086 ``ui.ui`` instance for use by the repository.
1085 1087
1086 1088 origroot
1087 1089 ``bytes`` path to working directory root of this repository.
1088 1090
1089 1091 wdirvfs
1090 1092 ``vfs.vfs`` rooted at the working directory.
1091 1093
1092 1094 hgvfs
1093 1095 ``vfs.vfs`` rooted at .hg/
1094 1096
1095 1097 requirements
1096 1098 ``set`` of bytestrings representing repository opening requirements.
1097 1099
1098 1100 supportedrequirements
1099 1101 ``set`` of bytestrings representing repository requirements that we
1100 1102 know how to open. May be a superset of ``requirements``.
1101 1103
1102 1104 sharedpath
1103 1105 ``bytes`` defining the path to the storage base directory. Points to a
1104 1106 ``.hg/`` directory somewhere.
1105 1107
1106 1108 store
1107 1109 ``store.basicstore`` (or derived) instance providing access to
1108 1110 versioned storage.
1109 1111
1110 1112 cachevfs
1111 1113 ``vfs.vfs`` used for cache files.
1112 1114
1113 1115 wcachevfs
1114 1116 ``vfs.vfs`` used for cache files related to the working copy.
1115 1117
1116 1118 features
1117 1119 ``set`` of bytestrings defining features/capabilities of this
1118 1120 instance.
1119 1121
1120 1122 intents
1121 1123 ``set`` of system strings indicating what this repo will be used
1122 1124 for.
1123 1125 """
1124 1126 self.baseui = baseui
1125 1127 self.ui = ui
1126 1128 self.origroot = origroot
1127 1129 # vfs rooted at working directory.
1128 1130 self.wvfs = wdirvfs
1129 1131 self.root = wdirvfs.base
1130 1132 # vfs rooted at .hg/. Used to access most non-store paths.
1131 1133 self.vfs = hgvfs
1132 1134 self.path = hgvfs.base
1133 1135 self.requirements = requirements
1134 1136 self.supported = supportedrequirements
1135 1137 self.sharedpath = sharedpath
1136 1138 self.store = store
1137 1139 self.cachevfs = cachevfs
1138 1140 self.wcachevfs = wcachevfs
1139 1141 self.features = features
1140 1142
1141 1143 self.filtername = None
1142 1144
1143 1145 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1144 1146 b'devel', b'check-locks'
1145 1147 ):
1146 1148 self.vfs.audit = self._getvfsward(self.vfs.audit)
1147 1149 # A list of callbacks to shape the phase if no data were found.
1148 1150 # Callbacks are in the form: func(repo, roots) --> processed roots.
1149 1151 # This list is to be filled by extensions during repo setup
1150 1152 self._phasedefaults = []
1151 1153
1152 1154 color.setup(self.ui)
1153 1155
1154 1156 self.spath = self.store.path
1155 1157 self.svfs = self.store.vfs
1156 1158 self.sjoin = self.store.join
1157 1159 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1158 1160 b'devel', b'check-locks'
1159 1161 ):
1160 1162 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1161 1163 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1162 1164 else: # standard vfs
1163 1165 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1164 1166
1165 1167 self._dirstatevalidatewarned = False
1166 1168
1167 1169 self._branchcaches = branchmap.BranchMapCache()
1168 1170 self._revbranchcache = None
1169 1171 self._filterpats = {}
1170 1172 self._datafilters = {}
1171 1173 self._transref = self._lockref = self._wlockref = None
1172 1174
1173 1175 # A cache for various files under .hg/ that tracks file changes,
1174 1176 # (used by the filecache decorator)
1175 1177 #
1176 1178 # Maps a property name to its util.filecacheentry
1177 1179 self._filecache = {}
1178 1180
1179 1181 # hold sets of revisions to be filtered
1180 1182 # should be cleared when something might have changed the filter value:
1181 1183 # - new changesets,
1182 1184 # - phase change,
1183 1185 # - new obsolescence marker,
1184 1186 # - working directory parent change,
1185 1187 # - bookmark changes
1186 1188 self.filteredrevcache = {}
1187 1189
1188 1190 # post-dirstate-status hooks
1189 1191 self._postdsstatus = []
1190 1192
1191 1193 # generic mapping between names and nodes
1192 1194 self.names = namespaces.namespaces()
1193 1195
1194 1196 # Key to signature value.
1195 1197 self._sparsesignaturecache = {}
1196 1198 # Signature to cached matcher instance.
1197 1199 self._sparsematchercache = {}
1198 1200
1199 1201 self._extrafilterid = repoview.extrafilter(ui)
1200 1202
1201 1203 self.filecopiesmode = None
1202 1204 if COPIESSDC_REQUIREMENT in self.requirements:
1203 1205 self.filecopiesmode = b'changeset-sidedata'
1204 1206
1205 1207 def _getvfsward(self, origfunc):
1206 1208 """build a ward for self.vfs"""
1207 1209 rref = weakref.ref(self)
1208 1210
1209 1211 def checkvfs(path, mode=None):
1210 1212 ret = origfunc(path, mode=mode)
1211 1213 repo = rref()
1212 1214 if (
1213 1215 repo is None
1214 1216 or not util.safehasattr(repo, b'_wlockref')
1215 1217 or not util.safehasattr(repo, b'_lockref')
1216 1218 ):
1217 1219 return
1218 1220 if mode in (None, b'r', b'rb'):
1219 1221 return
1220 1222 if path.startswith(repo.path):
1221 1223 # truncate name relative to the repository (.hg)
1222 1224 path = path[len(repo.path) + 1 :]
1223 1225 if path.startswith(b'cache/'):
1224 1226 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1225 1227 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1226 1228 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1227 1229 # journal is covered by 'lock'
1228 1230 if repo._currentlock(repo._lockref) is None:
1229 1231 repo.ui.develwarn(
1230 1232 b'write with no lock: "%s"' % path,
1231 1233 stacklevel=3,
1232 1234 config=b'check-locks',
1233 1235 )
1234 1236 elif repo._currentlock(repo._wlockref) is None:
1235 1237 # rest of vfs files are covered by 'wlock'
1236 1238 #
1237 1239 # exclude special files
1238 1240 for prefix in self._wlockfreeprefix:
1239 1241 if path.startswith(prefix):
1240 1242 return
1241 1243 repo.ui.develwarn(
1242 1244 b'write with no wlock: "%s"' % path,
1243 1245 stacklevel=3,
1244 1246 config=b'check-locks',
1245 1247 )
1246 1248 return ret
1247 1249
1248 1250 return checkvfs
1249 1251
1250 1252 def _getsvfsward(self, origfunc):
1251 1253 """build a ward for self.svfs"""
1252 1254 rref = weakref.ref(self)
1253 1255
1254 1256 def checksvfs(path, mode=None):
1255 1257 ret = origfunc(path, mode=mode)
1256 1258 repo = rref()
1257 1259 if repo is None or not util.safehasattr(repo, b'_lockref'):
1258 1260 return
1259 1261 if mode in (None, b'r', b'rb'):
1260 1262 return
1261 1263 if path.startswith(repo.sharedpath):
1262 1264 # truncate name relative to the repository (.hg)
1263 1265 path = path[len(repo.sharedpath) + 1 :]
1264 1266 if repo._currentlock(repo._lockref) is None:
1265 1267 repo.ui.develwarn(
1266 1268 b'write with no lock: "%s"' % path, stacklevel=4
1267 1269 )
1268 1270 return ret
1269 1271
1270 1272 return checksvfs
1271 1273
1272 1274 def close(self):
1273 1275 self._writecaches()
1274 1276
1275 1277 def _writecaches(self):
1276 1278 if self._revbranchcache:
1277 1279 self._revbranchcache.write()
1278 1280
1279 1281 def _restrictcapabilities(self, caps):
1280 1282 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1281 1283 caps = set(caps)
1282 1284 capsblob = bundle2.encodecaps(
1283 1285 bundle2.getrepocaps(self, role=b'client')
1284 1286 )
1285 1287 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1286 1288 return caps
1287 1289
1288 1290 def _writerequirements(self):
1289 1291 scmutil.writerequires(self.vfs, self.requirements)
1290 1292
1291 1293 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1292 1294 # self -> auditor -> self._checknested -> self
1293 1295
1294 1296 @property
1295 1297 def auditor(self):
1296 1298 # This is only used by context.workingctx.match in order to
1297 1299 # detect files in subrepos.
1298 1300 return pathutil.pathauditor(self.root, callback=self._checknested)
1299 1301
1300 1302 @property
1301 1303 def nofsauditor(self):
1302 1304 # This is only used by context.basectx.match in order to detect
1303 1305 # files in subrepos.
1304 1306 return pathutil.pathauditor(
1305 1307 self.root, callback=self._checknested, realfs=False, cached=True
1306 1308 )
1307 1309
1308 1310 def _checknested(self, path):
1309 1311 """Determine if path is a legal nested repository."""
1310 1312 if not path.startswith(self.root):
1311 1313 return False
1312 1314 subpath = path[len(self.root) + 1 :]
1313 1315 normsubpath = util.pconvert(subpath)
1314 1316
1315 1317 # XXX: Checking against the current working copy is wrong in
1316 1318 # the sense that it can reject things like
1317 1319 #
1318 1320 # $ hg cat -r 10 sub/x.txt
1319 1321 #
1320 1322 # if sub/ is no longer a subrepository in the working copy
1321 1323 # parent revision.
1322 1324 #
1323 1325 # However, it can of course also allow things that would have
1324 1326 # been rejected before, such as the above cat command if sub/
1325 1327 # is a subrepository now, but was a normal directory before.
1326 1328 # The old path auditor would have rejected by mistake since it
1327 1329 # panics when it sees sub/.hg/.
1328 1330 #
1329 1331 # All in all, checking against the working copy seems sensible
1330 1332 # since we want to prevent access to nested repositories on
1331 1333 # the filesystem *now*.
1332 1334 ctx = self[None]
1333 1335 parts = util.splitpath(subpath)
1334 1336 while parts:
1335 1337 prefix = b'/'.join(parts)
1336 1338 if prefix in ctx.substate:
1337 1339 if prefix == normsubpath:
1338 1340 return True
1339 1341 else:
1340 1342 sub = ctx.sub(prefix)
1341 1343 return sub.checknested(subpath[len(prefix) + 1 :])
1342 1344 else:
1343 1345 parts.pop()
1344 1346 return False
1345 1347
1346 1348 def peer(self):
1347 1349 return localpeer(self) # not cached to avoid reference cycle
1348 1350
1349 1351 def unfiltered(self):
1350 1352 """Return unfiltered version of the repository
1351 1353
1352 1354 Intended to be overwritten by filtered repo."""
1353 1355 return self
1354 1356
1355 1357 def filtered(self, name, visibilityexceptions=None):
1356 1358 """Return a filtered version of a repository
1357 1359
1358 1360 The `name` parameter is the identifier of the requested view. This
1359 1361 will return a repoview object set "exactly" to the specified view.
1360 1362
1361 1363 This function does not apply recursive filtering to a repository. For
1362 1364 example calling `repo.filtered("served")` will return a repoview using
1363 1365 the "served" view, regardless of the initial view used by `repo`.
1364 1366
1365 1367 In other words, there is always only one level of `repoview` "filtering".
1366 1368 """
1367 1369 if self._extrafilterid is not None and b'%' not in name:
1368 1370 name = name + b'%' + self._extrafilterid
1369 1371
1370 1372 cls = repoview.newtype(self.unfiltered().__class__)
1371 1373 return cls(self, name, visibilityexceptions)
1372 1374
1373 1375 @mixedrepostorecache(
1374 1376 (b'bookmarks', b'plain'),
1375 1377 (b'bookmarks.current', b'plain'),
1376 1378 (b'bookmarks', b''),
1377 1379 (b'00changelog.i', b''),
1378 1380 )
1379 1381 def _bookmarks(self):
1380 1382 # Since the multiple files involved in the transaction cannot be
1381 1383 # written atomically (with current repository format), there is a race
1382 1384 # condition here.
1383 1385 #
1384 1386 # 1) changelog content A is read
1385 1387 # 2) outside transaction update changelog to content B
1386 1388 # 3) outside transaction update bookmark file referring to content B
1387 1389 # 4) bookmarks file content is read and filtered against changelog-A
1388 1390 #
1389 1391 # When this happens, bookmarks against nodes missing from A are dropped.
1390 1392 #
1391 1393 # Having this happen during a read is not great, but it becomes worse
1392 1394 # when it happens during a write because the bookmarks to the "unknown"
1393 1395 # nodes will be dropped for good. However, writes happen within locks.
1394 1396 # This locking makes it possible to have a race free consistent read.
1395 1397 # For this purpose, data read from disk before locking is
1396 1398 # "invalidated" right after the locks are taken. This invalidation is
1397 1399 # "light": the `filecache` mechanism keeps the data in memory and will
1398 1400 # reuse it if the underlying files did not change. Not parsing the
1399 1401 # same data multiple times helps performance.
1400 1402 #
1401 1403 # Unfortunately, in the case described above, the files tracked by the
1402 1404 # bookmarks file cache might not have changed, but the in-memory
1403 1405 # content is still "wrong" because we used an older changelog content
1404 1406 # to process the on-disk data. So after locking, the changelog would be
1405 1407 # refreshed but `_bookmarks` would be preserved.
1406 1408 # Adding `00changelog.i` to the list of tracked files is not
1407 1409 # enough, because at the time we build the content for `_bookmarks` in
1408 1410 # (4), the changelog file has already diverged from the content used
1409 1411 # for loading `changelog` in (1)
1410 1412 #
1411 1413 # To prevent the issue, we force the changelog to be explicitly
1412 1414 # reloaded while computing `_bookmarks`. The data race can still happen
1413 1415 # without the lock (with a narrower window), but it would no longer go
1414 1416 # undetected during the lock time refresh.
1415 1417 #
1416 1418 # The new schedule is as follows:
1417 1419 #
1418 1420 # 1) filecache logic detect that `_bookmarks` needs to be computed
1419 1421 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1420 1422 # 3) We force `changelog` filecache to be tested
1421 1423 # 4) cachestat for `changelog` are captured (for changelog)
1422 1424 # 5) `_bookmarks` is computed and cached
1423 1425 #
1424 1426 # The step in (3) ensures we have a changelog at least as recent as the
1425 1427 # cache stat computed in (1). As a result, at locking time:
1426 1428 # * if the changelog did not change since (1) -> we can reuse the data
1427 1429 # * otherwise -> the bookmarks get refreshed.
1428 1430 self._refreshchangelog()
1429 1431 return bookmarks.bmstore(self)
1430 1432
1431 1433 def _refreshchangelog(self):
1432 1434 """make sure the in memory changelog match the on-disk one"""
1433 1435 if 'changelog' in vars(self) and self.currenttransaction() is None:
1434 1436 del self.changelog
1435 1437
1436 1438 @property
1437 1439 def _activebookmark(self):
1438 1440 return self._bookmarks.active
1439 1441
1440 1442 # _phasesets depend on changelog. what we need is to call
1441 1443 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1442 1444 # can't be easily expressed in filecache mechanism.
1443 1445 @storecache(b'phaseroots', b'00changelog.i')
1444 1446 def _phasecache(self):
1445 1447 return phases.phasecache(self, self._phasedefaults)
1446 1448
1447 1449 @storecache(b'obsstore')
1448 1450 def obsstore(self):
1449 1451 return obsolete.makestore(self.ui, self)
1450 1452
1451 1453 @storecache(b'00changelog.i')
1452 1454 def changelog(self):
1453 1455 return self.store.changelog(txnutil.mayhavepending(self.root))
1454 1456
1455 1457 @storecache(b'00manifest.i')
1456 1458 def manifestlog(self):
1457 1459 return self.store.manifestlog(self, self._storenarrowmatch)
1458 1460
1459 1461 @repofilecache(b'dirstate')
1460 1462 def dirstate(self):
1461 1463 return self._makedirstate()
1462 1464
1463 1465 def _makedirstate(self):
1464 1466 """Extension point for wrapping the dirstate per-repo."""
1465 1467 sparsematchfn = lambda: sparse.matcher(self)
1466 1468
1467 1469 return dirstate.dirstate(
1468 1470 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1469 1471 )
1470 1472
1471 1473 def _dirstatevalidate(self, node):
1472 1474 try:
1473 1475 self.changelog.rev(node)
1474 1476 return node
1475 1477 except error.LookupError:
1476 1478 if not self._dirstatevalidatewarned:
1477 1479 self._dirstatevalidatewarned = True
1478 1480 self.ui.warn(
1479 1481 _(b"warning: ignoring unknown working parent %s!\n")
1480 1482 % short(node)
1481 1483 )
1482 1484 return nullid
1483 1485
1484 1486 @storecache(narrowspec.FILENAME)
1485 1487 def narrowpats(self):
1486 1488 """matcher patterns for this repository's narrowspec
1487 1489
1488 1490 A tuple of (includes, excludes).
1489 1491 """
1490 1492 return narrowspec.load(self)
1491 1493
1492 1494 @storecache(narrowspec.FILENAME)
1493 1495 def _storenarrowmatch(self):
1494 1496 if repository.NARROW_REQUIREMENT not in self.requirements:
1495 1497 return matchmod.always()
1496 1498 include, exclude = self.narrowpats
1497 1499 return narrowspec.match(self.root, include=include, exclude=exclude)
1498 1500
1499 1501 @storecache(narrowspec.FILENAME)
1500 1502 def _narrowmatch(self):
1501 1503 if repository.NARROW_REQUIREMENT not in self.requirements:
1502 1504 return matchmod.always()
1503 1505 narrowspec.checkworkingcopynarrowspec(self)
1504 1506 include, exclude = self.narrowpats
1505 1507 return narrowspec.match(self.root, include=include, exclude=exclude)
1506 1508
1507 1509 def narrowmatch(self, match=None, includeexact=False):
1508 1510 """matcher corresponding the the repo's narrowspec
1509 1511
1510 1512 If `match` is given, then that will be intersected with the narrow
1511 1513 matcher.
1512 1514
1513 1515 If `includeexact` is True, then any exact matches from `match` will
1514 1516 be included even if they're outside the narrowspec.
1515 1517 """
1516 1518 if match:
1517 1519 if includeexact and not self._narrowmatch.always():
1518 1520 # do not exclude explicitly-specified paths so that they can
1519 1521 # be warned later on
1520 1522 em = matchmod.exact(match.files())
1521 1523 nm = matchmod.unionmatcher([self._narrowmatch, em])
1522 1524 return matchmod.intersectmatchers(match, nm)
1523 1525 return matchmod.intersectmatchers(match, self._narrowmatch)
1524 1526 return self._narrowmatch
1525 1527
1526 1528 def setnarrowpats(self, newincludes, newexcludes):
1527 1529 narrowspec.save(self, newincludes, newexcludes)
1528 1530 self.invalidate(clearfilecache=True)
1529 1531
1530 1532 @unfilteredpropertycache
1531 1533 def _quick_access_changeid_null(self):
1532 1534 return {
1533 1535 b'null': (nullrev, nullid),
1534 1536 nullrev: (nullrev, nullid),
1535 1537 nullid: (nullrev, nullid),
1536 1538 }
1537 1539
1538 1540 @unfilteredpropertycache
1539 1541 def _quick_access_changeid_wc(self):
1540 1542 # also fast path access to the working copy parents
1541 1543 # however, only do it for filters that ensure the wc is visible.
1542 1544 quick = {}
1543 1545 cl = self.unfiltered().changelog
1544 1546 for node in self.dirstate.parents():
1545 1547 if node == nullid:
1546 1548 continue
1547 1549 rev = cl.index.get_rev(node)
1548 1550 if rev is None:
1549 1551 # unknown working copy parent case:
1550 1552 #
1551 1553 # skip the fast path and let higher code deal with it
1552 1554 continue
1553 1555 pair = (rev, node)
1554 1556 quick[rev] = pair
1555 1557 quick[node] = pair
1556 1558 # also add the parents of the parents
1557 1559 for r in cl.parentrevs(rev):
1558 1560 if r == nullrev:
1559 1561 continue
1560 1562 n = cl.node(r)
1561 1563 pair = (r, n)
1562 1564 quick[r] = pair
1563 1565 quick[n] = pair
1564 1566 p1node = self.dirstate.p1()
1565 1567 if p1node != nullid:
1566 1568 quick[b'.'] = quick[p1node]
1567 1569 return quick
1568 1570
1569 1571 @unfilteredmethod
1570 1572 def _quick_access_changeid_invalidate(self):
1571 1573 if '_quick_access_changeid_wc' in vars(self):
1572 1574 del self.__dict__['_quick_access_changeid_wc']
1573 1575
1574 1576 @property
1575 1577 def _quick_access_changeid(self):
1576 1578 """an helper dictionnary for __getitem__ calls
1577 1579
1578 1580 This contains a list of symbols we can recognise right away without
1579 1581 further processing.
1580 1582 """
1581 1583 mapping = self._quick_access_changeid_null
1582 1584 if self.filtername in repoview.filter_has_wc:
1583 1585 mapping = mapping.copy()
1584 1586 mapping.update(self._quick_access_changeid_wc)
1585 1587 return mapping
1586 1588
1587 1589 def __getitem__(self, changeid):
1588 1590 # dealing with special cases
1589 1591 if changeid is None:
1590 1592 return context.workingctx(self)
1591 1593 if isinstance(changeid, context.basectx):
1592 1594 return changeid
1593 1595
1594 1596 # dealing with multiple revisions
1595 1597 if isinstance(changeid, slice):
1596 1598 # wdirrev isn't contiguous so the slice shouldn't include it
1597 1599 return [
1598 1600 self[i]
1599 1601 for i in pycompat.xrange(*changeid.indices(len(self)))
1600 1602 if i not in self.changelog.filteredrevs
1601 1603 ]
1602 1604
1603 1605 # dealing with some special values
1604 1606 quick_access = self._quick_access_changeid.get(changeid)
1605 1607 if quick_access is not None:
1606 1608 rev, node = quick_access
1607 1609 return context.changectx(self, rev, node, maybe_filtered=False)
1608 1610 if changeid == b'tip':
1609 1611 node = self.changelog.tip()
1610 1612 rev = self.changelog.rev(node)
1611 1613 return context.changectx(self, rev, node)
1612 1614
1613 1615 # dealing with arbitrary values
1614 1616 try:
1615 1617 if isinstance(changeid, int):
1616 1618 node = self.changelog.node(changeid)
1617 1619 rev = changeid
1618 1620 elif changeid == b'.':
1619 1621 # this is a hack to delay/avoid loading obsmarkers
1620 1622 # when we know that '.' won't be hidden
1621 1623 node = self.dirstate.p1()
1622 1624 rev = self.unfiltered().changelog.rev(node)
1623 1625 elif len(changeid) == 20:
1624 1626 try:
1625 1627 node = changeid
1626 1628 rev = self.changelog.rev(changeid)
1627 1629 except error.FilteredLookupError:
1628 1630 changeid = hex(changeid) # for the error message
1629 1631 raise
1630 1632 except LookupError:
1631 1633 # check if it might have come from damaged dirstate
1632 1634 #
1633 1635 # XXX we could avoid the unfiltered if we had a recognizable
1634 1636 # exception for filtered changeset access
1635 1637 if (
1636 1638 self.local()
1637 1639 and changeid in self.unfiltered().dirstate.parents()
1638 1640 ):
1639 1641 msg = _(b"working directory has unknown parent '%s'!")
1640 1642 raise error.Abort(msg % short(changeid))
1641 1643 changeid = hex(changeid) # for the error message
1642 1644 raise
1643 1645
1644 1646 elif len(changeid) == 40:
1645 1647 node = bin(changeid)
1646 1648 rev = self.changelog.rev(node)
1647 1649 else:
1648 1650 raise error.ProgrammingError(
1649 1651 b"unsupported changeid '%s' of type %s"
1650 1652 % (changeid, pycompat.bytestr(type(changeid)))
1651 1653 )
1652 1654
1653 1655 return context.changectx(self, rev, node)
1654 1656
1655 1657 except (error.FilteredIndexError, error.FilteredLookupError):
1656 1658 raise error.FilteredRepoLookupError(
1657 1659 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1658 1660 )
1659 1661 except (IndexError, LookupError):
1660 1662 raise error.RepoLookupError(
1661 1663 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1662 1664 )
1663 1665 except error.WdirUnsupported:
1664 1666 return context.workingctx(self)
1665 1667
1666 1668 def __contains__(self, changeid):
1667 1669 """True if the given changeid exists
1668 1670
1669 1671 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1670 1672 specified.
1671 1673 """
1672 1674 try:
1673 1675 self[changeid]
1674 1676 return True
1675 1677 except error.RepoLookupError:
1676 1678 return False
1677 1679
1678 1680 def __nonzero__(self):
1679 1681 return True
1680 1682
1681 1683 __bool__ = __nonzero__
1682 1684
1683 1685 def __len__(self):
1684 1686 # no need to pay the cost of repoview.changelog
1685 1687 unfi = self.unfiltered()
1686 1688 return len(unfi.changelog)
1687 1689
1688 1690 def __iter__(self):
1689 1691 return iter(self.changelog)
1690 1692
1691 1693 def revs(self, expr, *args):
1692 1694 '''Find revisions matching a revset.
1693 1695
1694 1696 The revset is specified as a string ``expr`` that may contain
1695 1697 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1696 1698
1697 1699 Revset aliases from the configuration are not expanded. To expand
1698 1700 user aliases, consider calling ``scmutil.revrange()`` or
1699 1701 ``repo.anyrevs([expr], user=True)``.
1700 1702
1701 1703 Returns a smartset.abstractsmartset, which is a list-like interface
1702 1704 that contains integer revisions.
1703 1705 '''
1704 1706 tree = revsetlang.spectree(expr, *args)
1705 1707 return revset.makematcher(tree)(self)
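# Illustrative usage (invented revision number): the %-formatting escapes
# typed arguments safely, e.g.
#
#   repo.revs(b'ancestors(%d) and public()', 42)
#
# yields the public ancestors of revision 42 as a smartset.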
1706 1708
1707 1709 def set(self, expr, *args):
1708 1710 '''Find revisions matching a revset and emit changectx instances.
1709 1711
1710 1712 This is a convenience wrapper around ``revs()`` that iterates the
1711 1713 result and is a generator of changectx instances.
1712 1714
1713 1715 Revset aliases from the configuration are not expanded. To expand
1714 1716 user aliases, consider calling ``scmutil.revrange()``.
1715 1717 '''
1716 1718 for r in self.revs(expr, *args):
1717 1719 yield self[r]
1718 1720
1719 1721 def anyrevs(self, specs, user=False, localalias=None):
1720 1722 '''Find revisions matching one of the given revsets.
1721 1723
1722 1724 Revset aliases from the configuration are not expanded by default. To
1723 1725 expand user aliases, specify ``user=True``. To provide some local
1724 1726 definitions overriding user aliases, set ``localalias`` to
1725 1727 ``{name: definitionstring}``.
1726 1728 '''
1727 1729 if specs == [b'null']:
1728 1730 return revset.baseset([nullrev])
1729 1731 if specs == [b'.']:
1730 1732 quick_data = self._quick_access_changeid.get(b'.')
1731 1733 if quick_data is not None:
1732 1734 return revset.baseset([quick_data[0]])
1733 1735 if user:
1734 1736 m = revset.matchany(
1735 1737 self.ui,
1736 1738 specs,
1737 1739 lookup=revset.lookupfn(self),
1738 1740 localalias=localalias,
1739 1741 )
1740 1742 else:
1741 1743 m = revset.matchany(None, specs, localalias=localalias)
1742 1744 return m(self)
1743 1745
1744 1746 def url(self):
1745 1747 return b'file:' + self.root
1746 1748
1747 1749 def hook(self, name, throw=False, **args):
1748 1750 """Call a hook, passing this repo instance.
1749 1751
1750 1752 This is a convenience method to aid invoking hooks. Extensions likely
1751 1753 won't call this unless they have registered a custom hook or are
1752 1754 replacing code that is expected to call a hook.
1753 1755 """
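        # Illustrative usage (a sketch; the hook name is hypothetical):
        #
        #     repo.hook(b'myext-precommit', throw=True, source=b'commit')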
1754 1756 return hook.hook(self.ui, self, name, throw, **args)
1755 1757
1756 1758 @filteredpropertycache
1757 1759 def _tagscache(self):
1758 1760 '''Returns a tagscache object that contains various tags related
1759 1761 caches.'''
1760 1762
1761 1763 # This simplifies its cache management by having one decorated
1762 1764 # function (this one) and the rest simply fetch things from it.
1763 1765 class tagscache(object):
1764 1766 def __init__(self):
1765 1767 # These two define the set of tags for this repository. tags
1766 1768 # maps tag name to node; tagtypes maps tag name to 'global' or
1767 1769 # 'local'. (Global tags are defined by .hgtags across all
1768 1770 # heads, and local tags are defined in .hg/localtags.)
1769 1771 # They constitute the in-memory cache of tags.
1770 1772 self.tags = self.tagtypes = None
1771 1773
1772 1774 self.nodetagscache = self.tagslist = None
1773 1775
1774 1776 cache = tagscache()
1775 1777 cache.tags, cache.tagtypes = self._findtags()
1776 1778
1777 1779 return cache
1778 1780
1779 1781 def tags(self):
1780 1782 '''return a mapping of tag to node'''
1781 1783 t = {}
1782 1784 if self.changelog.filteredrevs:
1783 1785 tags, tt = self._findtags()
1784 1786 else:
1785 1787 tags = self._tagscache.tags
1786 1788 rev = self.changelog.rev
1787 1789 for k, v in pycompat.iteritems(tags):
1788 1790 try:
1789 1791 # ignore tags to unknown nodes
1790 1792 rev(v)
1791 1793 t[k] = v
1792 1794 except (error.LookupError, ValueError):
1793 1795 pass
1794 1796 return t
1795 1797
1796 1798 def _findtags(self):
1797 1799 '''Do the hard work of finding tags. Return a pair of dicts
1798 1800 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1799 1801 maps tag name to a string like \'global\' or \'local\'.
1800 1802 Subclasses or extensions are free to add their own tags, but
1801 1803 should be aware that the returned dicts will be retained for the
1802 1804 duration of the localrepo object.'''
1803 1805
1804 1806 # XXX what tagtype should subclasses/extensions use? Currently
1805 1807 # mq and bookmarks add tags, but do not set the tagtype at all.
1806 1808 # Should each extension invent its own tag type? Should there
1807 1809 # be one tagtype for all such "virtual" tags? Or is the status
1808 1810 # quo fine?
1809 1811
1810 1812 # map tag name to (node, hist)
1811 1813 alltags = tagsmod.findglobaltags(self.ui, self)
1812 1814 # map tag name to tag type
1813 1815 tagtypes = {tag: b'global' for tag in alltags}
1814 1816
1815 1817 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1816 1818
1817 1819 # Build the return dicts. Have to re-encode tag names because
1818 1820 # the tags module always uses UTF-8 (in order not to lose info
1819 1821 # writing to the cache), but the rest of Mercurial wants them in
1820 1822 # local encoding.
1821 1823 tags = {}
1822 1824 for (name, (node, hist)) in pycompat.iteritems(alltags):
1823 1825 if node != nullid:
1824 1826 tags[encoding.tolocal(name)] = node
1825 1827 tags[b'tip'] = self.changelog.tip()
1826 1828 tagtypes = {
1827 1829 encoding.tolocal(name): value
1828 1830 for (name, value) in pycompat.iteritems(tagtypes)
1829 1831 }
1830 1832 return (tags, tagtypes)
1831 1833
1832 1834 def tagtype(self, tagname):
1833 1835 '''
1834 1836 return the type of the given tag. result can be:
1835 1837
1836 1838 'local' : a local tag
1837 1839 'global' : a global tag
1838 1840 None : tag does not exist
1839 1841 '''
1840 1842
1841 1843 return self._tagscache.tagtypes.get(tagname)
1842 1844
1843 1845 def tagslist(self):
1844 1846 '''return a list of tags ordered by revision'''
1845 1847 if not self._tagscache.tagslist:
1846 1848 l = []
1847 1849 for t, n in pycompat.iteritems(self.tags()):
1848 1850 l.append((self.changelog.rev(n), t, n))
1849 1851 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1850 1852
1851 1853 return self._tagscache.tagslist
1852 1854
1853 1855 def nodetags(self, node):
1854 1856 '''return the tags associated with a node'''
1855 1857 if not self._tagscache.nodetagscache:
1856 1858 nodetagscache = {}
1857 1859 for t, n in pycompat.iteritems(self._tagscache.tags):
1858 1860 nodetagscache.setdefault(n, []).append(t)
1859 1861 for tags in pycompat.itervalues(nodetagscache):
1860 1862 tags.sort()
1861 1863 self._tagscache.nodetagscache = nodetagscache
1862 1864 return self._tagscache.nodetagscache.get(node, [])
1863 1865
1864 1866 def nodebookmarks(self, node):
1865 1867 """return the list of bookmarks pointing to the specified node"""
1866 1868 return self._bookmarks.names(node)
1867 1869
1868 1870 def branchmap(self):
1869 1871 '''returns a dictionary {branch: [branchheads]} with branchheads
1870 1872 ordered by increasing revision number'''
1871 1873 return self._branchcaches[self]
1872 1874
1873 1875 @unfilteredmethod
1874 1876 def revbranchcache(self):
1875 1877 if not self._revbranchcache:
1876 1878 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1877 1879 return self._revbranchcache
1878 1880
1879 1881 def branchtip(self, branch, ignoremissing=False):
1880 1882 '''return the tip node for a given branch
1881 1883
1882 1884 If ignoremissing is True, then this method will not raise an error.
1883 1885 This is helpful for callers that only expect None for a missing branch
1884 1886 (e.g. namespace).
1885 1887
1886 1888 '''
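        # Illustrative usage (a sketch):
        #
        #     node = repo.branchtip(b'default')
        #     node = repo.branchtip(b'maybe-missing', ignoremissing=True)
        #     # the second call returns None instead of raising when absent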
1887 1889 try:
1888 1890 return self.branchmap().branchtip(branch)
1889 1891 except KeyError:
1890 1892 if not ignoremissing:
1891 1893 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1892 1894 else:
1893 1895 pass
1894 1896
1895 1897 def lookup(self, key):
1896 1898 node = scmutil.revsymbol(self, key).node()
1897 1899 if node is None:
1898 1900 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1899 1901 return node
1900 1902
1901 1903 def lookupbranch(self, key):
1902 1904 if self.branchmap().hasbranch(key):
1903 1905 return key
1904 1906
1905 1907 return scmutil.revsymbol(self, key).branch()
1906 1908
1907 1909 def known(self, nodes):
1908 1910 cl = self.changelog
1909 1911 get_rev = cl.index.get_rev
1910 1912 filtered = cl.filteredrevs
1911 1913 result = []
1912 1914 for n in nodes:
1913 1915 r = get_rev(n)
1914 1916 resp = not (r is None or r in filtered)
1915 1917 result.append(resp)
1916 1918 return result
1917 1919
1918 1920 def local(self):
1919 1921 return self
1920 1922
1921 1923 def publishing(self):
1922 1924 # it's safe (and desirable) to trust the publish flag unconditionally
1923 1925 # so that we don't finalize changes shared between users via ssh or nfs
1924 1926 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1925 1927
1926 1928 def cancopy(self):
1927 1929 # so statichttprepo's override of local() works
1928 1930 if not self.local():
1929 1931 return False
1930 1932 if not self.publishing():
1931 1933 return True
1932 1934 # if publishing we can't copy if there is filtered content
1933 1935 return not self.filtered(b'visible').changelog.filteredrevs
1934 1936
1935 1937 def shared(self):
1936 1938 '''the type of shared repository (None if not shared)'''
1937 1939 if self.sharedpath != self.path:
1938 1940 return b'store'
1939 1941 return None
1940 1942
1941 1943 def wjoin(self, f, *insidef):
1942 1944 return self.vfs.reljoin(self.root, f, *insidef)
1943 1945
1944 1946 def setparents(self, p1, p2=nullid):
1945 1947 self[None].setparents(p1, p2)
1946 1948 self._quick_access_changeid_invalidate()
1947 1949
1948 1950 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1949 1951 """changeid must be a changeset revision, if specified.
1950 1952 fileid can be a file revision or node."""
1951 1953 return context.filectx(
1952 1954 self, path, changeid, fileid, changectx=changectx
1953 1955 )
1954 1956
1955 1957 def getcwd(self):
1956 1958 return self.dirstate.getcwd()
1957 1959
1958 1960 def pathto(self, f, cwd=None):
1959 1961 return self.dirstate.pathto(f, cwd)
1960 1962
1961 1963 def _loadfilter(self, filter):
1962 1964 if filter not in self._filterpats:
1963 1965 l = []
1964 1966 for pat, cmd in self.ui.configitems(filter):
1965 1967 if cmd == b'!':
1966 1968 continue
1967 1969 mf = matchmod.match(self.root, b'', [pat])
1968 1970 fn = None
1969 1971 params = cmd
1970 1972 for name, filterfn in pycompat.iteritems(self._datafilters):
1971 1973 if cmd.startswith(name):
1972 1974 fn = filterfn
1973 1975 params = cmd[len(name) :].lstrip()
1974 1976 break
1975 1977 if not fn:
1976 1978 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1977 1979 fn.__name__ = 'commandfilter'
1978 1980 # Wrap old filters not supporting keyword arguments
1979 1981 if not pycompat.getargspec(fn)[2]:
1980 1982 oldfn = fn
1981 1983 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1982 1984 fn.__name__ = 'compat-' + oldfn.__name__
1983 1985 l.append((mf, fn, params))
1984 1986 self._filterpats[filter] = l
1985 1987 return self._filterpats[filter]
1986 1988
1987 1989 def _filter(self, filterpats, filename, data):
1988 1990 for mf, fn, cmd in filterpats:
1989 1991 if mf(filename):
1990 1992 self.ui.debug(
1991 1993 b"filtering %s through %s\n"
1992 1994 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1993 1995 )
1994 1996 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1995 1997 break
1996 1998
1997 1999 return data
1998 2000
1999 2001 @unfilteredpropertycache
2000 2002 def _encodefilterpats(self):
2001 2003 return self._loadfilter(b'encode')
2002 2004
2003 2005 @unfilteredpropertycache
2004 2006 def _decodefilterpats(self):
2005 2007 return self._loadfilter(b'decode')
2006 2008
2007 2009 def adddatafilter(self, name, filter):
2008 2010 self._datafilters[name] = filter
2009 2011
2010 2012 def wread(self, filename):
2011 2013 if self.wvfs.islink(filename):
2012 2014 data = self.wvfs.readlink(filename)
2013 2015 else:
2014 2016 data = self.wvfs.read(filename)
2015 2017 return self._filter(self._encodefilterpats, filename, data)
2016 2018
2017 2019 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2018 2020 """write ``data`` into ``filename`` in the working directory
2019 2021
2020 2022 This returns the length of the written (possibly decoded) data.
2021 2023 """
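        # Illustrative usage (a sketch; the filenames are hypothetical):
        #
        #     repo.wwrite(b'notes.txt', b'content', b'')    # regular file
        #     repo.wwrite(b'run.sh', b'#!/bin/sh\n', b'x')   # executable file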
2022 2024 data = self._filter(self._decodefilterpats, filename, data)
2023 2025 if b'l' in flags:
2024 2026 self.wvfs.symlink(data, filename)
2025 2027 else:
2026 2028 self.wvfs.write(
2027 2029 filename, data, backgroundclose=backgroundclose, **kwargs
2028 2030 )
2029 2031 if b'x' in flags:
2030 2032 self.wvfs.setflags(filename, False, True)
2031 2033 else:
2032 2034 self.wvfs.setflags(filename, False, False)
2033 2035 return len(data)
2034 2036
2035 2037 def wwritedata(self, filename, data):
2036 2038 return self._filter(self._decodefilterpats, filename, data)
2037 2039
2038 2040 def currenttransaction(self):
2039 2041 """return the current transaction or None if none exists"""
2040 2042 if self._transref:
2041 2043 tr = self._transref()
2042 2044 else:
2043 2045 tr = None
2044 2046
2045 2047 if tr and tr.running():
2046 2048 return tr
2047 2049 return None
2048 2050
2049 2051 def transaction(self, desc, report=None):
2050 2052 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2051 2053 b'devel', b'check-locks'
2052 2054 ):
2053 2055 if self._currentlock(self._lockref) is None:
2054 2056 raise error.ProgrammingError(b'transaction requires locking')
2055 2057 tr = self.currenttransaction()
2056 2058 if tr is not None:
2057 2059 return tr.nest(name=desc)
2058 2060
2059 2061 # abort here if the journal already exists
2060 2062 if self.svfs.exists(b"journal"):
2061 2063 raise error.RepoError(
2062 2064 _(b"abandoned transaction found"),
2063 2065 hint=_(b"run 'hg recover' to clean up transaction"),
2064 2066 )
2065 2067
2066 2068 idbase = b"%.40f#%f" % (random.random(), time.time())
2067 2069 ha = hex(hashutil.sha1(idbase).digest())
2068 2070 txnid = b'TXN:' + ha
2069 2071 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2070 2072
2071 2073 self._writejournal(desc)
2072 2074 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2073 2075 if report:
2074 2076 rp = report
2075 2077 else:
2076 2078 rp = self.ui.warn
2077 2079 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2078 2080 # we must avoid cyclic reference between repo and transaction.
2079 2081 reporef = weakref.ref(self)
2080 2082 # Code to track tag movement
2081 2083 #
2082 2084 # Since tags are all handled as file content, it is actually quite hard
2083 2085 # to track these movements from a code perspective. So we fall back to
2084 2086 # tracking at the repository level. One could envision tracking changes
2085 2087 # to the '.hgtags' file through changegroup apply, but that fails to
2086 2088 # cope with cases where a transaction exposes new heads without a
2087 2089 # changegroup being involved (e.g. phase movement).
2088 2090 #
2089 2091 # For now, we gate the feature behind a flag since it likely comes
2090 2092 # with a performance impact. The current code runs more often than
2091 2093 # needed and does not use caches as much as it could. The current focus
2092 2094 # is on the behavior of the feature, so we disable it by default. The
2093 2095 # flag will be removed when we are happy with the performance impact.
2094 2096 #
2095 2097 # Once this feature is no longer experimental move the following
2096 2098 # documentation to the appropriate help section:
2097 2099 #
2098 2100 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2099 2101 # tags (new or changed or deleted tags). In addition the details of
2100 2102 # these changes are made available in a file at:
2101 2103 # ``REPOROOT/.hg/changes/tags.changes``.
2102 2104 # Make sure you check for HG_TAG_MOVED before reading that file as it
2103 2105 # might exist from a previous transaction even if no tags were touched
2104 2106 # in this one. Changes are recorded in a line-based format::
2105 2107 #
2106 2108 # <action> <hex-node> <tag-name>\n
2107 2109 #
2108 2110 # Actions are defined as follows:
2109 2111 # "-R": tag is removed,
2110 2112 # "+A": tag is added,
2111 2113 # "-M": tag is moved (old value),
2112 2114 # "+M": tag is moved (new value),
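        # A txnclose hook could consume that file roughly as follows (a sketch
        # following the format described above, not code from this module):
        #
        #     if os.environ.get('HG_TAG_MOVED'):
        #         with open('.hg/changes/tags.changes', 'rb') as fp:
        #             for line in fp:
        #                 action, node, name = line.rstrip(b'\n').split(b' ', 2)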
2113 2115 tracktags = lambda x: None
2114 2116 # experimental config: experimental.hook-track-tags
2115 2117 shouldtracktags = self.ui.configbool(
2116 2118 b'experimental', b'hook-track-tags'
2117 2119 )
2118 2120 if desc != b'strip' and shouldtracktags:
2119 2121 oldheads = self.changelog.headrevs()
2120 2122
2121 2123 def tracktags(tr2):
2122 2124 repo = reporef()
2123 2125 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2124 2126 newheads = repo.changelog.headrevs()
2125 2127 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2126 2128 # note: we compare lists here.
2127 2129 # As we only do it once, building a set would not be cheaper.
2128 2130 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2129 2131 if changes:
2130 2132 tr2.hookargs[b'tag_moved'] = b'1'
2131 2133 with repo.vfs(
2132 2134 b'changes/tags.changes', b'w', atomictemp=True
2133 2135 ) as changesfile:
2134 2136 # note: we do not register the file with the transaction
2135 2137 # because we need it to still exist when the transaction
2136 2138 # is closed (for txnclose hooks)
2137 2139 tagsmod.writediff(changesfile, changes)
2138 2140
2139 2141 def validate(tr2):
2140 2142 """will run pre-closing hooks"""
2141 2143 # XXX the transaction API is a bit lacking here so we take a hacky
2142 2144 # path for now
2143 2145 #
2144 2146 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2145 2147 # dict is copied before these run. In addition, we need the data
2146 2148 # available to in-memory hooks too.
2147 2149 #
2148 2150 # Moreover, we also need to make sure this runs before txnclose
2149 2151 # hooks and there is no "pending" mechanism that would execute
2150 2152 # logic only if hooks are about to run.
2151 2153 #
2152 2154 # Fixing this limitation of the transaction is also needed to track
2153 2155 # other families of changes (bookmarks, phases, obsolescence).
2154 2156 #
2155 2157 # This will have to be fixed before we remove the experimental
2156 2158 # gating.
2157 2159 tracktags(tr2)
2158 2160 repo = reporef()
2159 2161
2160 2162 singleheadopt = (b'experimental', b'single-head-per-branch')
2161 2163 singlehead = repo.ui.configbool(*singleheadopt)
2162 2164 if singlehead:
2163 2165 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2164 2166 accountclosed = singleheadsub.get(
2165 2167 b"account-closed-heads", False
2166 2168 )
2167 2169 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2168 2170 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2169 2171 for name, (old, new) in sorted(
2170 2172 tr.changes[b'bookmarks'].items()
2171 2173 ):
2172 2174 args = tr.hookargs.copy()
2173 2175 args.update(bookmarks.preparehookargs(name, old, new))
2174 2176 repo.hook(
2175 2177 b'pretxnclose-bookmark',
2176 2178 throw=True,
2177 2179 **pycompat.strkwargs(args)
2178 2180 )
2179 2181 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2180 2182 cl = repo.unfiltered().changelog
2181 2183 for revs, (old, new) in tr.changes[b'phases']:
2182 2184 for rev in revs:
2183 2185 args = tr.hookargs.copy()
2184 2186 node = hex(cl.node(rev))
2185 2187 args.update(phases.preparehookargs(node, old, new))
2186 2188 repo.hook(
2187 2189 b'pretxnclose-phase',
2188 2190 throw=True,
2189 2191 **pycompat.strkwargs(args)
2190 2192 )
2191 2193
2192 2194 repo.hook(
2193 2195 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2194 2196 )
2195 2197
2196 2198 def releasefn(tr, success):
2197 2199 repo = reporef()
2198 2200 if repo is None:
2199 2201 # If the repo has been GC'd (and this release function is being
2200 2202 # called from transaction.__del__), there's not much we can do,
2201 2203 # so just leave the unfinished transaction there and let the
2202 2204 # user run `hg recover`.
2203 2205 return
2204 2206 if success:
2205 2207 # this should be explicitly invoked here, because
2206 2208 # in-memory changes aren't written out when the
2207 2209 # transaction closes, if tr.addfilegenerator (via
2208 2210 # dirstate.write or so) isn't invoked while the
2209 2211 # transaction is running
2210 2212 repo.dirstate.write(None)
2211 2213 else:
2212 2214 # discard all changes (including ones already written
2213 2215 # out) in this transaction
2214 2216 narrowspec.restorebackup(self, b'journal.narrowspec')
2215 2217 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2216 2218 repo.dirstate.restorebackup(None, b'journal.dirstate')
2217 2219
2218 2220 repo.invalidate(clearfilecache=True)
2219 2221
2220 2222 tr = transaction.transaction(
2221 2223 rp,
2222 2224 self.svfs,
2223 2225 vfsmap,
2224 2226 b"journal",
2225 2227 b"undo",
2226 2228 aftertrans(renames),
2227 2229 self.store.createmode,
2228 2230 validator=validate,
2229 2231 releasefn=releasefn,
2230 2232 checkambigfiles=_cachedfiles,
2231 2233 name=desc,
2232 2234 )
2233 2235 tr.changes[b'origrepolen'] = len(self)
2234 2236 tr.changes[b'obsmarkers'] = set()
2235 2237 tr.changes[b'phases'] = []
2236 2238 tr.changes[b'bookmarks'] = {}
2237 2239
2238 2240 tr.hookargs[b'txnid'] = txnid
2239 2241 tr.hookargs[b'txnname'] = desc
2240 2242 # note: writing the fncache only during finalize means that the file is
2241 2243 # outdated when running hooks. As fncache is used for streaming clones,
2242 2244 # this is not expected to break anything that happens during the hooks.
2243 2245 tr.addfinalize(b'flush-fncache', self.store.write)
2244 2246
2245 2247 def txnclosehook(tr2):
2246 2248 """To be run if transaction is successful, will schedule a hook run
2247 2249 """
2248 2250 # Don't reference tr2 in hook() so we don't hold a reference.
2249 2251 # This reduces memory consumption when there are multiple
2250 2252 # transactions per lock. This can likely go away if issue5045
2251 2253 # fixes the function accumulation.
2252 2254 hookargs = tr2.hookargs
2253 2255
2254 2256 def hookfunc(unused_success):
2255 2257 repo = reporef()
2256 2258 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2257 2259 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2258 2260 for name, (old, new) in bmchanges:
2259 2261 args = tr.hookargs.copy()
2260 2262 args.update(bookmarks.preparehookargs(name, old, new))
2261 2263 repo.hook(
2262 2264 b'txnclose-bookmark',
2263 2265 throw=False,
2264 2266 **pycompat.strkwargs(args)
2265 2267 )
2266 2268
2267 2269 if hook.hashook(repo.ui, b'txnclose-phase'):
2268 2270 cl = repo.unfiltered().changelog
2269 2271 phasemv = sorted(
2270 2272 tr.changes[b'phases'], key=lambda r: r[0][0]
2271 2273 )
2272 2274 for revs, (old, new) in phasemv:
2273 2275 for rev in revs:
2274 2276 args = tr.hookargs.copy()
2275 2277 node = hex(cl.node(rev))
2276 2278 args.update(phases.preparehookargs(node, old, new))
2277 2279 repo.hook(
2278 2280 b'txnclose-phase',
2279 2281 throw=False,
2280 2282 **pycompat.strkwargs(args)
2281 2283 )
2282 2284
2283 2285 repo.hook(
2284 2286 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2285 2287 )
2286 2288
2287 2289 reporef()._afterlock(hookfunc)
2288 2290
2289 2291 tr.addfinalize(b'txnclose-hook', txnclosehook)
2290 2292 # Include a leading "-" to make it happen before the transaction summary
2291 2293 # reports registered via scmutil.registersummarycallback() whose names
2292 2294 # are 00-txnreport etc. That way, the caches will be warm when the
2293 2295 # callbacks run.
2294 2296 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2295 2297
2296 2298 def txnaborthook(tr2):
2297 2299 """To be run if transaction is aborted
2298 2300 """
2299 2301 reporef().hook(
2300 2302 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2301 2303 )
2302 2304
2303 2305 tr.addabort(b'txnabort-hook', txnaborthook)
2304 2306 # avoid eager cache invalidation. in-memory data should be identical
2305 2307 # to stored data if transaction has no error.
2306 2308 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2307 2309 self._transref = weakref.ref(tr)
2308 2310 scmutil.registersummarycallback(self, tr, desc)
2309 2311 return tr
2310 2312
2311 2313 def _journalfiles(self):
2312 2314 return (
2313 2315 (self.svfs, b'journal'),
2314 2316 (self.svfs, b'journal.narrowspec'),
2315 2317 (self.vfs, b'journal.narrowspec.dirstate'),
2316 2318 (self.vfs, b'journal.dirstate'),
2317 2319 (self.vfs, b'journal.branch'),
2318 2320 (self.vfs, b'journal.desc'),
2319 2321 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2320 2322 (self.svfs, b'journal.phaseroots'),
2321 2323 )
2322 2324
2323 2325 def undofiles(self):
2324 2326 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2325 2327
2326 2328 @unfilteredmethod
2327 2329 def _writejournal(self, desc):
2328 2330 self.dirstate.savebackup(None, b'journal.dirstate')
2329 2331 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2330 2332 narrowspec.savebackup(self, b'journal.narrowspec')
2331 2333 self.vfs.write(
2332 2334 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2333 2335 )
2334 2336 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2335 2337 bookmarksvfs = bookmarks.bookmarksvfs(self)
2336 2338 bookmarksvfs.write(
2337 2339 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2338 2340 )
2339 2341 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2340 2342
2341 2343 def recover(self):
2342 2344 with self.lock():
2343 2345 if self.svfs.exists(b"journal"):
2344 2346 self.ui.status(_(b"rolling back interrupted transaction\n"))
2345 2347 vfsmap = {
2346 2348 b'': self.svfs,
2347 2349 b'plain': self.vfs,
2348 2350 }
2349 2351 transaction.rollback(
2350 2352 self.svfs,
2351 2353 vfsmap,
2352 2354 b"journal",
2353 2355 self.ui.warn,
2354 2356 checkambigfiles=_cachedfiles,
2355 2357 )
2356 2358 self.invalidate()
2357 2359 return True
2358 2360 else:
2359 2361 self.ui.warn(_(b"no interrupted transaction available\n"))
2360 2362 return False
2361 2363
2362 2364 def rollback(self, dryrun=False, force=False):
2363 2365 wlock = lock = dsguard = None
2364 2366 try:
2365 2367 wlock = self.wlock()
2366 2368 lock = self.lock()
2367 2369 if self.svfs.exists(b"undo"):
2368 2370 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2369 2371
2370 2372 return self._rollback(dryrun, force, dsguard)
2371 2373 else:
2372 2374 self.ui.warn(_(b"no rollback information available\n"))
2373 2375 return 1
2374 2376 finally:
2375 2377 release(dsguard, lock, wlock)
2376 2378
2377 2379 @unfilteredmethod # Until we get smarter cache management
2378 2380 def _rollback(self, dryrun, force, dsguard):
2379 2381 ui = self.ui
2380 2382 try:
2381 2383 args = self.vfs.read(b'undo.desc').splitlines()
2382 2384 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2383 2385 if len(args) >= 3:
2384 2386 detail = args[2]
2385 2387 oldtip = oldlen - 1
2386 2388
2387 2389 if detail and ui.verbose:
2388 2390 msg = _(
2389 2391 b'repository tip rolled back to revision %d'
2390 2392 b' (undo %s: %s)\n'
2391 2393 ) % (oldtip, desc, detail)
2392 2394 else:
2393 2395 msg = _(
2394 2396 b'repository tip rolled back to revision %d (undo %s)\n'
2395 2397 ) % (oldtip, desc)
2396 2398 except IOError:
2397 2399 msg = _(b'rolling back unknown transaction\n')
2398 2400 desc = None
2399 2401
2400 2402 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2401 2403 raise error.Abort(
2402 2404 _(
2403 2405 b'rollback of last commit while not checked out '
2404 2406 b'may lose data'
2405 2407 ),
2406 2408 hint=_(b'use -f to force'),
2407 2409 )
2408 2410
2409 2411 ui.status(msg)
2410 2412 if dryrun:
2411 2413 return 0
2412 2414
2413 2415 parents = self.dirstate.parents()
2414 2416 self.destroying()
2415 2417 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2416 2418 transaction.rollback(
2417 2419 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2418 2420 )
2419 2421 bookmarksvfs = bookmarks.bookmarksvfs(self)
2420 2422 if bookmarksvfs.exists(b'undo.bookmarks'):
2421 2423 bookmarksvfs.rename(
2422 2424 b'undo.bookmarks', b'bookmarks', checkambig=True
2423 2425 )
2424 2426 if self.svfs.exists(b'undo.phaseroots'):
2425 2427 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2426 2428 self.invalidate()
2427 2429
2428 2430 has_node = self.changelog.index.has_node
2429 2431 parentgone = any(not has_node(p) for p in parents)
2430 2432 if parentgone:
2431 2433 # prevent dirstateguard from overwriting already restored one
2432 2434 dsguard.close()
2433 2435
2434 2436 narrowspec.restorebackup(self, b'undo.narrowspec')
2435 2437 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2436 2438 self.dirstate.restorebackup(None, b'undo.dirstate')
2437 2439 try:
2438 2440 branch = self.vfs.read(b'undo.branch')
2439 2441 self.dirstate.setbranch(encoding.tolocal(branch))
2440 2442 except IOError:
2441 2443 ui.warn(
2442 2444 _(
2443 2445 b'named branch could not be reset: '
2444 2446 b'current branch is still \'%s\'\n'
2445 2447 )
2446 2448 % self.dirstate.branch()
2447 2449 )
2448 2450
2449 2451 parents = tuple([p.rev() for p in self[None].parents()])
2450 2452 if len(parents) > 1:
2451 2453 ui.status(
2452 2454 _(
2453 2455 b'working directory now based on '
2454 2456 b'revisions %d and %d\n'
2455 2457 )
2456 2458 % parents
2457 2459 )
2458 2460 else:
2459 2461 ui.status(
2460 2462 _(b'working directory now based on revision %d\n') % parents
2461 2463 )
2462 2464 mergemod.mergestate.clean(self, self[b'.'].node())
2463 2465
2464 2466 # TODO: if we know which new heads may result from this rollback, pass
2465 2467 # them to destroy(), which will prevent the branchhead cache from being
2466 2468 # invalidated.
2467 2469 self.destroyed()
2468 2470 return 0
2469 2471
2470 2472 def _buildcacheupdater(self, newtransaction):
2471 2473 """called during transaction to build the callback updating cache
2472 2474
2473 2475 Lives on the repository to help extensions that might want to augment
2474 2476 this logic. For this purpose, the created transaction is passed to the
2475 2477 method.
2476 2478 """
2477 2479 # we must avoid cyclic reference between repo and transaction.
2478 2480 reporef = weakref.ref(self)
2479 2481
2480 2482 def updater(tr):
2481 2483 repo = reporef()
2482 2484 repo.updatecaches(tr)
2483 2485
2484 2486 return updater
2485 2487
2486 2488 @unfilteredmethod
2487 2489 def updatecaches(self, tr=None, full=False):
2488 2490 """warm appropriate caches
2489 2491
2490 2492 If this function is called after a transaction closed, the transaction
2491 2493 will be available in the 'tr' argument. This can be used to selectively
2492 2494 update caches relevant to the changes in that transaction.
2493 2495
2494 2496 If 'full' is set, make sure all caches the function knows about have
2495 2497 up-to-date data. Even the ones usually loaded more lazily.
2496 2498 """
2497 2499 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2498 2500 # During strip, many caches are invalid, but a
2499 2501 # later call to `destroyed` will refresh them.
2500 2502 return
2501 2503
2502 2504 if tr is None or tr.changes[b'origrepolen'] < len(self):
2503 2505 # accessing the 'served' branchmap should refresh all the others,
2504 2506 self.ui.debug(b'updating the branch cache\n')
2505 2507 self.filtered(b'served').branchmap()
2506 2508 self.filtered(b'served.hidden').branchmap()
2507 2509
2508 2510 if full:
2509 2511 unfi = self.unfiltered()
2510 2512
2511 2513 self.changelog.update_caches(transaction=tr)
2512 2514 self.manifestlog.update_caches(transaction=tr)
2513 2515
2514 2516 rbc = unfi.revbranchcache()
2515 2517 for r in unfi.changelog:
2516 2518 rbc.branchinfo(r)
2517 2519 rbc.write()
2518 2520
2519 2521 # ensure the working copy parents are in the manifestfulltextcache
2520 2522 for ctx in self[b'.'].parents():
2521 2523 ctx.manifest() # accessing the manifest is enough
2522 2524
2523 2525 # accessing fnode cache warms the cache
2524 2526 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2525 2527 # accessing tags warms the cache
2526 2528 self.tags()
2527 2529 self.filtered(b'served').tags()
2528 2530
2529 2531 # The `full` arg is documented as updating even the lazily-loaded
2530 2532 # caches immediately, so we're forcing a write to cause these caches
2531 2533 # to be warmed up even if they haven't explicitly been requested
2532 2534 # yet (if they've never been used by hg, they won't ever have been
2533 2535 # written, even if they're a subset of another kind of cache that
2534 2536 # *has* been used).
2535 2537 for filt in repoview.filtertable.keys():
2536 2538 filtered = self.filtered(filt)
2537 2539 filtered.branchmap().write(filtered)
2538 2540
2539 2541 def invalidatecaches(self):
2540 2542
2541 2543 if '_tagscache' in vars(self):
2542 2544 # can't use delattr on proxy
2543 2545 del self.__dict__['_tagscache']
2544 2546
2545 2547 self._branchcaches.clear()
2546 2548 self.invalidatevolatilesets()
2547 2549 self._sparsesignaturecache.clear()
2548 2550
2549 2551 def invalidatevolatilesets(self):
2550 2552 self.filteredrevcache.clear()
2551 2553 obsolete.clearobscaches(self)
2552 2554 self._quick_access_changeid_invalidate()
2553 2555
2554 2556 def invalidatedirstate(self):
2555 2557 '''Invalidates the dirstate, causing the next call to dirstate
2556 2558 to check if it was modified since the last time it was read,
2557 2559 rereading it if it has.
2558 2560
2559 2561 This is different from dirstate.invalidate() in that it doesn't
2560 2562 always reread the dirstate. Use dirstate.invalidate() if you want to
2561 2563 explicitly read the dirstate again (i.e. restoring it to a previous
2562 2564 known good state).'''
2563 2565 if hasunfilteredcache(self, 'dirstate'):
2564 2566 for k in self.dirstate._filecache:
2565 2567 try:
2566 2568 delattr(self.dirstate, k)
2567 2569 except AttributeError:
2568 2570 pass
2569 2571 delattr(self.unfiltered(), 'dirstate')
2570 2572
2571 2573 def invalidate(self, clearfilecache=False):
2572 2574 '''Invalidates both store and non-store parts other than dirstate
2573 2575
2574 2576 If a transaction is running, invalidation of store is omitted,
2575 2577 because discarding in-memory changes might cause inconsistency
2576 2578 (e.g. incomplete fncache causes unintentional failure, but
2577 2579 redundant one doesn't).
2578 2580 '''
2579 2581 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2580 2582 for k in list(self._filecache.keys()):
2581 2583 # dirstate is invalidated separately in invalidatedirstate()
2582 2584 if k == b'dirstate':
2583 2585 continue
2584 2586 if (
2585 2587 k == b'changelog'
2586 2588 and self.currenttransaction()
2587 2589 and self.changelog._delayed
2588 2590 ):
2589 2591 # The changelog object may store unwritten revisions. We don't
2590 2592 # want to lose them.
2591 2593 # TODO: Solve the problem instead of working around it.
2592 2594 continue
2593 2595
2594 2596 if clearfilecache:
2595 2597 del self._filecache[k]
2596 2598 try:
2597 2599 delattr(unfiltered, k)
2598 2600 except AttributeError:
2599 2601 pass
2600 2602 self.invalidatecaches()
2601 2603 if not self.currenttransaction():
2602 2604 # TODO: Changing contents of store outside transaction
2603 2605 # causes inconsistency. We should make in-memory store
2604 2606 # changes detectable, and abort if changed.
2605 2607 self.store.invalidatecaches()
2606 2608
2607 2609 def invalidateall(self):
2608 2610 '''Fully invalidates both store and non-store parts, causing the
2609 2611 subsequent operation to reread any outside changes.'''
2610 2612 # extension should hook this to invalidate its caches
2611 2613 self.invalidate()
2612 2614 self.invalidatedirstate()
2613 2615
2614 2616 @unfilteredmethod
2615 2617 def _refreshfilecachestats(self, tr):
2616 2618 """Reload stats of cached files so that they are flagged as valid"""
2617 2619 for k, ce in self._filecache.items():
2618 2620 k = pycompat.sysstr(k)
2619 2621 if k == 'dirstate' or k not in self.__dict__:
2620 2622 continue
2621 2623 ce.refresh()
2622 2624
2623 2625 def _lock(
2624 2626 self,
2625 2627 vfs,
2626 2628 lockname,
2627 2629 wait,
2628 2630 releasefn,
2629 2631 acquirefn,
2630 2632 desc,
2631 2633 inheritchecker=None,
2632 2634 parentenvvar=None,
2633 2635 ):
2634 2636 parentlock = None
2635 2637 # the contents of parentenvvar are used by the underlying lock to
2636 2638 # determine whether it can be inherited
2637 2639 if parentenvvar is not None:
2638 2640 parentlock = encoding.environ.get(parentenvvar)
2639 2641
2640 2642 timeout = 0
2641 2643 warntimeout = 0
2642 2644 if wait:
2643 2645 timeout = self.ui.configint(b"ui", b"timeout")
2644 2646 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2645 2647 # internal config: ui.signal-safe-lock
2646 2648 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2647 2649
2648 2650 l = lockmod.trylock(
2649 2651 self.ui,
2650 2652 vfs,
2651 2653 lockname,
2652 2654 timeout,
2653 2655 warntimeout,
2654 2656 releasefn=releasefn,
2655 2657 acquirefn=acquirefn,
2656 2658 desc=desc,
2657 2659 inheritchecker=inheritchecker,
2658 2660 parentlock=parentlock,
2659 2661 signalsafe=signalsafe,
2660 2662 )
2661 2663 return l
2662 2664
2663 2665 def _afterlock(self, callback):
2664 2666 """add a callback to be run when the repository is fully unlocked
2665 2667
2666 2668 The callback will be executed when the outermost lock is released
2667 2669 (with wlock being higher level than 'lock')."""
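        # Illustrative usage (a sketch; the callback is hypothetical):
        #
        #     def _notify(success):
        #         ...  # runs once the outermost lock is released
        #     repo._afterlock(_notify)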
2668 2670 for ref in (self._wlockref, self._lockref):
2669 2671 l = ref and ref()
2670 2672 if l and l.held:
2671 2673 l.postrelease.append(callback)
2672 2674 break
2673 2675 else: # no lock has been found.
2674 2676 callback(True)
2675 2677
2676 2678 def lock(self, wait=True):
2677 2679 '''Lock the repository store (.hg/store) and return a weak reference
2678 2680 to the lock. Use this before modifying the store (e.g. committing or
2679 2681 stripping). If you are opening a transaction, get a lock as well.
2680 2682
2681 2683 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2682 2684 'wlock' first to avoid a deadlock hazard.'''
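        # Typical usage (a sketch mirroring callers elsewhere in this module):
        #
        #     with repo.lock():
        #         with repo.transaction(b'my-operation') as tr:
        #             ...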
2683 2685 l = self._currentlock(self._lockref)
2684 2686 if l is not None:
2685 2687 l.lock()
2686 2688 return l
2687 2689
2688 2690 l = self._lock(
2689 2691 vfs=self.svfs,
2690 2692 lockname=b"lock",
2691 2693 wait=wait,
2692 2694 releasefn=None,
2693 2695 acquirefn=self.invalidate,
2694 2696 desc=_(b'repository %s') % self.origroot,
2695 2697 )
2696 2698 self._lockref = weakref.ref(l)
2697 2699 return l
2698 2700
2699 2701 def _wlockchecktransaction(self):
2700 2702 if self.currenttransaction() is not None:
2701 2703 raise error.LockInheritanceContractViolation(
2702 2704 b'wlock cannot be inherited in the middle of a transaction'
2703 2705 )
2704 2706
2705 2707 def wlock(self, wait=True):
2706 2708 '''Lock the non-store parts of the repository (everything under
2707 2709 .hg except .hg/store) and return a weak reference to the lock.
2708 2710
2709 2711 Use this before modifying files in .hg.
2710 2712
2711 2713 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2712 2714 'wlock' first to avoid a deadlock hazard.'''
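        # Typical acquisition order (a sketch following the rule above, as
        # done by commit() further down in this module):
        #
        #     with repo.wlock(), repo.lock():
        #         ...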
2713 2715 l = self._wlockref and self._wlockref()
2714 2716 if l is not None and l.held:
2715 2717 l.lock()
2716 2718 return l
2717 2719
2718 2720 # We do not need to check for non-waiting lock acquisition. Such
2719 2721 # an acquisition would not cause a deadlock, as it would just fail.
2720 2722 if wait and (
2721 2723 self.ui.configbool(b'devel', b'all-warnings')
2722 2724 or self.ui.configbool(b'devel', b'check-locks')
2723 2725 ):
2724 2726 if self._currentlock(self._lockref) is not None:
2725 2727 self.ui.develwarn(b'"wlock" acquired after "lock"')
2726 2728
2727 2729 def unlock():
2728 2730 if self.dirstate.pendingparentchange():
2729 2731 self.dirstate.invalidate()
2730 2732 else:
2731 2733 self.dirstate.write(None)
2732 2734
2733 2735 self._filecache[b'dirstate'].refresh()
2734 2736
2735 2737 l = self._lock(
2736 2738 self.vfs,
2737 2739 b"wlock",
2738 2740 wait,
2739 2741 unlock,
2740 2742 self.invalidatedirstate,
2741 2743 _(b'working directory of %s') % self.origroot,
2742 2744 inheritchecker=self._wlockchecktransaction,
2743 2745 parentenvvar=b'HG_WLOCK_LOCKER',
2744 2746 )
2745 2747 self._wlockref = weakref.ref(l)
2746 2748 return l
2747 2749
2748 2750 def _currentlock(self, lockref):
2749 2751 """Returns the lock if it's held, or None if it's not."""
2750 2752 if lockref is None:
2751 2753 return None
2752 2754 l = lockref()
2753 2755 if l is None or not l.held:
2754 2756 return None
2755 2757 return l
2756 2758
2757 2759 def currentwlock(self):
2758 2760 """Returns the wlock if it's held, or None if it's not."""
2759 2761 return self._currentlock(self._wlockref)
2760 2762
2761 2763 def _filecommit(
2762 2764 self,
2763 2765 fctx,
2764 2766 manifest1,
2765 2767 manifest2,
2766 2768 linkrev,
2767 2769 tr,
2768 2770 changelist,
2769 2771 includecopymeta,
2770 2772 ):
2771 2773 """
2772 2774 commit an individual file as part of a larger transaction
2773 2775 """
2774 2776
2775 2777 fname = fctx.path()
2776 2778 fparent1 = manifest1.get(fname, nullid)
2777 2779 fparent2 = manifest2.get(fname, nullid)
2778 2780 if isinstance(fctx, context.filectx):
2779 2781 node = fctx.filenode()
2780 2782 if node in [fparent1, fparent2]:
2781 2783 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2782 2784 if (
2783 2785 fparent1 != nullid
2784 2786 and manifest1.flags(fname) != fctx.flags()
2785 2787 ) or (
2786 2788 fparent2 != nullid
2787 2789 and manifest2.flags(fname) != fctx.flags()
2788 2790 ):
2789 2791 changelist.append(fname)
2790 2792 return node
2791 2793
2792 2794 flog = self.file(fname)
2793 2795 meta = {}
2794 2796 cfname = fctx.copysource()
2795 2797 if cfname and cfname != fname:
2796 2798 # Mark the new revision of this file as a copy of another
2797 2799 # file. This copy data will effectively act as a parent
2798 2800 # of this new revision. If this is a merge, the first
2799 2801 # parent will be the nullid (meaning "look up the copy data")
2800 2802 # and the second one will be the other parent. For example:
2801 2803 #
2802 2804 # 0 --- 1 --- 3 rev1 changes file foo
2803 2805 # \ / rev2 renames foo to bar and changes it
2804 2806 # \- 2 -/ rev3 should have bar with all changes and
2805 2807 # should record that bar descends from
2806 2808 # bar in rev2 and foo in rev1
2807 2809 #
2808 2810 # this allows this merge to succeed:
2809 2811 #
2810 2812 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2811 2813 # \ / merging rev3 and rev4 should use bar@rev2
2812 2814 # \- 2 --- 4 as the merge base
2813 2815 #
2814 2816
2815 2817 cnode = manifest1.get(cfname)
2816 2818 newfparent = fparent2
2817 2819
2818 2820 if manifest2: # branch merge
2819 2821 if fparent2 == nullid or cnode is None: # copied on remote side
2820 2822 if cfname in manifest2:
2821 2823 cnode = manifest2[cfname]
2822 2824 newfparent = fparent1
2823 2825
2824 2826 # Here, we used to search backwards through history to try to find
2825 2827 # where the file copy came from if the source of a copy was not in
2826 2828 # the parent directory. However, this doesn't actually make sense to
2827 2829 # do (what does a copy from something not in your working copy even
2828 2830 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2829 2831 # the user that copy information was dropped, so if they didn't
2830 2832 # expect this outcome it can be fixed, but this is the correct
2831 2833 # behavior in this circumstance.
2832 2834
2833 2835 if cnode:
2834 2836 self.ui.debug(
2835 2837 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2836 2838 )
2837 2839 if includecopymeta:
2838 2840 meta[b"copy"] = cfname
2839 2841 meta[b"copyrev"] = hex(cnode)
2840 2842 fparent1, fparent2 = nullid, newfparent
2841 2843 else:
2842 2844 self.ui.warn(
2843 2845 _(
2844 2846 b"warning: can't find ancestor for '%s' "
2845 2847 b"copied from '%s'!\n"
2846 2848 )
2847 2849 % (fname, cfname)
2848 2850 )
2849 2851
2850 2852 elif fparent1 == nullid:
2851 2853 fparent1, fparent2 = fparent2, nullid
2852 2854 elif fparent2 != nullid:
2853 2855 # is one parent an ancestor of the other?
2854 2856 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2855 2857 if fparent1 in fparentancestors:
2856 2858 fparent1, fparent2 = fparent2, nullid
2857 2859 elif fparent2 in fparentancestors:
2858 2860 fparent2 = nullid
2859 2861 elif not fparentancestors:
2860 2862 # TODO: this whole if-else might be simplified much more
2861 2863 ms = mergemod.mergestate.read(self)
2862 2864 if (
2863 2865 fname in ms
2864 2866 and ms[fname] == mergemod.MERGE_RECORD_MERGED_OTHER
2865 2867 ):
2866 2868 fparent1, fparent2 = fparent2, nullid
2867 2869
2868 2870 # is the file changed?
2869 2871 text = fctx.data()
2870 2872 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2871 2873 changelist.append(fname)
2872 2874 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2873 2875 # are just the flags changed during merge?
2874 2876 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2875 2877 changelist.append(fname)
2876 2878
2877 2879 return fparent1
2878 2880
2879 2881 def checkcommitpatterns(self, wctx, match, status, fail):
2880 2882 """check for commit arguments that aren't committable"""
2881 2883 if match.isexact() or match.prefix():
2882 2884 matched = set(status.modified + status.added + status.removed)
2883 2885
2884 2886 for f in match.files():
2885 2887 f = self.dirstate.normalize(f)
2886 2888 if f == b'.' or f in matched or f in wctx.substate:
2887 2889 continue
2888 2890 if f in status.deleted:
2889 2891 fail(f, _(b'file not found!'))
2890 2892 # Is it a directory that exists or used to exist?
2891 2893 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2892 2894 d = f + b'/'
2893 2895 for mf in matched:
2894 2896 if mf.startswith(d):
2895 2897 break
2896 2898 else:
2897 2899 fail(f, _(b"no match under directory!"))
2898 2900 elif f not in self.dirstate:
2899 2901 fail(f, _(b"file not tracked!"))
2900 2902
2901 2903 @unfilteredmethod
2902 2904 def commit(
2903 2905 self,
2904 2906 text=b"",
2905 2907 user=None,
2906 2908 date=None,
2907 2909 match=None,
2908 2910 force=False,
2909 2911 editor=None,
2910 2912 extra=None,
2911 2913 ):
2912 2914 """Add a new revision to current repository.
2913 2915
2914 2916 Revision information is gathered from the working directory,
2915 2917 match can be used to filter the committed files. If editor is
2916 2918 supplied, it is called to get a commit message.
2917 2919 """
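        # Illustrative usage (a sketch; user and message are hypothetical):
        #
        #     node = repo.commit(
        #         text=b'fix the frobnicator',
        #         user=b'Jane Doe <jane@example.com>',
        #     )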
2918 2920 if extra is None:
2919 2921 extra = {}
2920 2922
2921 2923 def fail(f, msg):
2922 2924 raise error.Abort(b'%s: %s' % (f, msg))
2923 2925
2924 2926 if not match:
2925 2927 match = matchmod.always()
2926 2928
2927 2929 if not force:
2928 2930 match.bad = fail
2929 2931
2930 2932 # lock() for recent changelog (see issue4368)
2931 2933 with self.wlock(), self.lock():
2932 2934 wctx = self[None]
2933 2935 merge = len(wctx.parents()) > 1
2934 2936
2935 2937 if not force and merge and not match.always():
2936 2938 raise error.Abort(
2937 2939 _(
2938 2940 b'cannot partially commit a merge '
2939 2941 b'(do not specify files or patterns)'
2940 2942 )
2941 2943 )
2942 2944
2943 2945 status = self.status(match=match, clean=force)
2944 2946 if force:
2945 2947 status.modified.extend(
2946 2948 status.clean
2947 2949 ) # mq may commit clean files
2948 2950
2949 2951 # check subrepos
2950 2952 subs, commitsubs, newstate = subrepoutil.precommit(
2951 2953 self.ui, wctx, status, match, force=force
2952 2954 )
2953 2955
2954 2956 # make sure all explicit patterns are matched
2955 2957 if not force:
2956 2958 self.checkcommitpatterns(wctx, match, status, fail)
2957 2959
2958 2960 cctx = context.workingcommitctx(
2959 2961 self, status, text, user, date, extra
2960 2962 )
2961 2963
2962 2964 ms = mergemod.mergestate.read(self)
2963 2965 mergeutil.checkunresolved(ms)
2964 2966
2965 2967 # internal config: ui.allowemptycommit
2966 2968 allowemptycommit = (
2967 2969 wctx.branch() != wctx.p1().branch()
2968 2970 or extra.get(b'close')
2969 2971 or merge
2970 2972 or cctx.files()
2971 2973 or self.ui.configbool(b'ui', b'allowemptycommit')
2972 2974 )
2973 2975 if not allowemptycommit:
2974 2976 self.ui.debug(b'nothing to commit, clearing merge state\n')
2975 2977 ms.reset()
2976 2978 return None
2977 2979
2978 2980 if merge and cctx.deleted():
2979 2981 raise error.Abort(_(b"cannot commit merge with missing files"))
2980 2982
2981 2983 if editor:
2982 2984 cctx._text = editor(self, cctx, subs)
2983 2985 edited = text != cctx._text
2984 2986
2985 2987 # Save commit message in case this transaction gets rolled back
2986 2988 # (e.g. by a pretxncommit hook). Leave the content alone on
2987 2989 # the assumption that the user will use the same editor again.
2988 2990 msgfn = self.savecommitmessage(cctx._text)
2989 2991
2990 2992 # commit subs and write new state
2991 2993 if subs:
2992 2994 uipathfn = scmutil.getuipathfn(self)
2993 2995 for s in sorted(commitsubs):
2994 2996 sub = wctx.sub(s)
2995 2997 self.ui.status(
2996 2998 _(b'committing subrepository %s\n')
2997 2999 % uipathfn(subrepoutil.subrelpath(sub))
2998 3000 )
2999 3001 sr = sub.commit(cctx._text, user, date)
3000 3002 newstate[s] = (newstate[s][0], sr)
3001 3003 subrepoutil.writestate(self, newstate)
3002 3004
3003 3005 p1, p2 = self.dirstate.parents()
3004 3006 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3005 3007 try:
3006 3008 self.hook(
3007 3009 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3008 3010 )
3009 3011 with self.transaction(b'commit'):
3010 3012 ret = self.commitctx(cctx, True)
3011 3013 # update bookmarks, dirstate and mergestate
3012 3014 bookmarks.update(self, [p1, p2], ret)
3013 3015 cctx.markcommitted(ret)
3014 3016 ms.reset()
3015 3017 except: # re-raises
3016 3018 if edited:
3017 3019 self.ui.write(
3018 3020 _(b'note: commit message saved in %s\n') % msgfn
3019 3021 )
3020 3022 self.ui.write(
3021 3023 _(
3022 3024 b"note: use 'hg commit --logfile "
3023 3025 b".hg/last-message.txt --edit' to reuse it\n"
3024 3026 )
3025 3027 )
3026 3028 raise
3027 3029
3028 3030 def commithook(unused_success):
3029 3031 # hack for commands that use a temporary commit (e.g. histedit):
3030 3032 # the temporary commit may have been stripped before the hook runs
3031 3033 if self.changelog.hasnode(ret):
3032 3034 self.hook(
3033 3035 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3034 3036 )
3035 3037
3036 3038 self._afterlock(commithook)
3037 3039 return ret
3038 3040
3039 3041 @unfilteredmethod
3040 3042 def commitctx(self, ctx, error=False, origctx=None):
3041 3043 """Add a new revision to current repository.
3042 3044 Revision information is passed via the context argument.
3043 3045
3044 3046 ctx.files() should list all files involved in this commit, i.e.
3045 3047 modified/added/removed files. On merge, it may be wider than the
3046 3048 ctx.files() to be committed, since any file nodes derived directly
3047 3049 from p1 or p2 are excluded from the committed ctx.files().
3048 3050
3049 3051 origctx is for convert to work around the problem that bug
3050 3052 fixes to the files list in changesets change hashes. For
3051 3053 convert to be the identity, it can pass an origctx and this
3052 3054 function will use the same files list when it makes sense to
3053 3055 do so.
3054 3056 """
3055 3057
3056 3058 p1, p2 = ctx.p1(), ctx.p2()
3057 3059 user = ctx.user()
3058 3060
3059 3061 if self.filecopiesmode == b'changeset-sidedata':
3060 3062 writechangesetcopy = True
3061 3063 writefilecopymeta = True
3062 3064 writecopiesto = None
3063 3065 else:
3064 3066 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3065 3067 writefilecopymeta = writecopiesto != b'changeset-only'
3066 3068 writechangesetcopy = writecopiesto in (
3067 3069 b'changeset-only',
3068 3070 b'compatibility',
3069 3071 )
3070 3072 p1copies, p2copies = None, None
3071 3073 if writechangesetcopy:
3072 3074 p1copies = ctx.p1copies()
3073 3075 p2copies = ctx.p2copies()
3074 3076 filesadded, filesremoved = None, None
3075 3077 with self.lock(), self.transaction(b"commit") as tr:
3076 3078 trp = weakref.proxy(tr)
3077 3079
3078 3080 if ctx.manifestnode():
3079 3081 # reuse an existing manifest revision
3080 3082 self.ui.debug(b'reusing known manifest\n')
3081 3083 mn = ctx.manifestnode()
3082 3084 files = ctx.files()
3083 3085 if writechangesetcopy:
3084 3086 filesadded = ctx.filesadded()
3085 3087 filesremoved = ctx.filesremoved()
3086 3088 elif ctx.files():
3087 3089 m1ctx = p1.manifestctx()
3088 3090 m2ctx = p2.manifestctx()
3089 3091 mctx = m1ctx.copy()
3090 3092
3091 3093 m = mctx.read()
3092 3094 m1 = m1ctx.read()
3093 3095 m2 = m2ctx.read()
3094 3096
3095 3097 # check in files
3096 3098 added = []
3097 3099 changed = []
3098 3100 removed = list(ctx.removed())
3099 3101 linkrev = len(self)
3100 3102 self.ui.note(_(b"committing files:\n"))
3101 3103 uipathfn = scmutil.getuipathfn(self)
3102 3104 for f in sorted(ctx.modified() + ctx.added()):
3103 3105 self.ui.note(uipathfn(f) + b"\n")
3104 3106 try:
3105 3107 fctx = ctx[f]
3106 3108 if fctx is None:
3107 3109 removed.append(f)
3108 3110 else:
3109 3111 added.append(f)
3110 3112 m[f] = self._filecommit(
3111 3113 fctx,
3112 3114 m1,
3113 3115 m2,
3114 3116 linkrev,
3115 3117 trp,
3116 3118 changed,
3117 3119 writefilecopymeta,
3118 3120 )
3119 3121 m.setflag(f, fctx.flags())
3120 3122 except OSError:
3121 3123 self.ui.warn(
3122 3124 _(b"trouble committing %s!\n") % uipathfn(f)
3123 3125 )
3124 3126 raise
3125 3127 except IOError as inst:
3126 3128 errcode = getattr(inst, 'errno', errno.ENOENT)
3127 3129 if error or errcode and errcode != errno.ENOENT:
3128 3130 self.ui.warn(
3129 3131 _(b"trouble committing %s!\n") % uipathfn(f)
3130 3132 )
3131 3133 raise
3132 3134
3133 3135 # update manifest
3134 3136 removed = [f for f in removed if f in m1 or f in m2]
3135 3137 drop = sorted([f for f in removed if f in m])
3136 3138 for f in drop:
3137 3139 del m[f]
3138 3140 if p2.rev() != nullrev:
3139 3141
3140 3142 @util.cachefunc
3141 3143 def mas():
3142 3144 p1n = p1.node()
3143 3145 p2n = p2.node()
3144 3146 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3145 3147 if not cahs:
3146 3148 cahs = [nullrev]
3147 3149 return [self[r].manifest() for r in cahs]
3148 3150
3149 3151 def deletionfromparent(f):
3150 3152 # When a file is removed relative to p1 in a merge, this
3151 3153 # function determines whether the absence is due to a
3152 3154 # deletion from a parent, or whether the merge commit
3153 3155 # itself deletes the file. We decide this by doing a
3154 3156 # simplified three way merge of the manifest entry for
3155 3157 # the file. There are two ways we decide the merge
3156 3158 # itself didn't delete a file:
3157 3159 # - neither parent (nor the merge) contain the file
3158 3160 # - exactly one parent contains the file, and that
3159 3161 # parent has the same filelog entry as the merge
3160 3162 # ancestor (or all of them if there are two). In other
3161 3163 # words, that parent left the file unchanged while the
3162 3164 # other one deleted it.
3163 3165 # One way to think about this is that deleting a file is
3164 3166 # similar to emptying it, so the list of changed files
3165 3167 # should be similar either way. The computation
3166 3168 # described above is not done directly in _filecommit
3167 3169 # when creating the list of changed files, however
3168 3170 # it does something very similar by comparing filelog
3169 3171 # nodes.
3170 3172 if f in m1:
3171 3173 return f not in m2 and all(
3172 3174 f in ma and ma.find(f) == m1.find(f)
3173 3175 for ma in mas()
3174 3176 )
3175 3177 elif f in m2:
3176 3178 return all(
3177 3179 f in ma and ma.find(f) == m2.find(f)
3178 3180 for ma in mas()
3179 3181 )
3180 3182 else:
3181 3183 return True
3182 3184
3183 3185 removed = [f for f in removed if not deletionfromparent(f)]
3184 3186
3185 3187 files = changed + removed
3186 3188 md = None
3187 3189 if not files:
3188 3190 # if no "files" actually changed in terms of the changelog,
3189 3191 # try hard to detect an unmodified manifest entry so that the
3190 3192 # exact same commit can be reproduced later on convert.
3191 3193 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3192 3194 if not files and md:
3193 3195 self.ui.debug(
3194 3196 b'not reusing manifest (no file change in '
3195 3197 b'changelog, but manifest differs)\n'
3196 3198 )
3197 3199 if files or md:
3198 3200 self.ui.note(_(b"committing manifest\n"))
3199 3201 # we're using narrowmatch here since it's already applied at
3200 3202 # other stages (such as dirstate.walk), so we're already
3201 3203 # ignoring things outside of narrowspec in most cases. The
3202 3204 # one case where we might have files outside the narrowspec
3203 3205 # at this point is merges, and we already error out in the
3204 3206 # case where the merge has files outside of the narrowspec,
3205 3207 # so this is safe.
3206 3208 mn = mctx.write(
3207 3209 trp,
3208 3210 linkrev,
3209 3211 p1.manifestnode(),
3210 3212 p2.manifestnode(),
3211 3213 added,
3212 3214 drop,
3213 3215 match=self.narrowmatch(),
3214 3216 )
3215 3217
3216 3218 if writechangesetcopy:
3217 3219 filesadded = [
3218 3220 f for f in changed if not (f in m1 or f in m2)
3219 3221 ]
3220 3222 filesremoved = removed
3221 3223 else:
3222 3224 self.ui.debug(
3223 3225 b'reusing manifest from p1 (listed files '
3224 3226 b'actually unchanged)\n'
3225 3227 )
3226 3228 mn = p1.manifestnode()
3227 3229 else:
3228 3230 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3229 3231 mn = p1.manifestnode()
3230 3232 files = []
3231 3233
3232 3234 if writecopiesto == b'changeset-only':
3233 3235 # If writing only to changeset extras, use None to indicate that
3234 3236 # no entry should be written. If writing to both, write an empty
3235 3237 # entry to prevent the reader from falling back to reading
3236 3238 # filelogs.
3237 3239 p1copies = p1copies or None
3238 3240 p2copies = p2copies or None
3239 3241 filesadded = filesadded or None
3240 3242 filesremoved = filesremoved or None
3241 3243
3242 3244 if origctx and origctx.manifestnode() == mn:
3243 3245 files = origctx.files()
3244 3246
3245 3247 # update changelog
3246 3248 self.ui.note(_(b"committing changelog\n"))
3247 3249 self.changelog.delayupdate(tr)
3248 3250 n = self.changelog.add(
3249 3251 mn,
3250 3252 files,
3251 3253 ctx.description(),
3252 3254 trp,
3253 3255 p1.node(),
3254 3256 p2.node(),
3255 3257 user,
3256 3258 ctx.date(),
3257 3259 ctx.extra().copy(),
3258 3260 p1copies,
3259 3261 p2copies,
3260 3262 filesadded,
3261 3263 filesremoved,
3262 3264 )
3263 3265 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3264 3266 self.hook(
3265 3267 b'pretxncommit',
3266 3268 throw=True,
3267 3269 node=hex(n),
3268 3270 parent1=xp1,
3269 3271 parent2=xp2,
3270 3272 )
3271 3273 # set the new commit in its proper phase
3272 3274 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3273 3275 if targetphase:
3274 3276 # retract boundary does not alter parent changesets.
3275 3277 # if a parent has a higher phase, the resulting phase will
3276 3278 # be compliant anyway
3277 3279 #
3278 3280 # if minimal phase was 0 we don't need to retract anything
3279 3281 phases.registernew(self, tr, targetphase, [n])
3280 3282 return n
3281 3283
3282 3284 @unfilteredmethod
3283 3285 def destroying(self):
3284 3286 '''Inform the repository that nodes are about to be destroyed.
3285 3287 Intended for use by strip and rollback, so there's a common
3286 3288 place for anything that has to be done before destroying history.
3287 3289
3288 3290 This is mostly useful for saving state that is in memory and waiting
3289 3291 to be flushed when the current lock is released. Because a call to
3290 3292 destroyed is imminent, the repo will be invalidated causing those
3291 3293 changes to stay in memory (waiting for the next unlock), or vanish
3292 3294 completely.
3293 3295 '''
3294 3296 # When using the same lock to commit and strip, the phasecache is left
3295 3297 # dirty after committing. Then when we strip, the repo is invalidated,
3296 3298 # causing those changes to disappear.
3297 3299 if '_phasecache' in vars(self):
3298 3300 self._phasecache.write()
3299 3301
3300 3302 @unfilteredmethod
3301 3303 def destroyed(self):
3302 3304 '''Inform the repository that nodes have been destroyed.
3303 3305 Intended for use by strip and rollback, so there's a common
3304 3306 place for anything that has to be done after destroying history.
3305 3307 '''
3306 3308 # When one tries to:
3307 3309 # 1) destroy nodes thus calling this method (e.g. strip)
3308 3310 # 2) use phasecache somewhere (e.g. commit)
3309 3311 #
3310 3312 # then 2) will fail because the phasecache contains nodes that were
3311 3313 # removed. We can either remove phasecache from the filecache,
3312 3314 # causing it to reload next time it is accessed, or simply filter
3313 3315 # the removed nodes now and write the updated cache.
3314 3316 self._phasecache.filterunknown(self)
3315 3317 self._phasecache.write()
3316 3318
3317 3319 # refresh all repository caches
3318 3320 self.updatecaches()
3319 3321
3320 3322 # Ensure the persistent tag cache is updated. Doing it now
3321 3323 # means that the tag cache only has to worry about destroyed
3322 3324 # heads immediately after a strip/rollback. That in turn
3323 3325 # guarantees that "cachetip == currenttip" (comparing both rev
3324 3326 # and node) always means no nodes have been added or destroyed.
3325 3327
3326 3328 # XXX this is suboptimal when qrefresh'ing: we strip the current
3327 3329 # head, refresh the tag cache, then immediately add a new head.
3328 3330 # But I think doing it this way is necessary for the "instant
3329 3331 # tag cache retrieval" case to work.
3330 3332 self.invalidate()
3331 3333
3332 3334 def status(
3333 3335 self,
3334 3336 node1=b'.',
3335 3337 node2=None,
3336 3338 match=None,
3337 3339 ignored=False,
3338 3340 clean=False,
3339 3341 unknown=False,
3340 3342 listsubrepos=False,
3341 3343 ):
3342 3344 '''a convenience method that calls node1.status(node2)'''
3343 3345 return self[node1].status(
3344 3346 node2, match, ignored, clean, unknown, listsubrepos
3345 3347 )
3346 3348
3347 3349 def addpostdsstatus(self, ps):
3348 3350 """Add a callback to run within the wlock, at the point at which status
3349 3351 fixups happen.
3350 3352
3351 3353 On status completion, callback(wctx, status) will be called with the
3352 3354 wlock held, unless the dirstate has changed from underneath or the wlock
3353 3355 couldn't be grabbed.
3354 3356
3355 3357 Callbacks should not capture and use a cached copy of the dirstate --
3356 3358 it might change in the meanwhile. Instead, they should access the
3357 3359 dirstate via wctx.repo().dirstate.
3358 3360
3359 3361 This list is emptied out after each status run -- extensions should
3360 3362 make sure it adds to this list each time dirstate.status is called.
3361 3363 Extensions should also make sure they don't call this for statuses
3362 3364 that don't involve the dirstate.
3363 3365 """
3364 3366
3365 3367 # The list is located here for uniqueness reasons -- it is actually
3366 3368 # managed by the workingctx, but that isn't unique per-repo.
3367 3369 self._postdsstatus.append(ps)
3368 3370
3369 3371 def postdsstatus(self):
3370 3372 """Used by workingctx to get the list of post-dirstate-status hooks."""
3371 3373 return self._postdsstatus
3372 3374
3373 3375 def clearpostdsstatus(self):
3374 3376 """Used by workingctx to clear post-dirstate-status hooks."""
3375 3377 del self._postdsstatus[:]
3376 3378
3377 3379 def heads(self, start=None):
3378 3380 if start is None:
3379 3381 cl = self.changelog
3380 3382 headrevs = reversed(cl.headrevs())
3381 3383 return [cl.node(rev) for rev in headrevs]
3382 3384
3383 3385 heads = self.changelog.heads(start)
3384 3386 # sort the output in rev descending order
3385 3387 return sorted(heads, key=self.changelog.rev, reverse=True)
3386 3388
3387 3389 def branchheads(self, branch=None, start=None, closed=False):
3388 3390 '''return a (possibly filtered) list of heads for the given branch
3389 3391
3390 3392 Heads are returned in topological order, from newest to oldest.
3391 3393 If branch is None, use the dirstate branch.
3392 3394 If start is not None, return only heads reachable from start.
3393 3395 If closed is True, return heads that are marked as closed as well.
3394 3396 '''
3395 3397 if branch is None:
3396 3398 branch = self[None].branch()
3397 3399 branches = self.branchmap()
3398 3400 if not branches.hasbranch(branch):
3399 3401 return []
3400 3402 # the cache returns heads ordered lowest to highest
3401 3403 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3402 3404 if start is not None:
3403 3405 # filter out the heads that cannot be reached from startrev
3404 3406 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3405 3407 bheads = [h for h in bheads if h in fbheads]
3406 3408 return bheads
3407 3409
3408 3410 def branches(self, nodes):
3409 3411 if not nodes:
3410 3412 nodes = [self.changelog.tip()]
3411 3413 b = []
3412 3414 for n in nodes:
3413 3415 t = n
3414 3416 while True:
3415 3417 p = self.changelog.parents(n)
3416 3418 if p[1] != nullid or p[0] == nullid:
3417 3419 b.append((t, n, p[0], p[1]))
3418 3420 break
3419 3421 n = p[0]
3420 3422 return b
3421 3423
3422 3424 def between(self, pairs):
3423 3425 r = []
3424 3426
3425 3427 for top, bottom in pairs:
3426 3428 n, l, i = top, [], 0
3427 3429 f = 1
3428 3430
3429 3431 while n != bottom and n != nullid:
3430 3432 p = self.changelog.parents(n)[0]
3431 3433 if i == f:
3432 3434 l.append(n)
3433 3435 f = f * 2
3434 3436 n = p
3435 3437 i += 1
3436 3438
3437 3439 r.append(l)
3438 3440
3439 3441 return r
3440 3442
3441 3443 def checkpush(self, pushop):
3442 3444 """Extensions can override this function if additional checks have
3443 3445 to be performed before pushing, or call it if they override push
3444 3446 command.
3445 3447 """
3446 3448
3447 3449 @unfilteredpropertycache
3448 3450 def prepushoutgoinghooks(self):
3449 3451 """Return util.hooks consists of a pushop with repo, remote, outgoing
3450 3452 methods, which are called before pushing changesets.
3451 3453 """
3452 3454 return util.hooks()
3453 3455
3454 3456 def pushkey(self, namespace, key, old, new):
3455 3457 try:
3456 3458 tr = self.currenttransaction()
3457 3459 hookargs = {}
3458 3460 if tr is not None:
3459 3461 hookargs.update(tr.hookargs)
3460 3462 hookargs = pycompat.strkwargs(hookargs)
3461 3463 hookargs['namespace'] = namespace
3462 3464 hookargs['key'] = key
3463 3465 hookargs['old'] = old
3464 3466 hookargs['new'] = new
3465 3467 self.hook(b'prepushkey', throw=True, **hookargs)
3466 3468 except error.HookAbort as exc:
3467 3469 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3468 3470 if exc.hint:
3469 3471 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3470 3472 return False
3471 3473 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3472 3474 ret = pushkey.push(self, namespace, key, old, new)
3473 3475
3474 3476 def runhook(unused_success):
3475 3477 self.hook(
3476 3478 b'pushkey',
3477 3479 namespace=namespace,
3478 3480 key=key,
3479 3481 old=old,
3480 3482 new=new,
3481 3483 ret=ret,
3482 3484 )
3483 3485
3484 3486 self._afterlock(runhook)
3485 3487 return ret
3486 3488
3487 3489 def listkeys(self, namespace):
3488 3490 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3489 3491 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3490 3492 values = pushkey.list(self, namespace)
3491 3493 self.hook(b'listkeys', namespace=namespace, values=values)
3492 3494 return values
3493 3495
3494 3496 def debugwireargs(self, one, two, three=None, four=None, five=None):
3495 3497 '''used to test argument passing over the wire'''
3496 3498 return b"%s %s %s %s %s" % (
3497 3499 one,
3498 3500 two,
3499 3501 pycompat.bytestr(three),
3500 3502 pycompat.bytestr(four),
3501 3503 pycompat.bytestr(five),
3502 3504 )
3503 3505
3504 3506 def savecommitmessage(self, text):
3505 3507 fp = self.vfs(b'last-message.txt', b'wb')
3506 3508 try:
3507 3509 fp.write(text)
3508 3510 finally:
3509 3511 fp.close()
3510 3512 return self.pathto(fp.name[len(self.root) + 1 :])
3511 3513
3512 3514
3513 3515 # used to avoid circular references so destructors work
3514 3516 def aftertrans(files):
3515 3517 renamefiles = [tuple(t) for t in files]
3516 3518
3517 3519 def a():
3518 3520 for vfs, src, dest in renamefiles:
3519 3521 # if src and dest refer to the same file, vfs.rename is a no-op,
3520 3522 # leaving both src and dest on disk. delete dest to make sure
3521 3523 # the rename couldn't be such a no-op.
3522 3524 vfs.tryunlink(dest)
3523 3525 try:
3524 3526 vfs.rename(src, dest)
3525 3527 except OSError: # journal file does not yet exist
3526 3528 pass
3527 3529
3528 3530 return a
3529 3531
3530 3532
3531 3533 def undoname(fn):
3532 3534 base, name = os.path.split(fn)
3533 3535 assert name.startswith(b'journal')
3534 3536 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3535 3537
3536 3538
3537 3539 def instance(ui, path, create, intents=None, createopts=None):
3538 3540 localpath = util.urllocalpath(path)
3539 3541 if create:
3540 3542 createrepository(ui, localpath, createopts=createopts)
3541 3543
3542 3544 return makelocalrepository(ui, localpath, intents=intents)
3543 3545
3544 3546
3545 3547 def islocal(path):
3546 3548 return True
3547 3549
3548 3550
3549 3551 def defaultcreateopts(ui, createopts=None):
3550 3552 """Populate the default creation options for a repository.
3551 3553
3552 3554 A dictionary of explicitly requested creation options can be passed
3553 3555 in. Missing keys will be populated.
3554 3556 """
3555 3557 createopts = dict(createopts or {})
3556 3558
3557 3559 if b'backend' not in createopts:
3558 3560 # experimental config: storage.new-repo-backend
3559 3561 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3560 3562
3561 3563 return createopts
3562 3564
3563 3565
3564 3566 def newreporequirements(ui, createopts):
3565 3567 """Determine the set of requirements for a new local repository.
3566 3568
3567 3569 Extensions can wrap this function to specify custom requirements for
3568 3570 new repositories.
3569 3571 """
3570 3572 # If the repo is being created from a shared repository, we copy
3571 3573 # its requirements.
3572 3574 if b'sharedrepo' in createopts:
3573 3575 requirements = set(createopts[b'sharedrepo'].requirements)
3574 3576 if createopts.get(b'sharedrelative'):
3575 3577 requirements.add(b'relshared')
3576 3578 else:
3577 3579 requirements.add(b'shared')
3578 3580
3579 3581 return requirements
3580 3582
3581 3583 if b'backend' not in createopts:
3582 3584 raise error.ProgrammingError(
3583 3585 b'backend key not present in createopts; '
3584 3586 b'was defaultcreateopts() called?'
3585 3587 )
3586 3588
3587 3589 if createopts[b'backend'] != b'revlogv1':
3588 3590 raise error.Abort(
3589 3591 _(
3590 3592 b'unable to determine repository requirements for '
3591 3593 b'storage backend: %s'
3592 3594 )
3593 3595 % createopts[b'backend']
3594 3596 )
3595 3597
3596 3598 requirements = {b'revlogv1'}
3597 3599 if ui.configbool(b'format', b'usestore'):
3598 3600 requirements.add(b'store')
3599 3601 if ui.configbool(b'format', b'usefncache'):
3600 3602 requirements.add(b'fncache')
3601 3603 if ui.configbool(b'format', b'dotencode'):
3602 3604 requirements.add(b'dotencode')
3603 3605
3604 3606 compengines = ui.configlist(b'format', b'revlog-compression')
3605 3607 for compengine in compengines:
3606 3608 if compengine in util.compengines:
3607 3609 break
3608 3610 else:
3609 3611 raise error.Abort(
3610 3612 _(
3611 3613 b'compression engines %s defined by '
3612 3614 b'format.revlog-compression not available'
3613 3615 )
3614 3616 % b', '.join(b'"%s"' % e for e in compengines),
3615 3617 hint=_(
3616 3618 b'run "hg debuginstall" to list available '
3617 3619 b'compression engines'
3618 3620 ),
3619 3621 )
3620 3622
3621 3623 # zlib is the historical default and doesn't need an explicit requirement.
3622 3624 if compengine == b'zstd':
3623 3625 requirements.add(b'revlog-compression-zstd')
3624 3626 elif compengine != b'zlib':
3625 3627 requirements.add(b'exp-compression-%s' % compengine)
3626 3628
3627 3629 if scmutil.gdinitconfig(ui):
3628 3630 requirements.add(b'generaldelta')
3629 3631 if ui.configbool(b'format', b'sparse-revlog'):
3630 3632 requirements.add(SPARSEREVLOG_REQUIREMENT)
3631 3633
3632 3634 # experimental config: format.exp-use-side-data
3633 3635 if ui.configbool(b'format', b'exp-use-side-data'):
3634 3636 requirements.add(SIDEDATA_REQUIREMENT)
3635 3637 # experimental config: format.exp-use-copies-side-data-changeset
3636 3638 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3637 3639 requirements.add(SIDEDATA_REQUIREMENT)
3638 3640 requirements.add(COPIESSDC_REQUIREMENT)
3639 3641 if ui.configbool(b'experimental', b'treemanifest'):
3640 3642 requirements.add(b'treemanifest')
3641 3643
3642 3644 revlogv2 = ui.config(b'experimental', b'revlogv2')
3643 3645 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3644 3646 requirements.remove(b'revlogv1')
3645 3647 # generaldelta is implied by revlogv2.
3646 3648 requirements.discard(b'generaldelta')
3647 3649 requirements.add(REVLOGV2_REQUIREMENT)
3648 3650 # experimental config: format.internal-phase
3649 3651 if ui.configbool(b'format', b'internal-phase'):
3650 3652 requirements.add(b'internal-phase')
3651 3653
3652 3654 if createopts.get(b'narrowfiles'):
3653 3655 requirements.add(repository.NARROW_REQUIREMENT)
3654 3656
3655 3657 if createopts.get(b'lfs'):
3656 3658 requirements.add(b'lfs')
3657 3659
3658 3660 if ui.configbool(b'format', b'bookmarks-in-store'):
3659 3661 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3660 3662
3661 3663 return requirements
3662 3664
3663 3665
3664 3666 def filterknowncreateopts(ui, createopts):
3665 3667 """Filters a dict of repo creation options against options that are known.
3666 3668
3667 3669 Receives a dict of repo creation options and returns a dict of those
3668 3670 options that we don't know how to handle.
3669 3671
3670 3672 This function is called as part of repository creation. If the
3671 3673 returned dict contains any items, repository creation will not
3672 3674 be allowed, as it means there was a request to create a repository
3673 3675 with options not recognized by loaded code.
3674 3676
3675 3677 Extensions can wrap this function to filter out creation options
3676 3678 they know how to handle.
3677 3679 """
3678 3680 known = {
3679 3681 b'backend',
3680 3682 b'lfs',
3681 3683 b'narrowfiles',
3682 3684 b'sharedrepo',
3683 3685 b'sharedrelative',
3684 3686 b'shareditems',
3685 3687 b'shallowfilestore',
3686 3688 }
3687 3689
3688 3690 return {k: v for k, v in createopts.items() if k not in known}
3689 3691
3690 3692
3691 3693 def createrepository(ui, path, createopts=None):
3692 3694 """Create a new repository in a vfs.
3693 3695
3694 3696 ``path`` path to the new repo's working directory.
3695 3697 ``createopts`` options for the new repository.
3696 3698
3697 3699 The following keys for ``createopts`` are recognized:
3698 3700
3699 3701 backend
3700 3702 The storage backend to use.
3701 3703 lfs
3702 3704 Repository will be created with ``lfs`` requirement. The lfs extension
3703 3705 will automatically be loaded when the repository is accessed.
3704 3706 narrowfiles
3705 3707 Set up repository to support narrow file storage.
3706 3708 sharedrepo
3707 3709 Repository object from which storage should be shared.
3708 3710 sharedrelative
3709 3711 Boolean indicating if the path to the shared repo should be
3710 3712 stored as relative. By default, the pointer to the "parent" repo
3711 3713 is stored as an absolute path.
3712 3714 shareditems
3713 3715 Set of items to share to the new repository (in addition to storage).
3714 3716 shallowfilestore
3715 3717 Indicates that storage for files should be shallow (not all ancestor
3716 3718 revisions are known).
3717 3719 """
3718 3720 createopts = defaultcreateopts(ui, createopts=createopts)
3719 3721
3720 3722 unknownopts = filterknowncreateopts(ui, createopts)
3721 3723
3722 3724 if not isinstance(unknownopts, dict):
3723 3725 raise error.ProgrammingError(
3724 3726 b'filterknowncreateopts() did not return a dict'
3725 3727 )
3726 3728
3727 3729 if unknownopts:
3728 3730 raise error.Abort(
3729 3731 _(
3730 3732 b'unable to create repository because of unknown '
3731 3733 b'creation option: %s'
3732 3734 )
3733 3735 % b', '.join(sorted(unknownopts)),
3734 3736 hint=_(b'is a required extension not loaded?'),
3735 3737 )
3736 3738
3737 3739 requirements = newreporequirements(ui, createopts=createopts)
3738 3740
3739 3741 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3740 3742
3741 3743 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3742 3744 if hgvfs.exists():
3743 3745 raise error.RepoError(_(b'repository %s already exists') % path)
3744 3746
3745 3747 if b'sharedrepo' in createopts:
3746 3748 sharedpath = createopts[b'sharedrepo'].sharedpath
3747 3749
3748 3750 if createopts.get(b'sharedrelative'):
3749 3751 try:
3750 3752 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3751 3753 except (IOError, ValueError) as e:
3752 3754 # ValueError is raised on Windows if the drive letters differ
3753 3755 # on each path.
3754 3756 raise error.Abort(
3755 3757 _(b'cannot calculate relative path'),
3756 3758 hint=stringutil.forcebytestr(e),
3757 3759 )
3758 3760
3759 3761 if not wdirvfs.exists():
3760 3762 wdirvfs.makedirs()
3761 3763
3762 3764 hgvfs.makedir(notindexed=True)
3763 3765 if b'sharedrepo' not in createopts:
3764 3766 hgvfs.mkdir(b'cache')
3765 3767 hgvfs.mkdir(b'wcache')
3766 3768
3767 3769 if b'store' in requirements and b'sharedrepo' not in createopts:
3768 3770 hgvfs.mkdir(b'store')
3769 3771
3770 3772 # We create an invalid changelog outside the store so very old
3771 3773 # Mercurial versions (which didn't know about the requirements
3772 3774 # file) encounter an error on reading the changelog. This
3773 3775 # effectively locks out old clients and prevents them from
3774 3776 # mucking with a repo in an unknown format.
3775 3777 #
3776 3778 # The revlog header has version 2, which won't be recognized by
3777 3779 # such old clients.
3778 3780 hgvfs.append(
3779 3781 b'00changelog.i',
3780 3782 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3781 3783 b'layout',
3782 3784 )
3783 3785
3784 3786 scmutil.writerequires(hgvfs, requirements)
3785 3787
3786 3788 # Write out file telling readers where to find the shared store.
3787 3789 if b'sharedrepo' in createopts:
3788 3790 hgvfs.write(b'sharedpath', sharedpath)
3789 3791
3790 3792 if createopts.get(b'shareditems'):
3791 3793 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3792 3794 hgvfs.write(b'shared', shared)
3793 3795
3794 3796
3795 3797 def poisonrepository(repo):
3796 3798 """Poison a repository instance so it can no longer be used."""
3797 3799 # Perform any cleanup on the instance.
3798 3800 repo.close()
3799 3801
3800 3802 # Our strategy is to replace the type of the object with one that
3801 3803 # has all attribute lookups result in error.
3802 3804 #
3803 3805 # But we have to allow the close() method because some constructors
3804 3806 # of repos call close() on repo references.
3805 3807 class poisonedrepository(object):
3806 3808 def __getattribute__(self, item):
3807 3809 if item == 'close':
3808 3810 return object.__getattribute__(self, item)
3809 3811
3810 3812 raise error.ProgrammingError(
3811 3813 b'repo instances should not be used after unshare'
3812 3814 )
3813 3815
3814 3816 def close(self):
3815 3817 pass
3816 3818
3817 3819 # We may have a repoview, which intercepts __setattr__. So be sure
3818 3820 # we operate at the lowest level possible.
3819 3821 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,633 +1,643
1 1 # nodemap.py - nodemap related code and utilities
2 2 #
3 3 # Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 # Copyright 2019 George Racinet <georges.racinet@octobus.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import re
14 14 import struct
15 15
16 from ..i18n import _
17
16 18 from .. import (
17 19 error,
18 20 node as nodemod,
19 21 util,
20 22 )
21 23
22 24
23 25 class NodeMap(dict):
24 26 def __missing__(self, x):
25 27 raise error.RevlogError(b'unknown node: %s' % x)
26 28
27 29
28 30 def persisted_data(revlog):
29 31 """read the nodemap for a revlog from disk"""
30 32 if revlog.nodemap_file is None:
31 33 return None
32 34 pdata = revlog.opener.tryread(revlog.nodemap_file)
33 35 if not pdata:
34 36 return None
35 37 offset = 0
36 38 (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
37 39 if version != ONDISK_VERSION:
38 40 return None
39 41 offset += S_VERSION.size
40 42 headers = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
41 43 uid_size, tip_rev, data_length, data_unused, tip_node_size = headers
42 44 offset += S_HEADER.size
43 45 docket = NodeMapDocket(pdata[offset : offset + uid_size])
44 46 offset += uid_size
45 47 docket.tip_rev = tip_rev
46 48 docket.tip_node = pdata[offset : offset + tip_node_size]
47 49 docket.data_length = data_length
48 50 docket.data_unused = data_unused
49 51
50 52 filename = _rawdata_filepath(revlog, docket)
51 53 use_mmap = revlog.opener.options.get(b"exp-persistent-nodemap.mmap")
52 54 try:
53 55 with revlog.opener(filename) as fd:
54 56 if use_mmap:
55 57 data = util.buffer(util.mmapread(fd, data_length))
56 58 else:
57 59 data = fd.read(data_length)
58 60 except OSError as e:
59 61 if e.errno != errno.ENOENT:
60 62 raise
61 63 if len(data) < data_length:
62 64 return None
63 65 return docket, data
64 66
65 67
66 68 def setup_persistent_nodemap(tr, revlog):
67 69 """Install whatever is needed transaction side to persist a nodemap on disk
68 70
69 71 (only actually persist the nodemap if this is relevant for this revlog)
70 72 """
71 73 if revlog._inline:
72 74 return # inlined revlogs are too small for this to be relevant
73 75 if revlog.nodemap_file is None:
74 76 return # we do not use persistent_nodemap on this revlog
75 77
76 78 # we need to happen after the changelog finalization, in that use "cl-"
77 79 callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog.nodemap_file
78 80 if tr.hasfinalize(callback_id):
79 81 return # no need to register again
80 82 tr.addpending(
81 83 callback_id, lambda tr: _persist_nodemap(tr, revlog, pending=True)
82 84 )
83 85 tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog))
84 86
85 87
86 88 class _NoTransaction(object):
87 89 """transaction like object to update the nodemap outside a transaction
88 90 """
89 91
90 92 def __init__(self):
91 93 self._postclose = {}
92 94
93 95 def addpostclose(self, callback_id, callback_func):
94 96 self._postclose[callback_id] = callback_func
95 97
96 98 def registertmp(self, *args, **kwargs):
97 99 pass
98 100
99 101 def addbackup(self, *args, **kwargs):
100 102 pass
101 103
102 104 def add(self, *args, **kwargs):
103 105 pass
104 106
105 107 def addabort(self, *args, **kwargs):
106 108 pass
107 109
110 def _report(self, *args):
111 pass
112
108 113
109 114 def update_persistent_nodemap(revlog):
110 115 """update the persistent nodemap right now
111 116
112 117 To be used for updating the nodemap on disk outside of a normal transaction
113 118 setup (eg, `debugupdatecache`).
114 119 """
115 120 if revlog._inline:
116 121 return # inlined revlogs are too small for this to be relevant
117 122 if revlog.nodemap_file is None:
118 123 return # we do not use persistent_nodemap on this revlog
119 124
120 125 notr = _NoTransaction()
121 126 _persist_nodemap(notr, revlog)
122 127 for k in sorted(notr._postclose):
123 128 notr._postclose[k](None)
124 129
125 130
126 131 def _persist_nodemap(tr, revlog, pending=False):
127 132 """Write nodemap data on disk for a given revlog
128 133 """
129 134 if getattr(revlog, 'filteredrevs', ()):
130 135 raise error.ProgrammingError(
131 136 "cannot persist nodemap of a filtered changelog"
132 137 )
133 138 if revlog.nodemap_file is None:
134 139 msg = "calling persist nodemap on a revlog without the feature enabled"
135 140 raise error.ProgrammingError(msg)
136 141
137 142 can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
138 143 ondisk_docket = revlog._nodemap_docket
139 144 feed_data = util.safehasattr(revlog.index, "update_nodemap_data")
140 145 use_mmap = revlog.opener.options.get(b"exp-persistent-nodemap.mmap")
146 mode = revlog.opener.options.get(b"exp-persistent-nodemap.mode")
147 if not can_incremental:
148 msg = _(b"persistent nodemap in strict mode without efficient method")
149 if mode == b'warn':
150 tr._report(b"%s\n" % msg)
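# note (descriptive comment): in b'warn' mode the message above is only
# reported through the transaction; the nodemap is still persisted by the
# code below, just without an efficient incremental method.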
141 151
142 152 data = None
143 153 # first attempt an incremental update of the data
144 154 if can_incremental and ondisk_docket is not None:
145 155 target_docket = revlog._nodemap_docket.copy()
146 156 (
147 157 src_docket,
148 158 data_changed_count,
149 159 data,
150 160 ) = revlog.index.nodemap_data_incremental()
151 161 new_length = target_docket.data_length + len(data)
152 162 new_unused = target_docket.data_unused + data_changed_count
153 163 if src_docket != target_docket:
154 164 data = None
155 165 elif new_length <= (new_unused * 10): # unused data is at least 10% of the total
156 166 data = None
157 167 else:
158 168 datafile = _rawdata_filepath(revlog, target_docket)
159 169 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
160 170 # store vfs
161 171 tr.add(datafile, target_docket.data_length)
162 172 with revlog.opener(datafile, b'r+') as fd:
163 173 fd.seek(target_docket.data_length)
164 174 fd.write(data)
165 175 if feed_data:
166 176 if use_mmap:
167 177 fd.seek(0)
168 178 new_data = fd.read(new_length)
169 179 else:
170 180 fd.flush()
171 181 new_data = util.buffer(util.mmapread(fd, new_length))
172 182 target_docket.data_length = new_length
173 183 target_docket.data_unused = new_unused
174 184
175 185 if data is None:
176 186 # otherwise fallback to a full new export
177 187 target_docket = NodeMapDocket()
178 188 datafile = _rawdata_filepath(revlog, target_docket)
179 189 if util.safehasattr(revlog.index, "nodemap_data_all"):
180 190 data = revlog.index.nodemap_data_all()
181 191 else:
182 192 data = persistent_data(revlog.index)
183 193 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
184 194 # store vfs
185 195
186 196 tryunlink = revlog.opener.tryunlink
187 197
188 198 def abortck(tr):
189 199 tryunlink(datafile)
190 200
191 201 callback_id = b"delete-%s" % datafile
192 202
193 203 # some flavors of the transaction abort do not clean up new files, they
194 204 # simply empty them.
195 205 tr.addabort(callback_id, abortck)
196 206 with revlog.opener(datafile, b'w+') as fd:
197 207 fd.write(data)
198 208 if feed_data:
199 209 if use_mmap:
200 210 new_data = data
201 211 else:
202 212 fd.flush()
203 213 new_data = util.buffer(util.mmapread(fd, len(data)))
204 214 target_docket.data_length = len(data)
205 215 target_docket.tip_rev = revlog.tiprev()
206 216 target_docket.tip_node = revlog.node(target_docket.tip_rev)
207 217 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
208 218 # store vfs
209 219 file_path = revlog.nodemap_file
210 220 if pending:
211 221 file_path += b'.a'
212 222 tr.registertmp(file_path)
213 223 else:
214 224 tr.addbackup(file_path)
215 225
216 226 with revlog.opener(file_path, b'w', atomictemp=True) as fp:
217 227 fp.write(target_docket.serialize())
218 228 revlog._nodemap_docket = target_docket
219 229 if feed_data:
220 230 revlog.index.update_nodemap_data(target_docket, new_data)
221 231
222 232 # search for old index files in all cases; some older process might have
223 233 # left one behind.
224 234 olds = _other_rawdata_filepath(revlog, target_docket)
225 235 if olds:
226 236 realvfs = getattr(revlog, '_realopener', revlog.opener)
227 237
228 238 def cleanup(tr):
229 239 for oldfile in olds:
230 240 realvfs.tryunlink(oldfile)
231 241
232 242 callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
233 243 tr.addpostclose(callback_id, cleanup)
234 244
235 245
236 246 ### Nodemap docket file
237 247 #
238 248 # The nodemap data are stored on disk using 2 files:
239 249 #
240 250 # * a raw data file containing a persistent nodemap
241 251 # (see `Nodemap Trie` section)
242 252 #
243 253 # * a small "docket" file containing metadata
244 254 #
245 255 # While the nodemap data can be multiple tens of megabytes, the "docket" is
246 256 # small, so it is easy to update it automatically or to duplicate its content
247 257 # during a transaction.
248 258 #
249 259 # Multiple raw data files can exist at the same time (the currently valid one and a
250 260 # new one being used by an in-progress transaction). To accommodate this, the
251 261 # filename hosting the raw data has a variable part. The exact filename is
252 262 # specified inside the "docket" file.
253 263 #
254 264 # The docket file contains information to find, qualify and validate the raw
255 265 # data. Its content is currently very light, but it will expand as the on disk
256 266 # nodemap gains the necessary features to be used in production.
257 267
258 268 # version 0 is experimental, no BC guarantee, do not use outside of tests.
259 269 ONDISK_VERSION = 0
260 270 S_VERSION = struct.Struct(">B")
261 271 S_HEADER = struct.Struct(">BQQQQ")
262 272
263 273 ID_SIZE = 8
264 274
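# Worked example (a sketch; not used by the module): with the constants above,
# a serialized docket for a sha1 repository is
# 1 (version) + 33 (header) + 16 (uid, hex of 8 random bytes) + 20 (tip node)
# = 70 bytes, which matches the `size=70` reported for 00changelog.n in the
# tests further below.
#
#     import struct
#     S_VERSION = struct.Struct(">B")
#     S_HEADER = struct.Struct(">BQQQQ")
#     docket_size = S_VERSION.size + S_HEADER.size + 16 + 20
#     assert docket_size == 70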
265 275
266 276 def _make_uid():
267 277 """return a new unique identifier.
268 278
269 279 The identifier is random and composed of ascii characters."""
270 280 return nodemod.hex(os.urandom(ID_SIZE))
271 281
272 282
273 283 class NodeMapDocket(object):
274 284 """metadata associated with persistent nodemap data
275 285
276 286 The persistent data may come from disk or be on their way to disk.
277 287 """
278 288
279 289 def __init__(self, uid=None):
280 290 if uid is None:
281 291 uid = _make_uid()
282 292 # a unique identifier for the data file:
283 293 # - When new data are appended, it is preserved.
284 294 # - When a new data file is created, a new identifier is generated.
285 295 self.uid = uid
286 296 # the tipmost revision stored in the data file. This revision and all
287 297 # revision before it are expected to be encoded in the data file.
288 298 self.tip_rev = None
289 299 # the node of that tipmost revision; if it mismatches the current index
290 300 # data the docket is not valid for the current index and should be
291 301 # discarded.
292 302 #
293 303 # note: this method is not perfect as some destructive operations could
294 304 # preserve the same tip_rev + tip_node while altering lower revisions.
295 305 # However, multiple other caches have the same vulnerability (eg:
296 306 # branchmap cache).
297 307 self.tip_node = None
298 308 # the size (in bytes) of the persisted data to encode the nodemap valid
299 309 # for `tip_rev`.
300 310 # - data files shorter than this are corrupted,
301 311 # - any extra data should be ignored.
302 312 self.data_length = None
303 313 # the amount (in bytes) of "dead" data, still in the data file but no
304 314 # longer used for the nodemap.
305 315 self.data_unused = 0
306 316
307 317 def copy(self):
308 318 new = NodeMapDocket(uid=self.uid)
309 319 new.tip_rev = self.tip_rev
310 320 new.tip_node = self.tip_node
311 321 new.data_length = self.data_length
312 322 new.data_unused = self.data_unused
313 323 return new
314 324
315 325 def __cmp__(self, other):
316 326 if self.uid < other.uid:
317 327 return -1
318 328 if self.uid > other.uid:
319 329 return 1
320 330 elif self.data_length < other.data_length:
321 331 return -1
322 332 elif self.data_length > other.data_length:
323 333 return 1
324 334 return 0
325 335
326 336 def __eq__(self, other):
327 337 return self.uid == other.uid and self.data_length == other.data_length
328 338
329 339 def serialize(self):
330 340 """return serialized bytes for a docket using the passed uid"""
331 341 data = []
332 342 data.append(S_VERSION.pack(ONDISK_VERSION))
333 343 headers = (
334 344 len(self.uid),
335 345 self.tip_rev,
336 346 self.data_length,
337 347 self.data_unused,
338 348 len(self.tip_node),
339 349 )
340 350 data.append(S_HEADER.pack(*headers))
341 351 data.append(self.uid)
342 352 data.append(self.tip_node)
343 353 return b''.join(data)
344 354
345 355
346 356 def _rawdata_filepath(revlog, docket):
347 357 """The (vfs relative) nodemap's rawdata file for a given uid"""
348 358 if revlog.nodemap_file.endswith(b'.n.a'):
349 359 prefix = revlog.nodemap_file[:-4]
350 360 else:
351 361 prefix = revlog.nodemap_file[:-2]
352 362 return b"%s-%s.nd" % (prefix, docket.uid)
353 363
354 364
355 365 def _other_rawdata_filepath(revlog, docket):
356 366 prefix = revlog.nodemap_file[:-2]
357 367 pattern = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
358 368 new_file_path = _rawdata_filepath(revlog, docket)
359 369 new_file_name = revlog.opener.basename(new_file_path)
360 370 dirpath = revlog.opener.dirname(new_file_path)
361 371 others = []
362 372 for f in revlog.opener.listdir(dirpath):
363 373 if pattern.match(f) and f != new_file_name:
364 374 others.append(f)
365 375 return others
366 376
367 377
368 378 ### Nodemap Trie
369 379 #
370 380 # This is a simple reference implementation to compute and persist a nodemap
371 381 # trie. This reference implementation is write only. The python version of this
372 382 # is not expected to be actually used, since it won't provide performance
373 383 # improvement over the existing non-persistent C implementation.
374 384 #
375 385 # The nodemap is persisted as a Trie using 4-bit addresses / 16-entry blocks. Each
376 386 # revision can be addressed using its node's shortest prefix.
377 387 #
378 388 # The trie is stored as a sequence of blocks. Each block contains 16 entries
379 389 # (signed 32bit integer, big endian). Each entry can be one of the following:
380 390 #
381 391 # * value >= 0 -> index of sub-block
382 392 # * value == -1 -> no value
383 393 # * value < -1 -> a revision value: rev = -(value+2)
384 394 #
385 395 # The implementation focuses on simplicity, not on performance. A Rust
386 396 # implementation should provide an efficient version of the same binary
387 397 # persistence. This reference python implementation is never meant to be
388 398 # extensively used in production.
389 399
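# Worked example (a standalone sketch, not used by the module; `raw` is
# assumed to be the content of a `*.nd` data file): decoding one 64-byte
# block according to the convention above.
#
#     import struct
#
#     S_BLOCK = struct.Struct(">" + ("l" * 16))
#
#     def describe_block(raw, block_idx):
#         """print what each of the 16 entries of one on-disk block points to"""
#         start = block_idx * S_BLOCK.size
#         values = S_BLOCK.unpack(raw[start : start + S_BLOCK.size])
#         for nybble, v in enumerate(values):
#             if v == -1:
#                 continue  # no entry for this hex digit
#             elif v >= 0:
#                 print("%x -> sub-block %d" % (nybble, v))  # pointer to another block
#             else:
#                 print("%x -> revision %d" % (nybble, -(v + 2)))  # a revision number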
390 400
391 401 def persistent_data(index):
392 402 """return the persistent binary form for a nodemap for a given index
393 403 """
394 404 trie = _build_trie(index)
395 405 return _persist_trie(trie)
396 406
397 407
398 408 def update_persistent_data(index, root, max_idx, last_rev):
399 409 """return the incremental update for persistent nodemap from a given index
400 410 """
401 411 changed_block, trie = _update_trie(index, root, last_rev)
402 412 return (
403 413 changed_block * S_BLOCK.size,
404 414 _persist_trie(trie, existing_idx=max_idx),
405 415 )
406 416
407 417
408 418 S_BLOCK = struct.Struct(">" + ("l" * 16))
409 419
410 420 NO_ENTRY = -1
411 421 # rev 0 needs to be -2 because 0 is used by blocks, and -1 is a special value.
412 422 REV_OFFSET = 2
413 423
414 424
415 425 def _transform_rev(rev):
416 426 """Return the number used to represent the rev in the tree.
417 427
418 428 (or retrieve a rev number from such representation)
419 429
420 430 Note that this is an involution, a function equal to its inverse (i.e.
421 431 which gives the identity when applied to itself).
422 432 """
423 433 return -(rev + REV_OFFSET)
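# For example: _transform_rev(0) == -2 and _transform_rev(-2) == 0, so
# revision 0 is stored on disk as -2, in line with REV_OFFSET above.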
424 434
425 435
426 436 def _to_int(hex_digit):
427 437 """turn an hexadecimal digit into a proper integer"""
428 438 return int(hex_digit, 16)
429 439
430 440
431 441 class Block(dict):
432 442 """represent a block of the Trie
433 443
434 444 contains up to 16 entries indexed from 0 to 15"""
435 445
436 446 def __init__(self):
437 447 super(Block, self).__init__()
438 448 # If this block exists on disk, here is its ID
439 449 self.ondisk_id = None
440 450
441 451 def __iter__(self):
442 452 return iter(self.get(i) for i in range(16))
443 453
444 454
445 455 def _build_trie(index):
446 456 """build a nodemap trie
447 457
448 458 The nodemap stores revision number for each unique prefix.
449 459
450 460 Each block is a dictionary with keys in `[0, 15]`. Values are either
451 461 another block or a revision number.
452 462 """
453 463 root = Block()
454 464 for rev in range(len(index)):
455 465 hex = nodemod.hex(index[rev][7])
456 466 _insert_into_block(index, 0, root, rev, hex)
457 467 return root
458 468
459 469
460 470 def _update_trie(index, root, last_rev):
461 471 """consume"""
462 472 changed = 0
463 473 for rev in range(last_rev + 1, len(index)):
464 474 hex = nodemod.hex(index[rev][7])
465 475 changed += _insert_into_block(index, 0, root, rev, hex)
466 476 return changed, root
467 477
468 478
469 479 def _insert_into_block(index, level, block, current_rev, current_hex):
470 480 """insert a new revision in a block
471 481
472 482 index: the index we are adding revision for
473 483 level: the depth of the current block in the trie
474 484 block: the block currently being considered
475 485 current_rev: the revision number we are adding
476 486 current_hex: the hexadecimal representation of the node of that revision
477 487 """
478 488 changed = 1
479 489 if block.ondisk_id is not None:
480 490 block.ondisk_id = None
481 491 hex_digit = _to_int(current_hex[level : level + 1])
482 492 entry = block.get(hex_digit)
483 493 if entry is None:
484 494 # no entry, simply store the revision number
485 495 block[hex_digit] = current_rev
486 496 elif isinstance(entry, dict):
487 497 # need to recurse to an underlying block
488 498 changed += _insert_into_block(
489 499 index, level + 1, entry, current_rev, current_hex
490 500 )
491 501 else:
492 502 # collision with a previously unique prefix, inserting new
493 503 # vertices to fit both entries.
494 504 other_hex = nodemod.hex(index[entry][7])
495 505 other_rev = entry
496 506 new = Block()
497 507 block[hex_digit] = new
498 508 _insert_into_block(index, level + 1, new, other_rev, other_hex)
499 509 _insert_into_block(index, level + 1, new, current_rev, current_hex)
500 510 return changed
501 511
502 512
503 513 def _persist_trie(root, existing_idx=None):
504 514 """turn a nodemap trie into persistent binary data
505 515
506 516 See `_build_trie` for nodemap trie structure"""
507 517 block_map = {}
508 518 if existing_idx is not None:
509 519 base_idx = existing_idx + 1
510 520 else:
511 521 base_idx = 0
512 522 chunks = []
513 523 for tn in _walk_trie(root):
514 524 if tn.ondisk_id is not None:
515 525 block_map[id(tn)] = tn.ondisk_id
516 526 else:
517 527 block_map[id(tn)] = len(chunks) + base_idx
518 528 chunks.append(_persist_block(tn, block_map))
519 529 return b''.join(chunks)
520 530
521 531
522 532 def _walk_trie(block):
523 533 """yield all the block in a trie
524 534
525 535 Children blocks are always yielded before their parent block.
526 536 """
527 537 for (__, item) in sorted(block.items()):
528 538 if isinstance(item, dict):
529 539 for sub_block in _walk_trie(item):
530 540 yield sub_block
531 541 yield block
532 542
533 543
534 544 def _persist_block(block_node, block_map):
535 545 """produce persistent binary data for a single block
536 546
537 547 Children blocks are assumed to be already persisted and present in
538 548 block_map.
539 549 """
540 550 data = tuple(_to_value(v, block_map) for v in block_node)
541 551 return S_BLOCK.pack(*data)
542 552
543 553
544 554 def _to_value(item, block_map):
545 555 """persist any value as an integer"""
546 556 if item is None:
547 557 return NO_ENTRY
548 558 elif isinstance(item, dict):
549 559 return block_map[id(item)]
550 560 else:
551 561 return _transform_rev(item)
552 562
553 563
554 564 def parse_data(data):
555 565 """parse parse nodemap data into a nodemap Trie"""
556 566 if (len(data) % S_BLOCK.size) != 0:
557 567 msg = "nodemap data size is not a multiple of block size (%d): %d"
558 568 raise error.Abort(msg % (S_BLOCK.size, len(data)))
559 569 if not data:
560 570 return Block(), None
561 571 block_map = {}
562 572 new_blocks = []
563 573 for i in range(0, len(data), S_BLOCK.size):
564 574 block = Block()
565 575 block.ondisk_id = len(block_map)
566 576 block_map[block.ondisk_id] = block
567 577 block_data = data[i : i + S_BLOCK.size]
568 578 values = S_BLOCK.unpack(block_data)
569 579 new_blocks.append((block, values))
570 580 for b, values in new_blocks:
571 581 for idx, v in enumerate(values):
572 582 if v == NO_ENTRY:
573 583 continue
574 584 elif v >= 0:
575 585 b[idx] = block_map[v]
576 586 else:
577 587 b[idx] = _transform_rev(v)
578 588 return block, i // S_BLOCK.size
579 589
580 590
581 591 # debug utility
582 592
583 593
584 594 def check_data(ui, index, data):
585 595 """verify that the provided nodemap data are valid for the given idex"""
586 596 ret = 0
587 597 ui.status((b"revision in index: %d\n") % len(index))
588 598 root, __ = parse_data(data)
589 599 all_revs = set(_all_revisions(root))
590 600 ui.status((b"revision in nodemap: %d\n") % len(all_revs))
591 601 for r in range(len(index)):
592 602 if r not in all_revs:
593 603 msg = b" revision missing from nodemap: %d\n" % r
594 604 ui.write_err(msg)
595 605 ret = 1
596 606 else:
597 607 all_revs.remove(r)
598 608 nm_rev = _find_node(root, nodemod.hex(index[r][7]))
599 609 if nm_rev is None:
600 610 msg = b" revision node does not match any entries: %d\n" % r
601 611 ui.write_err(msg)
602 612 ret = 1
603 613 elif nm_rev != r:
604 614 msg = (
605 615 b" revision node does not match the expected revision: "
606 616 b"%d != %d\n" % (r, nm_rev)
607 617 )
608 618 ui.write_err(msg)
609 619 ret = 1
610 620
611 621 if all_revs:
612 622 for r in sorted(all_revs):
613 623 msg = b" extra revision in nodemap: %d\n" % r
614 624 ui.write_err(msg)
615 625 ret = 1
616 626 return ret
617 627
618 628
619 629 def _all_revisions(root):
620 630 """return all revisions stored in a Trie"""
621 631 for block in _walk_trie(root):
622 632 for v in block:
623 633 if v is None or isinstance(v, Block):
624 634 continue
625 635 yield v
626 636
627 637
628 638 def _find_node(block, node):
629 639 """find the revision associated with a given node"""
630 640 entry = block.get(_to_int(node[0:1]))
631 641 if isinstance(entry, dict):
632 642 return _find_node(entry, node[1:])
633 643 return entry
@@ -1,416 +1,418
1 1 ===================================
2 2 Test the persistent on-disk nodemap
3 3 ===================================
4 4
5 5 $ hg init test-repo
6 6 $ cd test-repo
7 7 $ cat << EOF >> .hg/hgrc
8 8 > [experimental]
9 9 > exp-persistent-nodemap=yes
10 10 > [devel]
11 11 > persistent-nodemap=yes
12 12 > EOF
13 $ hg debugbuilddag .+5000 --new-file
13 $ hg debugbuilddag .+5000 --new-file --config "experimental.exp-persistent-nodemap.mode=warn"
14 persistent nodemap in strict mode without efficient method (no-rust no-pure !)
15 persistent nodemap in strict mode without efficient method (no-rust no-pure !)
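(note: the warning above only appears for builds whose index lacks an
efficient incremental update method, hence the "no-rust no-pure" annotations;
the mode passed here with --config could presumably also be set persistently
in the [experimental] section of the hgrc written above, as
"exp-persistent-nodemap.mode=warn")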
14 16 $ hg debugnodemap --metadata
15 17 uid: ???????????????? (glob)
16 18 tip-rev: 5000
17 19 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
18 20 data-length: 121088
19 21 data-unused: 0
20 22 data-unused: 0.000%
21 23 $ f --size .hg/store/00changelog.n
22 24 .hg/store/00changelog.n: size=70
23 25
24 26 Simple lookup works
25 27
26 28 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
27 29 $ hg log -r "$ANYNODE" --template '{rev}\n'
28 30 5000
29 31
30 32
31 33 #if rust
32 34
33 35 $ f --sha256 .hg/store/00changelog-*.nd
34 36 .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
35 37
36 38 $ f --sha256 .hg/store/00manifest-*.nd
37 39 .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
38 40 $ hg debugnodemap --dump-new | f --sha256 --size
39 41 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
40 42 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
41 43 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
42 44 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
43 45 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
44 46 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
45 47 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
46 48 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
47 49 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
48 50 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
49 51 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
50 52 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
51 53 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
52 54 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
53 55 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
54 56 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
55 57 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
56 58 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
57 59 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
58 60
59 61
60 62 #else
61 63
62 64 $ f --sha256 .hg/store/00changelog-*.nd
63 65 .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
64 66 $ hg debugnodemap --dump-new | f --sha256 --size
65 67 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
66 68 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
67 69 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
68 70 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
69 71 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
70 72 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
71 73 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
72 74 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
73 75 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
74 76 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
75 77 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
76 78 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
77 79 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
78 80 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
79 81 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
80 82 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
81 83 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
82 84 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
83 85 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
84 86
85 87 #endif
86 88
87 89 $ hg debugnodemap --check
88 90 revision in index: 5001
89 91 revision in nodemap: 5001
90 92
91 93 add a new commit
92 94
93 95 $ hg up
94 96 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
95 97 $ echo foo > foo
96 98 $ hg add foo
97 99 $ hg ci -m 'foo'
98 100
99 101 #if no-pure no-rust
100 102 $ hg debugnodemap --metadata
101 103 uid: ???????????????? (glob)
102 104 tip-rev: 5001
103 105 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
104 106 data-length: 121088
105 107 data-unused: 0
106 108 data-unused: 0.000%
107 109 #else
108 110 $ hg debugnodemap --metadata
109 111 uid: ???????????????? (glob)
110 112 tip-rev: 5001
111 113 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
112 114 data-length: 121344
113 115 data-unused: 256
114 116 data-unused: 0.211%
115 117 #endif
116 118
117 119 $ f --size .hg/store/00changelog.n
118 120 .hg/store/00changelog.n: size=70
119 121
120 122 (The pure code uses the debug code that performs incremental updates, the C code re-encodes from scratch)
121 123
122 124 #if pure
123 125 $ f --sha256 .hg/store/00changelog-*.nd --size
124 126 .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
125 127 #endif
126 128
127 129 #if rust
128 130 $ f --sha256 .hg/store/00changelog-*.nd --size
129 131 .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
130 132 #endif
131 133
132 134 #if no-pure no-rust
133 135 $ f --sha256 .hg/store/00changelog-*.nd --size
134 136 .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
135 137 #endif
136 138
137 139 $ hg debugnodemap --check
138 140 revision in index: 5002
139 141 revision in nodemap: 5002
140 142
141 143 Test code path without mmap
142 144 ---------------------------
143 145
144 146 $ echo bar > bar
145 147 $ hg add bar
146 148 $ hg ci -m 'bar' --config experimental.exp-persistent-nodemap.mmap=no
147 149
148 150 $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=yes
149 151 revision in index: 5003
150 152 revision in nodemap: 5003
151 153 $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=no
152 154 revision in index: 5003
153 155 revision in nodemap: 5003
154 156
155 157
156 158 #if pure
157 159 $ hg debugnodemap --metadata
158 160 uid: ???????????????? (glob)
159 161 tip-rev: 5002
160 162 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
161 163 data-length: 121600
162 164 data-unused: 512
163 165 data-unused: 0.421%
164 166 $ f --sha256 .hg/store/00changelog-*.nd --size
165 167 .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
166 168 #endif
167 169 #if rust
168 170 $ hg debugnodemap --metadata
169 171 uid: ???????????????? (glob)
170 172 tip-rev: 5002
171 173 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
172 174 data-length: 121600
173 175 data-unused: 512
174 176 data-unused: 0.421%
175 177 $ f --sha256 .hg/store/00changelog-*.nd --size
176 178 .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
177 179 #endif
178 180 #if no-pure no-rust
179 181 $ hg debugnodemap --metadata
180 182 uid: ???????????????? (glob)
181 183 tip-rev: 5002
182 184 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
183 185 data-length: 121088
184 186 data-unused: 0
185 187 data-unused: 0.000%
186 188 $ f --sha256 .hg/store/00changelog-*.nd --size
187 189 .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
188 190 #endif
189 191
190 192 Test force warming the cache
191 193
192 194 $ rm .hg/store/00changelog.n
193 195 $ hg debugnodemap --metadata
194 196 $ hg debugupdatecache
195 197 #if pure
196 198 $ hg debugnodemap --metadata
197 199 uid: ???????????????? (glob)
198 200 tip-rev: 5002
199 201 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
200 202 data-length: 121088
201 203 data-unused: 0
202 204 data-unused: 0.000%
203 205 #else
204 206 $ hg debugnodemap --metadata
205 207 uid: ???????????????? (glob)
206 208 tip-rev: 5002
207 209 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
208 210 data-length: 121088
209 211 data-unused: 0
210 212 data-unused: 0.000%
211 213 #endif
212 214
213 215 Check out of sync nodemap
214 216 =========================
215 217
216 218 First copy old data on the side.
217 219
218 220 $ mkdir ../tmp-copies
219 221 $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies
220 222
221 223 Nodemap lagging behind
222 224 ----------------------
223 225
224 226 make a new commit
225 227
226 228 $ echo bar2 > bar
227 229 $ hg ci -m 'bar2'
228 230 $ NODE=`hg log -r tip -T '{node}\n'`
229 231 $ hg log -r "$NODE" -T '{rev}\n'
230 232 5003
231 233
232 234 If the nodemap is lagging behind, it can catch up fine
233 235
234 236 $ hg debugnodemap --metadata
235 237 uid: ???????????????? (glob)
236 238 tip-rev: 5003
237 239 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
238 240 data-length: 121344 (pure !)
239 241 data-length: 121344 (rust !)
240 242 data-length: 121152 (no-rust no-pure !)
241 243 data-unused: 192 (pure !)
242 244 data-unused: 192 (rust !)
243 245 data-unused: 0 (no-rust no-pure !)
244 246 data-unused: 0.158% (pure !)
245 247 data-unused: 0.158% (rust !)
246 248 data-unused: 0.000% (no-rust no-pure !)
247 249 $ cp -f ../tmp-copies/* .hg/store/
248 250 $ hg debugnodemap --metadata
249 251 uid: ???????????????? (glob)
250 252 tip-rev: 5002
251 253 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
252 254 data-length: 121088
253 255 data-unused: 0
254 256 data-unused: 0.000%
255 257 $ hg log -r "$NODE" -T '{rev}\n'
256 258 5003
257 259
258 260 changelog altered
259 261 -----------------
260 262
261 263 If the nodemap is not gated behind a requirement, an unaware client can alter
262 264 the repository so that the revlog used to generate the nodemap is no longer
263 265 compatible with the persistent nodemap. We need to detect that.
264 266
265 267 $ hg up "$NODE~5"
266 268 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
267 269 $ echo bar > babar
268 270 $ hg add babar
269 271 $ hg ci -m 'babar'
270 272 created new head
271 273 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
272 274 $ hg log -r "$OTHERNODE" -T '{rev}\n'
273 275 5004
274 276
275 277 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
276 278
277 279 the nodemap should detect that the changelog has been tampered with and recover.
278 280
279 281 $ hg debugnodemap --metadata
280 282 uid: ???????????????? (glob)
281 283 tip-rev: 5002
282 284 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
283 285 data-length: 121536 (pure !)
284 286 data-length: 121088 (rust !)
285 287 data-length: 121088 (no-pure no-rust !)
286 288 data-unused: 448 (pure !)
287 289 data-unused: 0 (rust !)
288 290 data-unused: 0 (no-pure no-rust !)
289 291 data-unused: 0.000% (rust !)
290 292 data-unused: 0.369% (pure !)
291 293 data-unused: 0.000% (no-pure no-rust !)
292 294
293 295 $ cp -f ../tmp-copies/* .hg/store/
294 296 $ hg debugnodemap --metadata
295 297 uid: ???????????????? (glob)
296 298 tip-rev: 5002
297 299 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
298 300 data-length: 121088
299 301 data-unused: 0
300 302 data-unused: 0.000%
301 303 $ hg log -r "$OTHERNODE" -T '{rev}\n'
302 304 5002
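
The recovery demonstrated above boils down to a validity check: the docket
records both a revision number and the node it expects there, so tampering can
be detected by re-resolving the recorded tip-node. A minimal sketch of that
idea, again reusing read_nodemap_metadata (nodemap_is_valid is a made-up name,
not a Mercurial API):

  def nodemap_is_valid(repo_path):
      """True if the recorded tip-node still sits at the recorded tip-rev."""
      meta = read_nodemap_metadata(repo_path)
      try:
          rev = subprocess.check_output(
              ['hg', '-R', repo_path, 'log', '-r', meta['tip-node'], '-T', '{rev}'],
              text=True, stderr=subprocess.DEVNULL)
      except subprocess.CalledProcessError:
          # the recorded node vanished entirely, e.g. after the strip above
          return False
      return int(rev) == int(meta['tip-rev'])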
303 305
304 306 Check transaction-related properties
305 307 ====================================
306 308
307 309 An up-to-date nodemap should be available to shell hooks.
308 310
309 311 $ echo dsljfl > a
310 312 $ hg add a
311 313 $ hg ci -m a
312 314 $ hg debugnodemap --metadata
313 315 uid: ???????????????? (glob)
314 316 tip-rev: 5003
315 317 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
316 318 data-length: 121088
317 319 data-unused: 0
318 320 data-unused: 0.000%
319 321 $ echo babar2 > babar
320 322 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
321 323 uid: ???????????????? (glob)
322 324 tip-rev: 5004
323 325 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
324 326 data-length: 121280 (pure !)
325 327 data-length: 121280 (rust !)
326 328 data-length: 121088 (no-pure no-rust !)
327 329 data-unused: 192 (pure !)
328 330 data-unused: 192 (rust !)
329 331 data-unused: 0 (no-pure no-rust !)
330 332 data-unused: 0.158% (pure !)
331 333 data-unused: 0.158% (rust !)
332 334 data-unused: 0.000% (no-pure no-rust !)
333 335 $ hg debugnodemap --metadata
334 336 uid: ???????????????? (glob)
335 337 tip-rev: 5004
336 338 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
337 339 data-length: 121280 (pure !)
338 340 data-length: 121280 (rust !)
339 341 data-length: 121088 (no-pure no-rust !)
340 342 data-unused: 192 (pure !)
341 343 data-unused: 192 (rust !)
342 344 data-unused: 0 (no-pure no-rust !)
343 345 data-unused: 0.158% (pure !)
344 346 data-unused: 0.158% (rust !)
345 347 data-unused: 0.000% (no-pure no-rust !)
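
The --config form used above is equivalent to declaring the hook once in the
repository's hgrc; a minimal configuration sketch (the "nodemap-test" suffix is
just a label):

  [hooks]
  # runs while the transaction is still open, so it already sees the fresh nodemap
  pretxnclose.nodemap-test = hg debugnodemap --metadata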
346 348
347 349 Another process does not see the pending nodemap content during the run.
348 350
349 351 $ PATH=$RUNTESTDIR/testlib/:$PATH
350 352 $ echo qpoasp > a
351 353 $ hg ci -m a2 \
352 354 > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \
353 355 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
354 356
355 357 (read the repository while the commit transaction is pending)
356 358
357 359 $ wait-on-file 20 sync-txn-pending && \
358 360 > hg debugnodemap --metadata && \
359 361 > wait-on-file 20 sync-txn-close sync-repo-read
360 362 uid: ???????????????? (glob)
361 363 tip-rev: 5004
362 364 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
363 365 data-length: 121280 (pure !)
364 366 data-length: 121280 (rust !)
365 367 data-length: 121088 (no-pure no-rust !)
366 368 data-unused: 192 (pure !)
367 369 data-unused: 192 (rust !)
368 370 data-unused: 0 (no-pure no-rust !)
369 371 data-unused: 0.158% (pure !)
370 372 data-unused: 0.158% (rust !)
371 373 data-unused: 0.000% (no-pure no-rust !)
372 374 $ hg debugnodemap --metadata
373 375 uid: ???????????????? (glob)
374 376 tip-rev: 5005
375 377 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
376 378 data-length: 121536 (pure !)
377 379 data-length: 121536 (rust !)
378 380 data-length: 121088 (no-pure no-rust !)
379 381 data-unused: 448 (pure !)
380 382 data-unused: 448 (rust !)
381 383 data-unused: 0 (no-pure no-rust !)
382 384 data-unused: 0.369% (pure !)
383 385 data-unused: 0.369% (rust !)
384 386 data-unused: 0.000% (no-pure no-rust !)
385 387
386 388 $ cat output.txt
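
The isolation shown above comes from how Mercurial exposes pending data: only
processes started with the HG_PENDING environment variable pointing at the
repository root (which Mercurial sets for its own hook children) read the
uncommitted transaction; an unrelated reader sees the last committed state. A
hedged Python sketch of reading the metadata both ways (read_metadata_during_txn
is a made-up name, and it assumes HG_PENDING governs the nodemap just as it
does other pending data):

  import os
  import subprocess

  def read_metadata_during_txn(repo_path, as_hook_child=False):
      env = dict(os.environ)
      if as_hook_child:
          # mimic what Mercurial does for hooks of the open transaction
          env['HG_PENDING'] = os.path.abspath(repo_path)
      return subprocess.check_output(
          ['hg', '-R', repo_path, 'debugnodemap', '--metadata'],
          env=env, text=True)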
387 389
388 390 Check that a failing transaction will properly revert the data
389 391
390 392 $ echo plakfe > a
391 393 $ f --size --sha256 .hg/store/00changelog-*.nd
392 394 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
393 395 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
394 396 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
395 397 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
396 398 transaction abort!
397 399 rollback completed
398 400 abort: This is a late abort
399 401 [255]
400 402 $ hg debugnodemap --metadata
401 403 uid: ???????????????? (glob)
402 404 tip-rev: 5005
403 405 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
404 406 data-length: 121536 (pure !)
405 407 data-length: 121536 (rust !)
406 408 data-length: 121088 (no-pure no-rust !)
407 409 data-unused: 448 (pure !)
408 410 data-unused: 448 (rust !)
409 411 data-unused: 0 (no-pure no-rust !)
410 412 data-unused: 0.369% (pure !)
411 413 data-unused: 0.369% (rust !)
412 414 data-unused: 0.000% (no-pure no-rust !)
413 415 $ f --size --sha256 .hg/store/00changelog-*.nd
414 416 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
415 417 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
416 418 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
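
The "f --size --sha256" helper above comes from the test suite; outside of it,
the same "unchanged after an aborted transaction" comparison can be expressed
directly. A minimal sketch (nodemap_fingerprint is a made-up name):

  import glob
  import hashlib
  import os

  def nodemap_fingerprint(repo_path):
      """Size and sha256 of the nodemap data file, for before/after comparison."""
      [path] = glob.glob(os.path.join(repo_path, '.hg', 'store', '00changelog-*.nd'))
      with open(path, 'rb') as fh:
          data = fh.read()
      return os.path.basename(path), len(data), hashlib.sha256(data).hexdigest()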