revlog-compression: update the config to be a list...
Author: marmoute
Changeset: r44866:f0027a3d (default branch)
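
With this change, ``format.revlog-compression`` accepts a list of compression engines instead of a single value. A minimal sketch of the resulting hgrc syntax, assuming zstd support is compiled in and that the first supported engine in the list is the one used::

  [format]
  revlog-compression = zstd, zlib
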
@@ -1,1564 +1,1564 b''
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18
19 19 def loadconfigtable(ui, extname, configtable):
20 20 """update config item known to the ui with the extension ones"""
21 21 for section, items in sorted(configtable.items()):
22 22 knownitems = ui._knownconfig.setdefault(section, itemregister())
23 23 knownkeys = set(knownitems)
24 24 newkeys = set(items)
25 25 for key in sorted(knownkeys & newkeys):
26 26 msg = b"extension '%s' overwrite config item '%s.%s'"
27 27 msg %= (extname, section, key)
28 28 ui.develwarn(msg, config=b'warn-config')
29 29
30 30 knownitems.update(items)
31 31
32 32
33 33 class configitem(object):
34 34 """represent a known config item
35 35
36 36 :section: the official config section where to find this item,
37 37 :name: the official name within the section,
38 38 :default: default value for this item,
39 39 :alias: optional list of tuples as alternatives,
40 40 :generic: this is a generic definition, match name using regular expression.
41 41 """
42 42
43 43 def __init__(
44 44 self,
45 45 section,
46 46 name,
47 47 default=None,
48 48 alias=(),
49 49 generic=False,
50 50 priority=0,
51 51 experimental=False,
52 52 ):
53 53 self.section = section
54 54 self.name = name
55 55 self.default = default
56 56 self.alias = list(alias)
57 57 self.generic = generic
58 58 self.priority = priority
59 59 self.experimental = experimental
60 60 self._re = None
61 61 if generic:
62 62 self._re = re.compile(self.name)
63 63
64 64
65 65 class itemregister(dict):
66 66 """A specialized dictionary that can handle wild-card selection"""
67 67
68 68 def __init__(self):
69 69 super(itemregister, self).__init__()
70 70 self._generics = set()
71 71
72 72 def update(self, other):
73 73 super(itemregister, self).update(other)
74 74 self._generics.update(other._generics)
75 75
76 76 def __setitem__(self, key, item):
77 77 super(itemregister, self).__setitem__(key, item)
78 78 if item.generic:
79 79 self._generics.add(item)
80 80
81 81 def get(self, key):
82 82 baseitem = super(itemregister, self).get(key)
83 83 if baseitem is not None and not baseitem.generic:
84 84 return baseitem
85 85
86 86 # search for a matching generic item
87 87 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
88 88 for item in generics:
89 89 # we use 'match' instead of 'search' to make the matching simpler
90 90 # for people unfamiliar with regular expressions. Having the match
91 91 # rooted at the start of the string produces less surprising
92 92 # results for users writing simple regexes for sub-attributes.
93 93 #
94 94 # For example, using "color\..*" with match produces an unsurprising
95 95 # result, while using search could suddenly match apparently
96 96 # unrelated configuration that happens to contain "color."
97 97 # anywhere. This is a tradeoff where we favor requiring ".*" on
98 98 # some matches to avoid the need to prefix most patterns with "^".
99 99 # The "^" seems more error prone.
100 100 if item._re.match(key):
101 101 return item
102 102
103 103 return None
104 104
105 105
106 106 coreitems = {}
107 107
108 108
109 109 def _register(configtable, *args, **kwargs):
110 110 item = configitem(*args, **kwargs)
111 111 section = configtable.setdefault(item.section, itemregister())
112 112 if item.name in section:
113 113 msg = b"duplicated config item registration for '%s.%s'"
114 114 raise error.ProgrammingError(msg % (item.section, item.name))
115 115 section[item.name] = item
116 116
117 117
118 118 # special value for case where the default is derived from other values
119 119 dynamicdefault = object()
120 120
121 121 # Registering actual config items
122 122
123 123
124 124 def getitemregister(configtable):
125 125 f = functools.partial(_register, configtable)
126 126 # export pseudo enum as configitem.*
127 127 f.dynamicdefault = dynamicdefault
128 128 return f
129 129
130 130
131 131 coreconfigitem = getitemregister(coreitems)
132 132
133 133
134 134 def _registerdiffopts(section, configprefix=b''):
135 135 coreconfigitem(
136 136 section, configprefix + b'nodates', default=False,
137 137 )
138 138 coreconfigitem(
139 139 section, configprefix + b'showfunc', default=False,
140 140 )
141 141 coreconfigitem(
142 142 section, configprefix + b'unified', default=None,
143 143 )
144 144 coreconfigitem(
145 145 section, configprefix + b'git', default=False,
146 146 )
147 147 coreconfigitem(
148 148 section, configprefix + b'ignorews', default=False,
149 149 )
150 150 coreconfigitem(
151 151 section, configprefix + b'ignorewsamount', default=False,
152 152 )
153 153 coreconfigitem(
154 154 section, configprefix + b'ignoreblanklines', default=False,
155 155 )
156 156 coreconfigitem(
157 157 section, configprefix + b'ignorewseol', default=False,
158 158 )
159 159 coreconfigitem(
160 160 section, configprefix + b'nobinary', default=False,
161 161 )
162 162 coreconfigitem(
163 163 section, configprefix + b'noprefix', default=False,
164 164 )
165 165 coreconfigitem(
166 166 section, configprefix + b'word-diff', default=False,
167 167 )
168 168
169 169
170 170 coreconfigitem(
171 171 b'alias', b'.*', default=dynamicdefault, generic=True,
172 172 )
173 173 coreconfigitem(
174 174 b'auth', b'cookiefile', default=None,
175 175 )
176 176 _registerdiffopts(section=b'annotate')
177 177 # bookmarks.pushing: internal hack for discovery
178 178 coreconfigitem(
179 179 b'bookmarks', b'pushing', default=list,
180 180 )
181 181 # bundle.mainreporoot: internal hack for bundlerepo
182 182 coreconfigitem(
183 183 b'bundle', b'mainreporoot', default=b'',
184 184 )
185 185 coreconfigitem(
186 186 b'censor', b'policy', default=b'abort', experimental=True,
187 187 )
188 188 coreconfigitem(
189 189 b'chgserver', b'idletimeout', default=3600,
190 190 )
191 191 coreconfigitem(
192 192 b'chgserver', b'skiphash', default=False,
193 193 )
194 194 coreconfigitem(
195 195 b'cmdserver', b'log', default=None,
196 196 )
197 197 coreconfigitem(
198 198 b'cmdserver', b'max-log-files', default=7,
199 199 )
200 200 coreconfigitem(
201 201 b'cmdserver', b'max-log-size', default=b'1 MB',
202 202 )
203 203 coreconfigitem(
204 204 b'cmdserver', b'max-repo-cache', default=0, experimental=True,
205 205 )
206 206 coreconfigitem(
207 207 b'cmdserver', b'message-encodings', default=list, experimental=True,
208 208 )
209 209 coreconfigitem(
210 210 b'cmdserver',
211 211 b'track-log',
212 212 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
213 213 )
214 214 coreconfigitem(
215 215 b'color', b'.*', default=None, generic=True,
216 216 )
217 217 coreconfigitem(
218 218 b'color', b'mode', default=b'auto',
219 219 )
220 220 coreconfigitem(
221 221 b'color', b'pagermode', default=dynamicdefault,
222 222 )
223 223 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
224 224 coreconfigitem(
225 225 b'commands', b'commit.post-status', default=False,
226 226 )
227 227 coreconfigitem(
228 228 b'commands', b'grep.all-files', default=False, experimental=True,
229 229 )
230 230 coreconfigitem(
231 231 b'commands', b'merge.require-rev', default=False,
232 232 )
233 233 coreconfigitem(
234 234 b'commands', b'push.require-revs', default=False,
235 235 )
236 236 coreconfigitem(
237 237 b'commands', b'resolve.confirm', default=False,
238 238 )
239 239 coreconfigitem(
240 240 b'commands', b'resolve.explicit-re-merge', default=False,
241 241 )
242 242 coreconfigitem(
243 243 b'commands', b'resolve.mark-check', default=b'none',
244 244 )
245 245 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
246 246 coreconfigitem(
247 247 b'commands', b'show.aliasprefix', default=list,
248 248 )
249 249 coreconfigitem(
250 250 b'commands', b'status.relative', default=False,
251 251 )
252 252 coreconfigitem(
253 253 b'commands', b'status.skipstates', default=[], experimental=True,
254 254 )
255 255 coreconfigitem(
256 256 b'commands', b'status.terse', default=b'',
257 257 )
258 258 coreconfigitem(
259 259 b'commands', b'status.verbose', default=False,
260 260 )
261 261 coreconfigitem(
262 262 b'commands', b'update.check', default=None,
263 263 )
264 264 coreconfigitem(
265 265 b'commands', b'update.requiredest', default=False,
266 266 )
267 267 coreconfigitem(
268 268 b'committemplate', b'.*', default=None, generic=True,
269 269 )
270 270 coreconfigitem(
271 271 b'convert', b'bzr.saverev', default=True,
272 272 )
273 273 coreconfigitem(
274 274 b'convert', b'cvsps.cache', default=True,
275 275 )
276 276 coreconfigitem(
277 277 b'convert', b'cvsps.fuzz', default=60,
278 278 )
279 279 coreconfigitem(
280 280 b'convert', b'cvsps.logencoding', default=None,
281 281 )
282 282 coreconfigitem(
283 283 b'convert', b'cvsps.mergefrom', default=None,
284 284 )
285 285 coreconfigitem(
286 286 b'convert', b'cvsps.mergeto', default=None,
287 287 )
288 288 coreconfigitem(
289 289 b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
290 290 )
291 291 coreconfigitem(
292 292 b'convert', b'git.extrakeys', default=list,
293 293 )
294 294 coreconfigitem(
295 295 b'convert', b'git.findcopiesharder', default=False,
296 296 )
297 297 coreconfigitem(
298 298 b'convert', b'git.remoteprefix', default=b'remote',
299 299 )
300 300 coreconfigitem(
301 301 b'convert', b'git.renamelimit', default=400,
302 302 )
303 303 coreconfigitem(
304 304 b'convert', b'git.saverev', default=True,
305 305 )
306 306 coreconfigitem(
307 307 b'convert', b'git.similarity', default=50,
308 308 )
309 309 coreconfigitem(
310 310 b'convert', b'git.skipsubmodules', default=False,
311 311 )
312 312 coreconfigitem(
313 313 b'convert', b'hg.clonebranches', default=False,
314 314 )
315 315 coreconfigitem(
316 316 b'convert', b'hg.ignoreerrors', default=False,
317 317 )
318 318 coreconfigitem(
319 319 b'convert', b'hg.preserve-hash', default=False,
320 320 )
321 321 coreconfigitem(
322 322 b'convert', b'hg.revs', default=None,
323 323 )
324 324 coreconfigitem(
325 325 b'convert', b'hg.saverev', default=False,
326 326 )
327 327 coreconfigitem(
328 328 b'convert', b'hg.sourcename', default=None,
329 329 )
330 330 coreconfigitem(
331 331 b'convert', b'hg.startrev', default=None,
332 332 )
333 333 coreconfigitem(
334 334 b'convert', b'hg.tagsbranch', default=b'default',
335 335 )
336 336 coreconfigitem(
337 337 b'convert', b'hg.usebranchnames', default=True,
338 338 )
339 339 coreconfigitem(
340 340 b'convert', b'ignoreancestorcheck', default=False, experimental=True,
341 341 )
342 342 coreconfigitem(
343 343 b'convert', b'localtimezone', default=False,
344 344 )
345 345 coreconfigitem(
346 346 b'convert', b'p4.encoding', default=dynamicdefault,
347 347 )
348 348 coreconfigitem(
349 349 b'convert', b'p4.startrev', default=0,
350 350 )
351 351 coreconfigitem(
352 352 b'convert', b'skiptags', default=False,
353 353 )
354 354 coreconfigitem(
355 355 b'convert', b'svn.debugsvnlog', default=True,
356 356 )
357 357 coreconfigitem(
358 358 b'convert', b'svn.trunk', default=None,
359 359 )
360 360 coreconfigitem(
361 361 b'convert', b'svn.tags', default=None,
362 362 )
363 363 coreconfigitem(
364 364 b'convert', b'svn.branches', default=None,
365 365 )
366 366 coreconfigitem(
367 367 b'convert', b'svn.startrev', default=0,
368 368 )
369 369 coreconfigitem(
370 370 b'debug', b'dirstate.delaywrite', default=0,
371 371 )
372 372 coreconfigitem(
373 373 b'defaults', b'.*', default=None, generic=True,
374 374 )
375 375 coreconfigitem(
376 376 b'devel', b'all-warnings', default=False,
377 377 )
378 378 coreconfigitem(
379 379 b'devel', b'bundle2.debug', default=False,
380 380 )
381 381 coreconfigitem(
382 382 b'devel', b'bundle.delta', default=b'',
383 383 )
384 384 coreconfigitem(
385 385 b'devel', b'cache-vfs', default=None,
386 386 )
387 387 coreconfigitem(
388 388 b'devel', b'check-locks', default=False,
389 389 )
390 390 coreconfigitem(
391 391 b'devel', b'check-relroot', default=False,
392 392 )
393 393 coreconfigitem(
394 394 b'devel', b'default-date', default=None,
395 395 )
396 396 coreconfigitem(
397 397 b'devel', b'deprec-warn', default=False,
398 398 )
399 399 coreconfigitem(
400 400 b'devel', b'disableloaddefaultcerts', default=False,
401 401 )
402 402 coreconfigitem(
403 403 b'devel', b'warn-empty-changegroup', default=False,
404 404 )
405 405 coreconfigitem(
406 406 b'devel', b'legacy.exchange', default=list,
407 407 )
408 408 coreconfigitem(
409 409 b'devel', b'persistent-nodemap', default=False,
410 410 )
411 411 coreconfigitem(
412 412 b'devel', b'servercafile', default=b'',
413 413 )
414 414 coreconfigitem(
415 415 b'devel', b'serverexactprotocol', default=b'',
416 416 )
417 417 coreconfigitem(
418 418 b'devel', b'serverrequirecert', default=False,
419 419 )
420 420 coreconfigitem(
421 421 b'devel', b'strip-obsmarkers', default=True,
422 422 )
423 423 coreconfigitem(
424 424 b'devel', b'warn-config', default=None,
425 425 )
426 426 coreconfigitem(
427 427 b'devel', b'warn-config-default', default=None,
428 428 )
429 429 coreconfigitem(
430 430 b'devel', b'user.obsmarker', default=None,
431 431 )
432 432 coreconfigitem(
433 433 b'devel', b'warn-config-unknown', default=None,
434 434 )
435 435 coreconfigitem(
436 436 b'devel', b'debug.copies', default=False,
437 437 )
438 438 coreconfigitem(
439 439 b'devel', b'debug.extensions', default=False,
440 440 )
441 441 coreconfigitem(
442 442 b'devel', b'debug.repo-filters', default=False,
443 443 )
444 444 coreconfigitem(
445 445 b'devel', b'debug.peer-request', default=False,
446 446 )
447 447 coreconfigitem(
448 448 b'devel', b'discovery.randomize', default=True,
449 449 )
450 450 _registerdiffopts(section=b'diff')
451 451 coreconfigitem(
452 452 b'email', b'bcc', default=None,
453 453 )
454 454 coreconfigitem(
455 455 b'email', b'cc', default=None,
456 456 )
457 457 coreconfigitem(
458 458 b'email', b'charsets', default=list,
459 459 )
460 460 coreconfigitem(
461 461 b'email', b'from', default=None,
462 462 )
463 463 coreconfigitem(
464 464 b'email', b'method', default=b'smtp',
465 465 )
466 466 coreconfigitem(
467 467 b'email', b'reply-to', default=None,
468 468 )
469 469 coreconfigitem(
470 470 b'email', b'to', default=None,
471 471 )
472 472 coreconfigitem(
473 473 b'experimental', b'archivemetatemplate', default=dynamicdefault,
474 474 )
475 475 coreconfigitem(
476 476 b'experimental', b'auto-publish', default=b'publish',
477 477 )
478 478 coreconfigitem(
479 479 b'experimental', b'bundle-phases', default=False,
480 480 )
481 481 coreconfigitem(
482 482 b'experimental', b'bundle2-advertise', default=True,
483 483 )
484 484 coreconfigitem(
485 485 b'experimental', b'bundle2-output-capture', default=False,
486 486 )
487 487 coreconfigitem(
488 488 b'experimental', b'bundle2.pushback', default=False,
489 489 )
490 490 coreconfigitem(
491 491 b'experimental', b'bundle2lazylocking', default=False,
492 492 )
493 493 coreconfigitem(
494 494 b'experimental', b'bundlecomplevel', default=None,
495 495 )
496 496 coreconfigitem(
497 497 b'experimental', b'bundlecomplevel.bzip2', default=None,
498 498 )
499 499 coreconfigitem(
500 500 b'experimental', b'bundlecomplevel.gzip', default=None,
501 501 )
502 502 coreconfigitem(
503 503 b'experimental', b'bundlecomplevel.none', default=None,
504 504 )
505 505 coreconfigitem(
506 506 b'experimental', b'bundlecomplevel.zstd', default=None,
507 507 )
508 508 coreconfigitem(
509 509 b'experimental', b'changegroup3', default=False,
510 510 )
511 511 coreconfigitem(
512 512 b'experimental', b'cleanup-as-archived', default=False,
513 513 )
514 514 coreconfigitem(
515 515 b'experimental', b'clientcompressionengines', default=list,
516 516 )
517 517 coreconfigitem(
518 518 b'experimental', b'copytrace', default=b'on',
519 519 )
520 520 coreconfigitem(
521 521 b'experimental', b'copytrace.movecandidateslimit', default=100,
522 522 )
523 523 coreconfigitem(
524 524 b'experimental', b'copytrace.sourcecommitlimit', default=100,
525 525 )
526 526 coreconfigitem(
527 527 b'experimental', b'copies.read-from', default=b"filelog-only",
528 528 )
529 529 coreconfigitem(
530 530 b'experimental', b'copies.write-to', default=b'filelog-only',
531 531 )
532 532 coreconfigitem(
533 533 b'experimental', b'crecordtest', default=None,
534 534 )
535 535 coreconfigitem(
536 536 b'experimental', b'directaccess', default=False,
537 537 )
538 538 coreconfigitem(
539 539 b'experimental', b'directaccess.revnums', default=False,
540 540 )
541 541 coreconfigitem(
542 542 b'experimental', b'editortmpinhg', default=False,
543 543 )
544 544 coreconfigitem(
545 545 b'experimental', b'evolution', default=list,
546 546 )
547 547 coreconfigitem(
548 548 b'experimental',
549 549 b'evolution.allowdivergence',
550 550 default=False,
551 551 alias=[(b'experimental', b'allowdivergence')],
552 552 )
553 553 coreconfigitem(
554 554 b'experimental', b'evolution.allowunstable', default=None,
555 555 )
556 556 coreconfigitem(
557 557 b'experimental', b'evolution.createmarkers', default=None,
558 558 )
559 559 coreconfigitem(
560 560 b'experimental',
561 561 b'evolution.effect-flags',
562 562 default=True,
563 563 alias=[(b'experimental', b'effect-flags')],
564 564 )
565 565 coreconfigitem(
566 566 b'experimental', b'evolution.exchange', default=None,
567 567 )
568 568 coreconfigitem(
569 569 b'experimental', b'evolution.bundle-obsmarker', default=False,
570 570 )
571 571 coreconfigitem(
572 572 b'experimental', b'log.topo', default=False,
573 573 )
574 574 coreconfigitem(
575 575 b'experimental', b'evolution.report-instabilities', default=True,
576 576 )
577 577 coreconfigitem(
578 578 b'experimental', b'evolution.track-operation', default=True,
579 579 )
580 580 # repo-level config to exclude a revset visibility
581 581 #
582 582 # The target use case is to use `share` to expose different subset of the same
583 583 # repository, especially server side. See also `server.view`.
584 584 coreconfigitem(
585 585 b'experimental', b'extra-filter-revs', default=None,
586 586 )
587 587 coreconfigitem(
588 588 b'experimental', b'maxdeltachainspan', default=-1,
589 589 )
590 590 coreconfigitem(
591 591 b'experimental', b'mergetempdirprefix', default=None,
592 592 )
593 593 coreconfigitem(
594 594 b'experimental', b'mmapindexthreshold', default=None,
595 595 )
596 596 coreconfigitem(
597 597 b'experimental', b'narrow', default=False,
598 598 )
599 599 coreconfigitem(
600 600 b'experimental', b'nonnormalparanoidcheck', default=False,
601 601 )
602 602 coreconfigitem(
603 603 b'experimental', b'exportableenviron', default=list,
604 604 )
605 605 coreconfigitem(
606 606 b'experimental', b'extendedheader.index', default=None,
607 607 )
608 608 coreconfigitem(
609 609 b'experimental', b'extendedheader.similarity', default=False,
610 610 )
611 611 coreconfigitem(
612 612 b'experimental', b'graphshorten', default=False,
613 613 )
614 614 coreconfigitem(
615 615 b'experimental', b'graphstyle.parent', default=dynamicdefault,
616 616 )
617 617 coreconfigitem(
618 618 b'experimental', b'graphstyle.missing', default=dynamicdefault,
619 619 )
620 620 coreconfigitem(
621 621 b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
622 622 )
623 623 coreconfigitem(
624 624 b'experimental', b'hook-track-tags', default=False,
625 625 )
626 626 coreconfigitem(
627 627 b'experimental', b'httppeer.advertise-v2', default=False,
628 628 )
629 629 coreconfigitem(
630 630 b'experimental', b'httppeer.v2-encoder-order', default=None,
631 631 )
632 632 coreconfigitem(
633 633 b'experimental', b'httppostargs', default=False,
634 634 )
635 635 coreconfigitem(
636 636 b'experimental', b'mergedriver', default=None,
637 637 )
638 638 coreconfigitem(b'experimental', b'nointerrupt', default=False)
639 639 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
640 640
641 641 coreconfigitem(
642 642 b'experimental', b'obsmarkers-exchange-debug', default=False,
643 643 )
644 644 coreconfigitem(
645 645 b'experimental', b'remotenames', default=False,
646 646 )
647 647 coreconfigitem(
648 648 b'experimental', b'removeemptydirs', default=True,
649 649 )
650 650 coreconfigitem(
651 651 b'experimental', b'revert.interactive.select-to-keep', default=False,
652 652 )
653 653 coreconfigitem(
654 654 b'experimental', b'revisions.prefixhexnode', default=False,
655 655 )
656 656 coreconfigitem(
657 657 b'experimental', b'revlogv2', default=None,
658 658 )
659 659 coreconfigitem(
660 660 b'experimental', b'revisions.disambiguatewithin', default=None,
661 661 )
662 662 coreconfigitem(
663 663 b'experimental', b'rust.index', default=False,
664 664 )
665 665 coreconfigitem(
666 666 b'experimental', b'exp-persistent-nodemap', default=False,
667 667 )
668 668 coreconfigitem(
669 669 b'experimental', b'exp-persistent-nodemap.mmap', default=True,
670 670 )
671 671 coreconfigitem(
672 672 b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
673 673 )
674 674 coreconfigitem(
675 675 b'experimental',
676 676 b'server.manifestdata.recommended-batch-size',
677 677 default=100000,
678 678 )
679 679 coreconfigitem(
680 680 b'experimental', b'server.stream-narrow-clones', default=False,
681 681 )
682 682 coreconfigitem(
683 683 b'experimental', b'single-head-per-branch', default=False,
684 684 )
685 685 coreconfigitem(
686 686 b'experimental',
687 687 b'single-head-per-branch:account-closed-heads',
688 688 default=False,
689 689 )
690 690 coreconfigitem(
691 691 b'experimental', b'sshserver.support-v2', default=False,
692 692 )
693 693 coreconfigitem(
694 694 b'experimental', b'sparse-read', default=False,
695 695 )
696 696 coreconfigitem(
697 697 b'experimental', b'sparse-read.density-threshold', default=0.50,
698 698 )
699 699 coreconfigitem(
700 700 b'experimental', b'sparse-read.min-gap-size', default=b'65K',
701 701 )
702 702 coreconfigitem(
703 703 b'experimental', b'treemanifest', default=False,
704 704 )
705 705 coreconfigitem(
706 706 b'experimental', b'update.atomic-file', default=False,
707 707 )
708 708 coreconfigitem(
709 709 b'experimental', b'sshpeer.advertise-v2', default=False,
710 710 )
711 711 coreconfigitem(
712 712 b'experimental', b'web.apiserver', default=False,
713 713 )
714 714 coreconfigitem(
715 715 b'experimental', b'web.api.http-v2', default=False,
716 716 )
717 717 coreconfigitem(
718 718 b'experimental', b'web.api.debugreflect', default=False,
719 719 )
720 720 coreconfigitem(
721 721 b'experimental', b'worker.wdir-get-thread-safe', default=False,
722 722 )
723 723 coreconfigitem(
724 724 b'experimental', b'worker.repository-upgrade', default=False,
725 725 )
726 726 coreconfigitem(
727 727 b'experimental', b'xdiff', default=False,
728 728 )
729 729 coreconfigitem(
730 730 b'extensions', b'.*', default=None, generic=True,
731 731 )
732 732 coreconfigitem(
733 733 b'extdata', b'.*', default=None, generic=True,
734 734 )
735 735 coreconfigitem(
736 736 b'format', b'bookmarks-in-store', default=False,
737 737 )
738 738 coreconfigitem(
739 739 b'format', b'chunkcachesize', default=None, experimental=True,
740 740 )
741 741 coreconfigitem(
742 742 b'format', b'dotencode', default=True,
743 743 )
744 744 coreconfigitem(
745 745 b'format', b'generaldelta', default=False, experimental=True,
746 746 )
747 747 coreconfigitem(
748 748 b'format', b'manifestcachesize', default=None, experimental=True,
749 749 )
750 750 coreconfigitem(
751 751 b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
752 752 )
753 753 coreconfigitem(
754 754 b'format', b'obsstore-version', default=None,
755 755 )
756 756 coreconfigitem(
757 757 b'format', b'sparse-revlog', default=True,
758 758 )
759 759 coreconfigitem(
760 760 b'format',
761 761 b'revlog-compression',
762 default=b'zlib',
762 default=lambda: [b'zlib'],
763 763 alias=[(b'experimental', b'format.compression')],
764 764 )
765 765 coreconfigitem(
766 766 b'format', b'usefncache', default=True,
767 767 )
768 768 coreconfigitem(
769 769 b'format', b'usegeneraldelta', default=True,
770 770 )
771 771 coreconfigitem(
772 772 b'format', b'usestore', default=True,
773 773 )
774 774 coreconfigitem(
775 775 b'format',
776 776 b'exp-use-copies-side-data-changeset',
777 777 default=False,
778 778 experimental=True,
779 779 )
780 780 coreconfigitem(
781 781 b'format', b'exp-use-side-data', default=False, experimental=True,
782 782 )
783 783 coreconfigitem(
784 784 b'format', b'internal-phase', default=False, experimental=True,
785 785 )
786 786 coreconfigitem(
787 787 b'fsmonitor', b'warn_when_unused', default=True,
788 788 )
789 789 coreconfigitem(
790 790 b'fsmonitor', b'warn_update_file_count', default=50000,
791 791 )
792 792 coreconfigitem(
793 793 b'help', br'hidden-command\..*', default=False, generic=True,
794 794 )
795 795 coreconfigitem(
796 796 b'help', br'hidden-topic\..*', default=False, generic=True,
797 797 )
798 798 coreconfigitem(
799 799 b'hooks', b'.*', default=dynamicdefault, generic=True,
800 800 )
801 801 coreconfigitem(
802 802 b'hgweb-paths', b'.*', default=list, generic=True,
803 803 )
804 804 coreconfigitem(
805 805 b'hostfingerprints', b'.*', default=list, generic=True,
806 806 )
807 807 coreconfigitem(
808 808 b'hostsecurity', b'ciphers', default=None,
809 809 )
810 810 coreconfigitem(
811 811 b'hostsecurity', b'disabletls10warning', default=False,
812 812 )
813 813 coreconfigitem(
814 814 b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
815 815 )
816 816 coreconfigitem(
817 817 b'hostsecurity',
818 818 b'.*:minimumprotocol$',
819 819 default=dynamicdefault,
820 820 generic=True,
821 821 )
822 822 coreconfigitem(
823 823 b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
824 824 )
825 825 coreconfigitem(
826 826 b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
827 827 )
828 828 coreconfigitem(
829 829 b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
830 830 )
831 831
832 832 coreconfigitem(
833 833 b'http_proxy', b'always', default=False,
834 834 )
835 835 coreconfigitem(
836 836 b'http_proxy', b'host', default=None,
837 837 )
838 838 coreconfigitem(
839 839 b'http_proxy', b'no', default=list,
840 840 )
841 841 coreconfigitem(
842 842 b'http_proxy', b'passwd', default=None,
843 843 )
844 844 coreconfigitem(
845 845 b'http_proxy', b'user', default=None,
846 846 )
847 847
848 848 coreconfigitem(
849 849 b'http', b'timeout', default=None,
850 850 )
851 851
852 852 coreconfigitem(
853 853 b'logtoprocess', b'commandexception', default=None,
854 854 )
855 855 coreconfigitem(
856 856 b'logtoprocess', b'commandfinish', default=None,
857 857 )
858 858 coreconfigitem(
859 859 b'logtoprocess', b'command', default=None,
860 860 )
861 861 coreconfigitem(
862 862 b'logtoprocess', b'develwarn', default=None,
863 863 )
864 864 coreconfigitem(
865 865 b'logtoprocess', b'uiblocked', default=None,
866 866 )
867 867 coreconfigitem(
868 868 b'merge', b'checkunknown', default=b'abort',
869 869 )
870 870 coreconfigitem(
871 871 b'merge', b'checkignored', default=b'abort',
872 872 )
873 873 coreconfigitem(
874 874 b'experimental', b'merge.checkpathconflicts', default=False,
875 875 )
876 876 coreconfigitem(
877 877 b'merge', b'followcopies', default=True,
878 878 )
879 879 coreconfigitem(
880 880 b'merge', b'on-failure', default=b'continue',
881 881 )
882 882 coreconfigitem(
883 883 b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
884 884 )
885 885 coreconfigitem(
886 886 b'merge', b'strict-capability-check', default=False,
887 887 )
888 888 coreconfigitem(
889 889 b'merge-tools', b'.*', default=None, generic=True,
890 890 )
891 891 coreconfigitem(
892 892 b'merge-tools',
893 893 br'.*\.args$',
894 894 default=b"$local $base $other",
895 895 generic=True,
896 896 priority=-1,
897 897 )
898 898 coreconfigitem(
899 899 b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
900 900 )
901 901 coreconfigitem(
902 902 b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
903 903 )
904 904 coreconfigitem(
905 905 b'merge-tools',
906 906 br'.*\.checkchanged$',
907 907 default=False,
908 908 generic=True,
909 909 priority=-1,
910 910 )
911 911 coreconfigitem(
912 912 b'merge-tools',
913 913 br'.*\.executable$',
914 914 default=dynamicdefault,
915 915 generic=True,
916 916 priority=-1,
917 917 )
918 918 coreconfigitem(
919 919 b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
920 920 )
921 921 coreconfigitem(
922 922 b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
923 923 )
924 924 coreconfigitem(
925 925 b'merge-tools',
926 926 br'.*\.mergemarkers$',
927 927 default=b'basic',
928 928 generic=True,
929 929 priority=-1,
930 930 )
931 931 coreconfigitem(
932 932 b'merge-tools',
933 933 br'.*\.mergemarkertemplate$',
934 934 default=dynamicdefault, # take from ui.mergemarkertemplate
935 935 generic=True,
936 936 priority=-1,
937 937 )
938 938 coreconfigitem(
939 939 b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
940 940 )
941 941 coreconfigitem(
942 942 b'merge-tools',
943 943 br'.*\.premerge$',
944 944 default=dynamicdefault,
945 945 generic=True,
946 946 priority=-1,
947 947 )
948 948 coreconfigitem(
949 949 b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
950 950 )
951 951 coreconfigitem(
952 952 b'pager', b'attend-.*', default=dynamicdefault, generic=True,
953 953 )
954 954 coreconfigitem(
955 955 b'pager', b'ignore', default=list,
956 956 )
957 957 coreconfigitem(
958 958 b'pager', b'pager', default=dynamicdefault,
959 959 )
960 960 coreconfigitem(
961 961 b'patch', b'eol', default=b'strict',
962 962 )
963 963 coreconfigitem(
964 964 b'patch', b'fuzz', default=2,
965 965 )
966 966 coreconfigitem(
967 967 b'paths', b'default', default=None,
968 968 )
969 969 coreconfigitem(
970 970 b'paths', b'default-push', default=None,
971 971 )
972 972 coreconfigitem(
973 973 b'paths', b'.*', default=None, generic=True,
974 974 )
975 975 coreconfigitem(
976 976 b'phases', b'checksubrepos', default=b'follow',
977 977 )
978 978 coreconfigitem(
979 979 b'phases', b'new-commit', default=b'draft',
980 980 )
981 981 coreconfigitem(
982 982 b'phases', b'publish', default=True,
983 983 )
984 984 coreconfigitem(
985 985 b'profiling', b'enabled', default=False,
986 986 )
987 987 coreconfigitem(
988 988 b'profiling', b'format', default=b'text',
989 989 )
990 990 coreconfigitem(
991 991 b'profiling', b'freq', default=1000,
992 992 )
993 993 coreconfigitem(
994 994 b'profiling', b'limit', default=30,
995 995 )
996 996 coreconfigitem(
997 997 b'profiling', b'nested', default=0,
998 998 )
999 999 coreconfigitem(
1000 1000 b'profiling', b'output', default=None,
1001 1001 )
1002 1002 coreconfigitem(
1003 1003 b'profiling', b'showmax', default=0.999,
1004 1004 )
1005 1005 coreconfigitem(
1006 1006 b'profiling', b'showmin', default=dynamicdefault,
1007 1007 )
1008 1008 coreconfigitem(
1009 1009 b'profiling', b'showtime', default=True,
1010 1010 )
1011 1011 coreconfigitem(
1012 1012 b'profiling', b'sort', default=b'inlinetime',
1013 1013 )
1014 1014 coreconfigitem(
1015 1015 b'profiling', b'statformat', default=b'hotpath',
1016 1016 )
1017 1017 coreconfigitem(
1018 1018 b'profiling', b'time-track', default=dynamicdefault,
1019 1019 )
1020 1020 coreconfigitem(
1021 1021 b'profiling', b'type', default=b'stat',
1022 1022 )
1023 1023 coreconfigitem(
1024 1024 b'progress', b'assume-tty', default=False,
1025 1025 )
1026 1026 coreconfigitem(
1027 1027 b'progress', b'changedelay', default=1,
1028 1028 )
1029 1029 coreconfigitem(
1030 1030 b'progress', b'clear-complete', default=True,
1031 1031 )
1032 1032 coreconfigitem(
1033 1033 b'progress', b'debug', default=False,
1034 1034 )
1035 1035 coreconfigitem(
1036 1036 b'progress', b'delay', default=3,
1037 1037 )
1038 1038 coreconfigitem(
1039 1039 b'progress', b'disable', default=False,
1040 1040 )
1041 1041 coreconfigitem(
1042 1042 b'progress', b'estimateinterval', default=60.0,
1043 1043 )
1044 1044 coreconfigitem(
1045 1045 b'progress',
1046 1046 b'format',
1047 1047 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1048 1048 )
1049 1049 coreconfigitem(
1050 1050 b'progress', b'refresh', default=0.1,
1051 1051 )
1052 1052 coreconfigitem(
1053 1053 b'progress', b'width', default=dynamicdefault,
1054 1054 )
1055 1055 coreconfigitem(
1056 1056 b'push', b'pushvars.server', default=False,
1057 1057 )
1058 1058 coreconfigitem(
1059 1059 b'rewrite',
1060 1060 b'backup-bundle',
1061 1061 default=True,
1062 1062 alias=[(b'ui', b'history-editing-backup')],
1063 1063 )
1064 1064 coreconfigitem(
1065 1065 b'rewrite', b'update-timestamp', default=False,
1066 1066 )
1067 1067 coreconfigitem(
1068 1068 b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
1069 1069 )
1070 1070 coreconfigitem(
1071 1071 b'storage',
1072 1072 b'revlog.optimize-delta-parent-choice',
1073 1073 default=True,
1074 1074 alias=[(b'format', b'aggressivemergedeltas')],
1075 1075 )
1076 1076 coreconfigitem(
1077 1077 b'storage', b'revlog.reuse-external-delta', default=True,
1078 1078 )
1079 1079 coreconfigitem(
1080 1080 b'storage', b'revlog.reuse-external-delta-parent', default=None,
1081 1081 )
1082 1082 coreconfigitem(
1083 1083 b'storage', b'revlog.zlib.level', default=None,
1084 1084 )
1085 1085 coreconfigitem(
1086 1086 b'storage', b'revlog.zstd.level', default=None,
1087 1087 )
1088 1088 coreconfigitem(
1089 1089 b'server', b'bookmarks-pushkey-compat', default=True,
1090 1090 )
1091 1091 coreconfigitem(
1092 1092 b'server', b'bundle1', default=True,
1093 1093 )
1094 1094 coreconfigitem(
1095 1095 b'server', b'bundle1gd', default=None,
1096 1096 )
1097 1097 coreconfigitem(
1098 1098 b'server', b'bundle1.pull', default=None,
1099 1099 )
1100 1100 coreconfigitem(
1101 1101 b'server', b'bundle1gd.pull', default=None,
1102 1102 )
1103 1103 coreconfigitem(
1104 1104 b'server', b'bundle1.push', default=None,
1105 1105 )
1106 1106 coreconfigitem(
1107 1107 b'server', b'bundle1gd.push', default=None,
1108 1108 )
1109 1109 coreconfigitem(
1110 1110 b'server',
1111 1111 b'bundle2.stream',
1112 1112 default=True,
1113 1113 alias=[(b'experimental', b'bundle2.stream')],
1114 1114 )
1115 1115 coreconfigitem(
1116 1116 b'server', b'compressionengines', default=list,
1117 1117 )
1118 1118 coreconfigitem(
1119 1119 b'server', b'concurrent-push-mode', default=b'strict',
1120 1120 )
1121 1121 coreconfigitem(
1122 1122 b'server', b'disablefullbundle', default=False,
1123 1123 )
1124 1124 coreconfigitem(
1125 1125 b'server', b'maxhttpheaderlen', default=1024,
1126 1126 )
1127 1127 coreconfigitem(
1128 1128 b'server', b'pullbundle', default=False,
1129 1129 )
1130 1130 coreconfigitem(
1131 1131 b'server', b'preferuncompressed', default=False,
1132 1132 )
1133 1133 coreconfigitem(
1134 1134 b'server', b'streamunbundle', default=False,
1135 1135 )
1136 1136 coreconfigitem(
1137 1137 b'server', b'uncompressed', default=True,
1138 1138 )
1139 1139 coreconfigitem(
1140 1140 b'server', b'uncompressedallowsecret', default=False,
1141 1141 )
1142 1142 coreconfigitem(
1143 1143 b'server', b'view', default=b'served',
1144 1144 )
1145 1145 coreconfigitem(
1146 1146 b'server', b'validate', default=False,
1147 1147 )
1148 1148 coreconfigitem(
1149 1149 b'server', b'zliblevel', default=-1,
1150 1150 )
1151 1151 coreconfigitem(
1152 1152 b'server', b'zstdlevel', default=3,
1153 1153 )
1154 1154 coreconfigitem(
1155 1155 b'share', b'pool', default=None,
1156 1156 )
1157 1157 coreconfigitem(
1158 1158 b'share', b'poolnaming', default=b'identity',
1159 1159 )
1160 1160 coreconfigitem(
1161 1161 b'shelve', b'maxbackups', default=10,
1162 1162 )
1163 1163 coreconfigitem(
1164 1164 b'smtp', b'host', default=None,
1165 1165 )
1166 1166 coreconfigitem(
1167 1167 b'smtp', b'local_hostname', default=None,
1168 1168 )
1169 1169 coreconfigitem(
1170 1170 b'smtp', b'password', default=None,
1171 1171 )
1172 1172 coreconfigitem(
1173 1173 b'smtp', b'port', default=dynamicdefault,
1174 1174 )
1175 1175 coreconfigitem(
1176 1176 b'smtp', b'tls', default=b'none',
1177 1177 )
1178 1178 coreconfigitem(
1179 1179 b'smtp', b'username', default=None,
1180 1180 )
1181 1181 coreconfigitem(
1182 1182 b'sparse', b'missingwarning', default=True, experimental=True,
1183 1183 )
1184 1184 coreconfigitem(
1185 1185 b'subrepos',
1186 1186 b'allowed',
1187 1187 default=dynamicdefault, # to make backporting simpler
1188 1188 )
1189 1189 coreconfigitem(
1190 1190 b'subrepos', b'hg:allowed', default=dynamicdefault,
1191 1191 )
1192 1192 coreconfigitem(
1193 1193 b'subrepos', b'git:allowed', default=dynamicdefault,
1194 1194 )
1195 1195 coreconfigitem(
1196 1196 b'subrepos', b'svn:allowed', default=dynamicdefault,
1197 1197 )
1198 1198 coreconfigitem(
1199 1199 b'templates', b'.*', default=None, generic=True,
1200 1200 )
1201 1201 coreconfigitem(
1202 1202 b'templateconfig', b'.*', default=dynamicdefault, generic=True,
1203 1203 )
1204 1204 coreconfigitem(
1205 1205 b'trusted', b'groups', default=list,
1206 1206 )
1207 1207 coreconfigitem(
1208 1208 b'trusted', b'users', default=list,
1209 1209 )
1210 1210 coreconfigitem(
1211 1211 b'ui', b'_usedassubrepo', default=False,
1212 1212 )
1213 1213 coreconfigitem(
1214 1214 b'ui', b'allowemptycommit', default=False,
1215 1215 )
1216 1216 coreconfigitem(
1217 1217 b'ui', b'archivemeta', default=True,
1218 1218 )
1219 1219 coreconfigitem(
1220 1220 b'ui', b'askusername', default=False,
1221 1221 )
1222 1222 coreconfigitem(
1223 1223 b'ui', b'clonebundlefallback', default=False,
1224 1224 )
1225 1225 coreconfigitem(
1226 1226 b'ui', b'clonebundleprefers', default=list,
1227 1227 )
1228 1228 coreconfigitem(
1229 1229 b'ui', b'clonebundles', default=True,
1230 1230 )
1231 1231 coreconfigitem(
1232 1232 b'ui', b'color', default=b'auto',
1233 1233 )
1234 1234 coreconfigitem(
1235 1235 b'ui', b'commitsubrepos', default=False,
1236 1236 )
1237 1237 coreconfigitem(
1238 1238 b'ui', b'debug', default=False,
1239 1239 )
1240 1240 coreconfigitem(
1241 1241 b'ui', b'debugger', default=None,
1242 1242 )
1243 1243 coreconfigitem(
1244 1244 b'ui', b'editor', default=dynamicdefault,
1245 1245 )
1246 1246 coreconfigitem(
1247 1247 b'ui', b'fallbackencoding', default=None,
1248 1248 )
1249 1249 coreconfigitem(
1250 1250 b'ui', b'forcecwd', default=None,
1251 1251 )
1252 1252 coreconfigitem(
1253 1253 b'ui', b'forcemerge', default=None,
1254 1254 )
1255 1255 coreconfigitem(
1256 1256 b'ui', b'formatdebug', default=False,
1257 1257 )
1258 1258 coreconfigitem(
1259 1259 b'ui', b'formatjson', default=False,
1260 1260 )
1261 1261 coreconfigitem(
1262 1262 b'ui', b'formatted', default=None,
1263 1263 )
1264 1264 coreconfigitem(
1265 1265 b'ui', b'graphnodetemplate', default=None,
1266 1266 )
1267 1267 coreconfigitem(
1268 1268 b'ui', b'interactive', default=None,
1269 1269 )
1270 1270 coreconfigitem(
1271 1271 b'ui', b'interface', default=None,
1272 1272 )
1273 1273 coreconfigitem(
1274 1274 b'ui', b'interface.chunkselector', default=None,
1275 1275 )
1276 1276 coreconfigitem(
1277 1277 b'ui', b'large-file-limit', default=10000000,
1278 1278 )
1279 1279 coreconfigitem(
1280 1280 b'ui', b'logblockedtimes', default=False,
1281 1281 )
1282 1282 coreconfigitem(
1283 1283 b'ui', b'logtemplate', default=None,
1284 1284 )
1285 1285 coreconfigitem(
1286 1286 b'ui', b'merge', default=None,
1287 1287 )
1288 1288 coreconfigitem(
1289 1289 b'ui', b'mergemarkers', default=b'basic',
1290 1290 )
1291 1291 coreconfigitem(
1292 1292 b'ui',
1293 1293 b'mergemarkertemplate',
1294 1294 default=(
1295 1295 b'{node|short} '
1296 1296 b'{ifeq(tags, "tip", "", '
1297 1297 b'ifeq(tags, "", "", "{tags} "))}'
1298 1298 b'{if(bookmarks, "{bookmarks} ")}'
1299 1299 b'{ifeq(branch, "default", "", "{branch} ")}'
1300 1300 b'- {author|user}: {desc|firstline}'
1301 1301 ),
1302 1302 )
1303 1303 coreconfigitem(
1304 1304 b'ui', b'message-output', default=b'stdio',
1305 1305 )
1306 1306 coreconfigitem(
1307 1307 b'ui', b'nontty', default=False,
1308 1308 )
1309 1309 coreconfigitem(
1310 1310 b'ui', b'origbackuppath', default=None,
1311 1311 )
1312 1312 coreconfigitem(
1313 1313 b'ui', b'paginate', default=True,
1314 1314 )
1315 1315 coreconfigitem(
1316 1316 b'ui', b'patch', default=None,
1317 1317 )
1318 1318 coreconfigitem(
1319 1319 b'ui', b'pre-merge-tool-output-template', default=None,
1320 1320 )
1321 1321 coreconfigitem(
1322 1322 b'ui', b'portablefilenames', default=b'warn',
1323 1323 )
1324 1324 coreconfigitem(
1325 1325 b'ui', b'promptecho', default=False,
1326 1326 )
1327 1327 coreconfigitem(
1328 1328 b'ui', b'quiet', default=False,
1329 1329 )
1330 1330 coreconfigitem(
1331 1331 b'ui', b'quietbookmarkmove', default=False,
1332 1332 )
1333 1333 coreconfigitem(
1334 1334 b'ui', b'relative-paths', default=b'legacy',
1335 1335 )
1336 1336 coreconfigitem(
1337 1337 b'ui', b'remotecmd', default=b'hg',
1338 1338 )
1339 1339 coreconfigitem(
1340 1340 b'ui', b'report_untrusted', default=True,
1341 1341 )
1342 1342 coreconfigitem(
1343 1343 b'ui', b'rollback', default=True,
1344 1344 )
1345 1345 coreconfigitem(
1346 1346 b'ui', b'signal-safe-lock', default=True,
1347 1347 )
1348 1348 coreconfigitem(
1349 1349 b'ui', b'slash', default=False,
1350 1350 )
1351 1351 coreconfigitem(
1352 1352 b'ui', b'ssh', default=b'ssh',
1353 1353 )
1354 1354 coreconfigitem(
1355 1355 b'ui', b'ssherrorhint', default=None,
1356 1356 )
1357 1357 coreconfigitem(
1358 1358 b'ui', b'statuscopies', default=False,
1359 1359 )
1360 1360 coreconfigitem(
1361 1361 b'ui', b'strict', default=False,
1362 1362 )
1363 1363 coreconfigitem(
1364 1364 b'ui', b'style', default=b'',
1365 1365 )
1366 1366 coreconfigitem(
1367 1367 b'ui', b'supportcontact', default=None,
1368 1368 )
1369 1369 coreconfigitem(
1370 1370 b'ui', b'textwidth', default=78,
1371 1371 )
1372 1372 coreconfigitem(
1373 1373 b'ui', b'timeout', default=b'600',
1374 1374 )
1375 1375 coreconfigitem(
1376 1376 b'ui', b'timeout.warn', default=0,
1377 1377 )
1378 1378 coreconfigitem(
1379 1379 b'ui', b'traceback', default=False,
1380 1380 )
1381 1381 coreconfigitem(
1382 1382 b'ui', b'tweakdefaults', default=False,
1383 1383 )
1384 1384 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
1385 1385 coreconfigitem(
1386 1386 b'ui', b'verbose', default=False,
1387 1387 )
1388 1388 coreconfigitem(
1389 1389 b'verify', b'skipflags', default=None,
1390 1390 )
1391 1391 coreconfigitem(
1392 1392 b'web', b'allowbz2', default=False,
1393 1393 )
1394 1394 coreconfigitem(
1395 1395 b'web', b'allowgz', default=False,
1396 1396 )
1397 1397 coreconfigitem(
1398 1398 b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True,
1399 1399 )
1400 1400 coreconfigitem(
1401 1401 b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list,
1402 1402 )
1403 1403 coreconfigitem(
1404 1404 b'web', b'allowzip', default=False,
1405 1405 )
1406 1406 coreconfigitem(
1407 1407 b'web', b'archivesubrepos', default=False,
1408 1408 )
1409 1409 coreconfigitem(
1410 1410 b'web', b'cache', default=True,
1411 1411 )
1412 1412 coreconfigitem(
1413 1413 b'web', b'comparisoncontext', default=5,
1414 1414 )
1415 1415 coreconfigitem(
1416 1416 b'web', b'contact', default=None,
1417 1417 )
1418 1418 coreconfigitem(
1419 1419 b'web', b'deny_push', default=list,
1420 1420 )
1421 1421 coreconfigitem(
1422 1422 b'web', b'guessmime', default=False,
1423 1423 )
1424 1424 coreconfigitem(
1425 1425 b'web', b'hidden', default=False,
1426 1426 )
1427 1427 coreconfigitem(
1428 1428 b'web', b'labels', default=list,
1429 1429 )
1430 1430 coreconfigitem(
1431 1431 b'web', b'logoimg', default=b'hglogo.png',
1432 1432 )
1433 1433 coreconfigitem(
1434 1434 b'web', b'logourl', default=b'https://mercurial-scm.org/',
1435 1435 )
1436 1436 coreconfigitem(
1437 1437 b'web', b'accesslog', default=b'-',
1438 1438 )
1439 1439 coreconfigitem(
1440 1440 b'web', b'address', default=b'',
1441 1441 )
1442 1442 coreconfigitem(
1443 1443 b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list,
1444 1444 )
1445 1445 coreconfigitem(
1446 1446 b'web', b'allow_read', default=list,
1447 1447 )
1448 1448 coreconfigitem(
1449 1449 b'web', b'baseurl', default=None,
1450 1450 )
1451 1451 coreconfigitem(
1452 1452 b'web', b'cacerts', default=None,
1453 1453 )
1454 1454 coreconfigitem(
1455 1455 b'web', b'certificate', default=None,
1456 1456 )
1457 1457 coreconfigitem(
1458 1458 b'web', b'collapse', default=False,
1459 1459 )
1460 1460 coreconfigitem(
1461 1461 b'web', b'csp', default=None,
1462 1462 )
1463 1463 coreconfigitem(
1464 1464 b'web', b'deny_read', default=list,
1465 1465 )
1466 1466 coreconfigitem(
1467 1467 b'web', b'descend', default=True,
1468 1468 )
1469 1469 coreconfigitem(
1470 1470 b'web', b'description', default=b"",
1471 1471 )
1472 1472 coreconfigitem(
1473 1473 b'web', b'encoding', default=lambda: encoding.encoding,
1474 1474 )
1475 1475 coreconfigitem(
1476 1476 b'web', b'errorlog', default=b'-',
1477 1477 )
1478 1478 coreconfigitem(
1479 1479 b'web', b'ipv6', default=False,
1480 1480 )
1481 1481 coreconfigitem(
1482 1482 b'web', b'maxchanges', default=10,
1483 1483 )
1484 1484 coreconfigitem(
1485 1485 b'web', b'maxfiles', default=10,
1486 1486 )
1487 1487 coreconfigitem(
1488 1488 b'web', b'maxshortchanges', default=60,
1489 1489 )
1490 1490 coreconfigitem(
1491 1491 b'web', b'motd', default=b'',
1492 1492 )
1493 1493 coreconfigitem(
1494 1494 b'web', b'name', default=dynamicdefault,
1495 1495 )
1496 1496 coreconfigitem(
1497 1497 b'web', b'port', default=8000,
1498 1498 )
1499 1499 coreconfigitem(
1500 1500 b'web', b'prefix', default=b'',
1501 1501 )
1502 1502 coreconfigitem(
1503 1503 b'web', b'push_ssl', default=True,
1504 1504 )
1505 1505 coreconfigitem(
1506 1506 b'web', b'refreshinterval', default=20,
1507 1507 )
1508 1508 coreconfigitem(
1509 1509 b'web', b'server-header', default=None,
1510 1510 )
1511 1511 coreconfigitem(
1512 1512 b'web', b'static', default=None,
1513 1513 )
1514 1514 coreconfigitem(
1515 1515 b'web', b'staticurl', default=None,
1516 1516 )
1517 1517 coreconfigitem(
1518 1518 b'web', b'stripes', default=1,
1519 1519 )
1520 1520 coreconfigitem(
1521 1521 b'web', b'style', default=b'paper',
1522 1522 )
1523 1523 coreconfigitem(
1524 1524 b'web', b'templates', default=None,
1525 1525 )
1526 1526 coreconfigitem(
1527 1527 b'web', b'view', default=b'served', experimental=True,
1528 1528 )
1529 1529 coreconfigitem(
1530 1530 b'worker', b'backgroundclose', default=dynamicdefault,
1531 1531 )
1532 1532 # Windows defaults to a limit of 512 open files. A buffer of 128
1533 1533 # should give us enough headway.
1534 1534 coreconfigitem(
1535 1535 b'worker', b'backgroundclosemaxqueue', default=384,
1536 1536 )
1537 1537 coreconfigitem(
1538 1538 b'worker', b'backgroundcloseminfilecount', default=2048,
1539 1539 )
1540 1540 coreconfigitem(
1541 1541 b'worker', b'backgroundclosethreadcount', default=4,
1542 1542 )
1543 1543 coreconfigitem(
1544 1544 b'worker', b'enabled', default=True,
1545 1545 )
1546 1546 coreconfigitem(
1547 1547 b'worker', b'numcpus', default=None,
1548 1548 )
1549 1549
1550 1550 # Rebase-related configuration moved to core because other extensions are doing
1551 1551 # strange things. For example, shelve imports the extension to reuse some bits
1552 1552 # without formally loading it.
1553 1553 coreconfigitem(
1554 1554 b'commands', b'rebase.requiredest', default=False,
1555 1555 )
1556 1556 coreconfigitem(
1557 1557 b'experimental', b'rebaseskipobsolete', default=True,
1558 1558 )
1559 1559 coreconfigitem(
1560 1560 b'rebase', b'singletransaction', default=False,
1561 1561 )
1562 1562 coreconfigitem(
1563 1563 b'rebase', b'experimental.inmemory', default=False,
1564 1564 )
@@ -1,2877 +1,2878 b''
1 1 The Mercurial system uses a set of configuration files to control
2 2 aspects of its behavior.
3 3
4 4 Troubleshooting
5 5 ===============
6 6
7 7 If you're having problems with your configuration,
8 8 :hg:`config --debug` can help you understand what is introducing
9 9 a setting into your environment.
10 10
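For instance, to see which configuration file supplies a particular value (``ui.username`` here), one could run::

  hg config --debug ui.username
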
11 11 See :hg:`help config.syntax` and :hg:`help config.files`
12 12 for information about how and where to override things.
13 13
14 14 Structure
15 15 =========
16 16
17 17 The configuration files use a simple ini-file format. A configuration
18 18 file consists of sections, led by a ``[section]`` header and followed
19 19 by ``name = value`` entries::
20 20
21 21 [ui]
22 22 username = Firstname Lastname <firstname.lastname@example.net>
23 23 verbose = True
24 24
25 25 The above entries will be referred to as ``ui.username`` and
26 26 ``ui.verbose``, respectively. See :hg:`help config.syntax`.
27 27
28 28 Files
29 29 =====
30 30
31 31 Mercurial reads configuration data from several files, if they exist.
32 32 These files do not exist by default and you will have to create the
33 33 appropriate configuration files yourself:
34 34
35 35 Local configuration is put into the per-repository ``<repo>/.hg/hgrc`` file.
36 36
37 37 Global configuration like the username setting is typically put into:
38 38
39 39 .. container:: windows
40 40
41 41 - ``%USERPROFILE%\mercurial.ini`` (on Windows)
42 42
43 43 .. container:: unix.plan9
44 44
45 45 - ``$HOME/.hgrc`` (on Unix, Plan9)
46 46
47 47 The names of these files depend on the system on which Mercurial is
48 48 installed. ``*.rc`` files from a single directory are read in
49 49 alphabetical order, later ones overriding earlier ones. Where multiple
50 50 paths are given below, settings from earlier paths override later
51 51 ones.
52 52
53 53 .. container:: verbose.unix
54 54
55 55 On Unix, the following files are consulted:
56 56
57 57 - ``<repo>/.hg/hgrc`` (per-repository)
58 58 - ``$HOME/.hgrc`` (per-user)
59 59 - ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
60 60 - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
61 61 - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
62 62 - ``/etc/mercurial/hgrc`` (per-system)
63 63 - ``/etc/mercurial/hgrc.d/*.rc`` (per-system)
64 64 - ``<internal>/*.rc`` (defaults)
65 65
66 66 .. container:: verbose.windows
67 67
68 68 On Windows, the following files are consulted:
69 69
70 70 - ``<repo>/.hg/hgrc`` (per-repository)
71 71 - ``%USERPROFILE%\.hgrc`` (per-user)
72 72 - ``%USERPROFILE%\Mercurial.ini`` (per-user)
73 73 - ``%HOME%\.hgrc`` (per-user)
74 74 - ``%HOME%\Mercurial.ini`` (per-user)
75 75 - ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` (per-system)
76 76 - ``<install-dir>\hgrc.d\*.rc`` (per-installation)
77 77 - ``<install-dir>\Mercurial.ini`` (per-installation)
78 78 - ``%PROGRAMDATA%\Mercurial\hgrc`` (per-system)
79 79 - ``%PROGRAMDATA%\Mercurial\Mercurial.ini`` (per-system)
80 80 - ``%PROGRAMDATA%\Mercurial\hgrc.d\*.rc`` (per-system)
81 81 - ``<internal>/*.rc`` (defaults)
82 82
83 83 .. note::
84 84
85 85 The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
86 86 is used when running 32-bit Python on 64-bit Windows.
87 87
88 88 .. container:: verbose.plan9
89 89
90 90 On Plan9, the following files are consulted:
91 91
92 92 - ``<repo>/.hg/hgrc`` (per-repository)
93 93 - ``$home/lib/hgrc`` (per-user)
94 94 - ``<install-root>/lib/mercurial/hgrc`` (per-installation)
95 95 - ``<install-root>/lib/mercurial/hgrc.d/*.rc`` (per-installation)
96 96 - ``/lib/mercurial/hgrc`` (per-system)
97 97 - ``/lib/mercurial/hgrc.d/*.rc`` (per-system)
98 98 - ``<internal>/*.rc`` (defaults)
99 99
100 100 Per-repository configuration options only apply in a
101 101 particular repository. This file is not version-controlled, and
102 102 will not get transferred during a "clone" operation. Options in
103 103 this file override options in all other configuration files.
104 104
105 105 .. container:: unix.plan9
106 106
107 107 On Plan 9 and Unix, most of this file will be ignored if it doesn't
108 108 belong to a trusted user or to a trusted group. See
109 109 :hg:`help config.trusted` for more details.
110 110
111 111 Per-user configuration file(s) are for the user running Mercurial. Options
112 112 in these files apply to all Mercurial commands executed by this user in any
113 113 directory. Options in these files override per-system and per-installation
114 114 options.
115 115
116 116 Per-installation configuration files are searched for in the
117 117 directory where Mercurial is installed. ``<install-root>`` is the
118 118 parent directory of the **hg** executable (or symlink) being run.
119 119
120 120 .. container:: unix.plan9
121 121
122 122 For example, if installed in ``/shared/tools/bin/hg``, Mercurial
123 123 will look in ``/shared/tools/etc/mercurial/hgrc``. Options in these
124 124 files apply to all Mercurial commands executed by any user in any
125 125 directory.
126 126
127 127 Per-installation configuration files are for the system on
128 128 which Mercurial is running. Options in these files apply to all
129 129 Mercurial commands executed by any user in any directory. Registry
130 130 keys contain PATH-like strings, every part of which must reference
131 131 a ``Mercurial.ini`` file or be a directory where ``*.rc`` files will
132 132 be read. Mercurial checks each of these locations in the specified
133 133 order until one or more configuration files are detected.
134 134
135 135 Per-system configuration files are for the system on which Mercurial
136 136 is running. Options in these files apply to all Mercurial commands
137 137 executed by any user in any directory. Options in these files
138 138 override per-installation options.
139 139
140 140 Mercurial comes with some default configuration. The default configuration
141 141 files are installed with Mercurial and will be overwritten on upgrades. Default
142 142 configuration files should never be edited by users or administrators but can
143 143 be overridden in other configuration files. So far the directory only contains
144 144 merge tool configuration but packagers can also put other default configuration
145 145 there.
146 146
147 147 Syntax
148 148 ======
149 149
150 150 A configuration file consists of sections, led by a ``[section]`` header
151 151 and followed by ``name = value`` entries (sometimes called
152 152 ``configuration keys``)::
153 153
154 154 [spam]
155 155 eggs=ham
156 156 green=
157 157 eggs
158 158
159 159 Each line contains one entry. If the lines that follow are indented,
160 160 they are treated as continuations of that entry. Leading whitespace is
161 161 removed from values. Empty lines are skipped. Lines beginning with
162 162 ``#`` or ``;`` are ignored and may be used to provide comments.
163 163
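For instance, a comment line can document a nearby setting (the values are purely illustrative)::

  [ui]
  ; prefer verbose output
  verbose = True
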
164 164 Configuration keys can be set multiple times, in which case Mercurial
165 165 will use the value that was configured last. As an example::
166 166
167 167 [spam]
168 168 eggs=large
169 169 ham=serrano
170 170 eggs=small
171 171
172 172 This would set the configuration key named ``eggs`` to ``small``.
173 173
174 174 It is also possible to define a section multiple times. A section can
175 175 be redefined on the same and/or on different configuration files. For
176 176 example::
177 177
178 178 [foo]
179 179 eggs=large
180 180 ham=serrano
181 181 eggs=small
182 182
183 183 [bar]
184 184 eggs=ham
185 185 green=
186 186 eggs
187 187
188 188 [foo]
189 189 ham=prosciutto
190 190 eggs=medium
191 191 bread=toasted
192 192
193 193 This would set the ``eggs``, ``ham``, and ``bread`` configuration keys
194 194 of the ``foo`` section to ``medium``, ``prosciutto``, and ``toasted``,
195 195 respectively. As you can see, the only thing that matters is the last
196 196 value that was set for each of the configuration keys.
197 197
198 198 If a configuration key is set multiple times in different
199 199 configuration files the final value will depend on the order in which
200 200 the different configuration files are read, with settings from earlier
201 201 paths overriding later ones as described in the ``Files`` section
202 202 above.
203 203
204 204 A line of the form ``%include file`` will include ``file`` into the
205 205 current configuration file. The inclusion is recursive, which means
206 206 that included files can include other files. Filenames are relative to
207 207 the configuration file in which the ``%include`` directive is found.
208 208 Environment variables and ``~user`` constructs are expanded in
209 209 ``file``. This lets you do something like::
210 210
211 211 %include ~/.hgrc.d/$HOST.rc
212 212
213 213 to include a different configuration file on each computer you use.
214 214
215 215 A line with ``%unset name`` will remove ``name`` from the current
216 216 section, if it has been set previously.
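
For instance, using the section and key from the examples above, a more
specific configuration file can drop a value inherited from a broader one::

    [spam]
    %unset eggs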
217 217
218 218 The values are either free-form text strings, lists of text strings,
219 219 or Boolean values. Boolean values can be set to true using any of "1",
220 220 "yes", "true", or "on" and to false using "0", "no", "false", or "off"
221 221 (all case insensitive).
222 222
223 223 List values are separated by whitespace or comma, except when values are
224 224 placed in double quotation marks::
225 225
226 226 allow_read = "John Doe, PhD", brian, betty
227 227
228 228 Quotation marks can be escaped by prefixing them with a backslash. Only
229 229 quotation marks at the beginning of a word are counted as a quotation
230 230 (e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
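
As a sketch of the escaping rule above (assuming the backslash escape
behaves as described), the following line is intended to define the three
entries ``John "JD" Doe``, ``brian`` and ``betty``::

    allow_read = "John \"JD\" Doe", brian, betty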
231 231
232 232 Sections
233 233 ========
234 234
235 235 This section describes the different sections that may appear in a
236 236 Mercurial configuration file, the purpose of each section, its possible
237 237 keys, and their possible values.
238 238
239 239 ``alias``
240 240 ---------
241 241
242 242 Defines command aliases.
243 243
244 244 Aliases allow you to define your own commands in terms of other
245 245 commands (or aliases), optionally including arguments. Positional
246 246 arguments in the form of ``$1``, ``$2``, etc. in the alias definition
247 247 are expanded by Mercurial before execution. Positional arguments not
248 248 already used by ``$N`` in the definition are put at the end of the
249 249 command to be executed.
250 250
251 251 Alias definitions consist of lines of the form::
252 252
253 253 <alias> = <command> [<argument>]...
254 254
255 255 For example, this definition::
256 256
257 257 latest = log --limit 5
258 258
259 259 creates a new command ``latest`` that shows only the five most recent
260 260 changesets. You can define subsequent aliases using earlier ones::
261 261
262 262 stable5 = latest -b stable
263 263
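Positional arguments work the same way. As a small sketch (the alias name
here is made up), the following alias takes the number of changesets to
show as its first argument, so ``hg lastn 3`` shows the three most recent
changesets::

    lastn = log --limit $1
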
264 264 .. note::
265 265
266 266 It is possible to create aliases with the same names as
267 267 existing commands, which will then override the original
268 268 definitions. This is almost always a bad idea!
269 269
270 270 An alias can start with an exclamation point (``!``) to make it a
271 271 shell alias. A shell alias is executed with the shell and will let you
272 272 run arbitrary commands. As an example, ::
273 273
274 274 echo = !echo $@
275 275
276 276 will let you do ``hg echo foo`` to have ``foo`` printed in your
277 277 terminal. A better example might be::
278 278
279 279 purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
280 280
281 281 which will make ``hg purge`` delete all unknown files in the
282 282 repository in the same manner as the purge extension.
283 283
284 284 Positional arguments like ``$1``, ``$2``, etc. in the alias definition
285 285 expand to the command arguments. Unmatched arguments are
286 286 removed. ``$0`` expands to the alias name and ``$@`` expands to all
287 287 arguments separated by a space. ``"$@"`` (with quotes) expands to all
288 288 arguments quoted individually and separated by a space. These expansions
289 289 happen before the command is passed to the shell.
290 290
291 291 Shell aliases are executed in an environment where ``$HG`` expands to
292 292 the path of the Mercurial that was used to execute the alias. This is
293 293 useful when you want to call further Mercurial commands in a shell
294 294 alias, as was done above for the purge alias. In addition,
295 295 ``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg
296 296 echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.
297 297
298 298 .. note::
299 299
300 300 Some global configuration options such as ``-R`` are
301 301 processed before shell aliases and will thus not be passed to
302 302 aliases.
303 303
304 304
305 305 ``annotate``
306 306 ------------
307 307
308 308 Settings used when displaying file annotations. All values are
309 309 Booleans and default to False. See :hg:`help config.diff` for
310 310 related options for the diff command.
311 311
312 312 ``ignorews``
313 313 Ignore white space when comparing lines.
314 314
315 315 ``ignorewseol``
316 316 Ignore white space at the end of a line when comparing lines.
317 317
318 318 ``ignorewsamount``
319 319 Ignore changes in the amount of white space.
320 320
321 321 ``ignoreblanklines``
322 322 Ignore changes whose lines are all blank.
323 323
324 324
325 325 ``auth``
326 326 --------
327 327
328 328 Authentication credentials and other authentication-like configuration
329 329 for HTTP connections. This section allows you to store usernames and
330 330 passwords for use when logging *into* HTTP servers. See
331 331 :hg:`help config.web` if you want to configure *who* can login to
332 332 your HTTP server.
333 333
334 334 The following options apply to all hosts.
335 335
336 336 ``cookiefile``
337 337 Path to a file containing HTTP cookie lines. Cookies matching a
338 338 host will be sent automatically.
339 339
340 340 The file format uses the Mozilla cookies.txt format, which defines cookies
341 341 on their own lines. Each line contains 7 fields delimited by the tab
342 342 character (domain, is_domain_cookie, path, is_secure, expires, name,
343 343 value). For more info, do an Internet search for "Netscape cookies.txt
344 344 format."
345 345
346 346 Note: the cookies parser does not handle port numbers on domains. You
347 347 will need to remove ports from the domain for the cookie to be recognized.
348 348 This could result in a cookie being disclosed to an unwanted server.
349 349
350 350 The cookies file is read-only.
351 351
352 352 Other options in this section are grouped by name and have the following
353 353 format::
354 354
355 355 <name>.<argument> = <value>
356 356
357 357 where ``<name>`` is used to group arguments into authentication
358 358 entries. Example::
359 359
360 360 foo.prefix = hg.intevation.de/mercurial
361 361 foo.username = foo
362 362 foo.password = bar
363 363 foo.schemes = http https
364 364
365 365 bar.prefix = secure.example.org
366 366 bar.key = path/to/file.key
367 367 bar.cert = path/to/file.cert
368 368 bar.schemes = https
369 369
370 370 Supported arguments:
371 371
372 372 ``prefix``
373 373 Either ``*`` or a URI prefix with or without the scheme part.
374 374 The authentication entry with the longest matching prefix is used
375 375 (where ``*`` matches everything and counts as a match of length
376 376 1). If the prefix doesn't include a scheme, the match is performed
377 377 against the URI with its scheme stripped as well, and the schemes
378 378 argument, q.v., is then consulted.
379 379
380 380 ``username``
381 381 Optional. Username to authenticate with. If not given, and the
382 382 remote site requires basic or digest authentication, the user will
383 383 be prompted for it. Environment variables are expanded in the
384 384 username letting you do ``foo.username = $USER``. If the URI
385 385 includes a username, only ``[auth]`` entries with a matching
386 386 username or without a username will be considered.
387 387
388 388 ``password``
389 389 Optional. Password to authenticate with. If not given, and the
390 390 remote site requires basic or digest authentication, the user
391 391 will be prompted for it.
392 392
393 393 ``key``
394 394 Optional. PEM encoded client certificate key file. Environment
395 395 variables are expanded in the filename.
396 396
397 397 ``cert``
398 398 Optional. PEM encoded client certificate chain file. Environment
399 399 variables are expanded in the filename.
400 400
401 401 ``schemes``
402 402 Optional. Space separated list of URI schemes to use this
403 403 authentication entry with. Only used if the prefix doesn't include
404 404 a scheme. Supported schemes are http and https. They will match
405 405 static-http and static-https respectively, as well.
406 406 (default: https)
407 407
408 408 If no suitable authentication entry is found, the user is prompted
409 409 for credentials as usual if required by the remote.
410 410
411 411 ``color``
412 412 ---------
413 413
414 414 Configure the Mercurial color mode. For details about how to define your custom
415 415 effect and style see :hg:`help color`.
416 416
417 417 ``mode``
418 418 String: control the method used to output color. One of ``auto``, ``ansi``,
419 419 ``win32``, ``terminfo`` or ``debug``. In auto mode, Mercurial will
420 420 use ANSI mode by default (or win32 mode prior to Windows 10) if it detects a
421 421 terminal. Any invalid value will disable color.
422 422
423 423 ``pagermode``
424 424 String: optional override of ``color.mode`` used with pager.
425 425
426 426 On some systems, terminfo mode may cause problems when using
427 427 color with ``less -R`` as a pager program. less with the -R option
428 428 will only display ECMA-48 color codes, and terminfo mode may sometimes
429 429 emit codes that less doesn't understand. You can work around this by
430 430 either using ansi mode (or auto mode), or by using less -r (which will
431 431 pass through all terminal control codes, not just color control
432 432 codes).
433 433
434 434 On some systems (such as MSYS in Windows), the terminal may support
435 435 a different color mode than the pager program.
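
For instance, a minimal configuration that keeps automatic detection for
normal output but forces plain ANSI codes when a pager is involved (working
around the ``less -R`` issue described above) could look like::

    [color]
    mode = auto
    pagermode = ansi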
436 436
437 437 ``commands``
438 438 ------------
439 439
440 440 ``commit.post-status``
441 441 Show status of files in the working directory after successful commit.
442 442 (default: False)
443 443
444 444 ``merge.require-rev``
445 445 Require that the revision to merge the current commit with be specified on
446 446 the command line. If this is enabled and a revision is not specified, the
447 447 command aborts.
448 448 (default: False)
449 449
450 450 ``push.require-revs``
451 451 Require revisions to push be specified using one or more mechanisms such as
452 452 specifying them positionally on the command line, using ``-r``, ``-b``,
453 453 and/or ``-B`` on the command line, or using ``paths.<path>:pushrev`` in the
454 454 configuration. If this is enabled and revisions are not specified, the
455 455 command aborts.
456 456 (default: False)
457 457
458 458 ``resolve.confirm``
459 459 Confirm before performing action if no filename is passed.
460 460 (default: False)
461 461
462 462 ``resolve.explicit-re-merge``
463 463 Require uses of ``hg resolve`` to specify which action it should perform,
464 464 instead of re-merging files by default.
465 465 (default: False)
466 466
467 467 ``resolve.mark-check``
468 468 Determines what level of checking :hg:`resolve --mark` will perform before
469 469 marking files as resolved. Valid values are ``none``, ``warn``, and
470 470 ``abort``. ``warn`` will output a warning listing the file(s) that still
471 471 have conflict markers in them, but will still mark everything resolved.
472 472 ``abort`` will output the same warning but will not mark things as resolved.
473 473 If --all is passed and this is set to ``abort``, only a warning will be
474 474 shown (an error will not be raised).
475 475 (default: ``none``)
476 476
477 477 ``status.relative``
478 478 Make paths in :hg:`status` output relative to the current directory.
479 479 (default: False)
480 480
481 481 ``status.terse``
482 482 Default value for the --terse flag, which condenses status output.
483 483 (default: empty)
484 484
485 485 ``update.check``
486 486 Determines what level of checking :hg:`update` will perform before moving
487 487 to a destination revision. Valid values are ``abort``, ``none``,
488 488 ``linear``, and ``noconflict``. ``abort`` always fails if the working
489 489 directory has uncommitted changes. ``none`` performs no checking, and may
490 490 result in a merge with uncommitted changes. ``linear`` allows any update
491 491 as long as it follows a straight line in the revision history, and may
492 492 trigger a merge with uncommitted changes. ``noconflict`` will allow any
493 493 update which would not trigger a merge with uncommitted changes, if any
494 494 are present.
495 495 (default: ``linear``)
496 496
497 497 ``update.requiredest``
498 498 Require that the user pass a destination when running :hg:`update`.
499 499 For example, :hg:`update .::` will be allowed, but a plain :hg:`update`
500 500 will be disallowed.
501 501 (default: False)
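
As an illustrative sketch (values picked arbitrarily from those documented
above), a configuration enabling a few of these safety checks might look
like::

    [commands]
    commit.post-status = True
    resolve.mark-check = warn
    update.check = noconflict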
502 502
503 503 ``committemplate``
504 504 ------------------
505 505
506 506 ``changeset``
507 507 String: configuration in this section is used as the template to
508 508 customize the text shown in the editor when committing.
509 509
510 510 In addition to pre-defined template keywords, the commit-log-specific
511 511 keyword below can be used for customization:
512 512
513 513 ``extramsg``
514 514 String: Extra message (typically 'Leave message empty to abort
515 515 commit.'). This may be changed by some commands or extensions.
516 516
517 517 For example, the template configuration below shows the same text as
518 518 the one shown by default::
519 519
520 520 [committemplate]
521 521 changeset = {desc}\n\n
522 522 HG: Enter commit message. Lines beginning with 'HG:' are removed.
523 523 HG: {extramsg}
524 524 HG: --
525 525 HG: user: {author}\n{ifeq(p2rev, "-1", "",
526 526 "HG: branch merge\n")
527 527 }HG: branch '{branch}'\n{if(activebookmark,
528 528 "HG: bookmark '{activebookmark}'\n") }{subrepos %
529 529 "HG: subrepo {subrepo}\n" }{file_adds %
530 530 "HG: added {file}\n" }{file_mods %
531 531 "HG: changed {file}\n" }{file_dels %
532 532 "HG: removed {file}\n" }{if(files, "",
533 533 "HG: no files changed\n")}
534 534
535 535 ``diff()``
536 536 String: show the diff (see :hg:`help templates` for detail)
537 537
538 538 Sometimes it is helpful to show the diff of the changeset in the editor without
539 539 having to prefix 'HG: ' to each line so that highlighting works correctly. For
540 540 this, Mercurial provides a special string which will ignore everything below
541 541 it::
542 542
543 543 HG: ------------------------ >8 ------------------------
544 544
545 545 For example, the template configuration below will show the diff below the
546 546 extra message::
547 547
548 548 [committemplate]
549 549 changeset = {desc}\n\n
550 550 HG: Enter commit message. Lines beginning with 'HG:' are removed.
551 551 HG: {extramsg}
552 552 HG: ------------------------ >8 ------------------------
553 553 HG: Do not touch the line above.
554 554 HG: Everything below will be removed.
555 555 {diff()}
556 556
557 557 .. note::
558 558
559 559 For some problematic encodings (see :hg:`help win32mbcs` for
560 560 detail), this customization should be configured carefully, to
561 561 avoid showing broken characters.
562 562
563 563 For example, if a multibyte character ending with backslash (0x5c) is
564 564 followed by the ASCII character 'n' in the customized template,
565 565 the sequence of backslash and 'n' is treated as line-feed unexpectedly
566 566 (and the multibyte character is broken, too).
567 567
568 568 The customized template is used for the commands below (``--edit`` may be
569 569 required):
570 570
571 571 - :hg:`backout`
572 572 - :hg:`commit`
573 573 - :hg:`fetch` (for merge commit only)
574 574 - :hg:`graft`
575 575 - :hg:`histedit`
576 576 - :hg:`import`
577 577 - :hg:`qfold`, :hg:`qnew` and :hg:`qrefresh`
578 578 - :hg:`rebase`
579 579 - :hg:`shelve`
580 580 - :hg:`sign`
581 581 - :hg:`tag`
582 582 - :hg:`transplant`
583 583
584 584 Configuring the items below instead of ``changeset`` allows showing a
585 585 customized message only for specific actions, or showing different
586 586 messages for each action.
587 587
588 588 - ``changeset.backout`` for :hg:`backout`
589 589 - ``changeset.commit.amend.merge`` for :hg:`commit --amend` on merges
590 590 - ``changeset.commit.amend.normal`` for :hg:`commit --amend` on other
591 591 - ``changeset.commit.normal.merge`` for :hg:`commit` on merges
592 592 - ``changeset.commit.normal.normal`` for :hg:`commit` on other
593 593 - ``changeset.fetch`` for :hg:`fetch` (implying merge commit)
594 594 - ``changeset.gpg.sign`` for :hg:`sign`
595 595 - ``changeset.graft`` for :hg:`graft`
596 596 - ``changeset.histedit.edit`` for ``edit`` of :hg:`histedit`
597 597 - ``changeset.histedit.fold`` for ``fold`` of :hg:`histedit`
598 598 - ``changeset.histedit.mess`` for ``mess`` of :hg:`histedit`
599 599 - ``changeset.histedit.pick`` for ``pick`` of :hg:`histedit`
600 600 - ``changeset.import.bypass`` for :hg:`import --bypass`
601 601 - ``changeset.import.normal.merge`` for :hg:`import` on merges
602 602 - ``changeset.import.normal.normal`` for :hg:`import` on other
603 603 - ``changeset.mq.qnew`` for :hg:`qnew`
604 604 - ``changeset.mq.qfold`` for :hg:`qfold`
605 605 - ``changeset.mq.qrefresh`` for :hg:`qrefresh`
606 606 - ``changeset.rebase.collapse`` for :hg:`rebase --collapse`
607 607 - ``changeset.rebase.merge`` for :hg:`rebase` on merges
608 608 - ``changeset.rebase.normal`` for :hg:`rebase` on other
609 609 - ``changeset.shelve.shelve`` for :hg:`shelve`
610 610 - ``changeset.tag.add`` for :hg:`tag` without ``--remove``
611 611 - ``changeset.tag.remove`` for :hg:`tag --remove`
612 612 - ``changeset.transplant.merge`` for :hg:`transplant` on merges
613 613 - ``changeset.transplant.normal`` for :hg:`transplant` on other
614 614
615 615 These dot-separated lists of names are treated as hierarchical ones.
616 616 For example, ``changeset.tag.remove`` customizes the commit message
617 617 only for :hg:`tag --remove`, but ``changeset.tag`` customizes the
618 618 commit message for :hg:`tag` regardless of ``--remove`` option.
619 619
620 620 When the external editor is invoked for a commit, the corresponding
621 621 dot-separated list of names without the ``changeset.`` prefix
622 622 (e.g. ``commit.normal.normal``) is in the ``HGEDITFORM`` environment
623 623 variable.
624 624
625 625 In this section, items other than ``changeset`` can be referenced from
626 626 other items. For example, the configuration below that lists committed
627 627 files can be referenced as ``{listupfiles}``::
628 628
629 629 [committemplate]
630 630 listupfiles = {file_adds %
631 631 "HG: added {file}\n" }{file_mods %
632 632 "HG: changed {file}\n" }{file_dels %
633 633 "HG: removed {file}\n" }{if(files, "",
634 634 "HG: no files changed\n")}
635 635
636 636 ``decode/encode``
637 637 -----------------
638 638
639 639 Filters for transforming files on checkout/checkin. This would
640 640 typically be used for newline processing or other
641 641 localization/canonicalization of files.
642 642
643 643 Filters consist of a filter pattern followed by a filter command.
644 644 Filter patterns are globs by default, rooted at the repository root.
645 645 For example, to match any file ending in ``.txt`` in the root
646 646 directory only, use the pattern ``*.txt``. To match any file ending
647 647 in ``.c`` anywhere in the repository, use the pattern ``**.c``.
648 648 For each file only the first matching filter applies.
649 649
650 650 The filter command can start with a specifier, either ``pipe:`` or
651 651 ``tempfile:``. If no specifier is given, ``pipe:`` is used by default.
652 652
653 653 A ``pipe:`` command must accept data on stdin and return the transformed
654 654 data on stdout.
655 655
656 656 Pipe example::
657 657
658 658 [encode]
659 659 # uncompress gzip files on checkin to improve delta compression
660 660 # note: not necessarily a good idea, just an example
661 661 *.gz = pipe: gunzip
662 662
663 663 [decode]
664 664 # recompress gzip files when writing them to the working dir (we
665 665 # can safely omit "pipe:", because it's the default)
666 666 *.gz = gzip
667 667
668 668 A ``tempfile:`` command is a template. The string ``INFILE`` is replaced
669 669 with the name of a temporary file that contains the data to be
670 670 filtered by the command. The string ``OUTFILE`` is replaced with the name
671 671 of an empty temporary file, where the filtered data must be written by
672 672 the command.
673 673
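Tempfile example (a hedged sketch; it assumes a ``dos2unix`` binary that can
write its output to a separate file with ``-n``)::

    [encode]
    # normalize line endings on checkin using a temporary file pair
    **.txt = tempfile: dos2unix -n INFILE OUTFILE
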
674 674 .. container:: windows
675 675
676 676 .. note::
677 677
678 678 The tempfile mechanism is recommended for Windows systems,
679 679 where the standard shell I/O redirection operators often have
680 680 strange effects and may corrupt the contents of your files.
681 681
682 682 This filter mechanism is used internally by the ``eol`` extension to
683 683 translate line ending characters between Windows (CRLF) and Unix (LF)
684 684 format. We suggest you use the ``eol`` extension for convenience.
685 685
686 686
687 687 ``defaults``
688 688 ------------
689 689
690 690 (defaults are deprecated. Don't use them. Use aliases instead.)
691 691
692 692 Use the ``[defaults]`` section to define command defaults, i.e. the
693 693 default options/arguments to pass to the specified commands.
694 694
695 695 The following example makes :hg:`log` run in verbose mode, and
696 696 :hg:`status` show only the modified files, by default::
697 697
698 698 [defaults]
699 699 log = -v
700 700 status = -m
701 701
702 702 The actual commands, instead of their aliases, must be used when
703 703 defining command defaults. The command defaults will also be applied
704 704 to the aliases of the commands defined.
705 705
706 706
707 707 ``diff``
708 708 --------
709 709
710 710 Settings used when displaying diffs. Everything except for ``unified``
711 711 is a Boolean and defaults to False. See :hg:`help config.annotate`
712 712 for related options for the annotate command.
713 713
714 714 ``git``
715 715 Use git extended diff format.
716 716
717 717 ``nobinary``
718 718 Omit git binary patches.
719 719
720 720 ``nodates``
721 721 Don't include dates in diff headers.
722 722
723 723 ``noprefix``
724 724 Omit 'a/' and 'b/' prefixes from filenames. Ignored in plain mode.
725 725
726 726 ``showfunc``
727 727 Show which function each change is in.
728 728
729 729 ``ignorews``
730 730 Ignore white space when comparing lines.
731 731
732 732 ``ignorewsamount``
733 733 Ignore changes in the amount of white space.
734 734
735 735 ``ignoreblanklines``
736 736 Ignore changes whose lines are all blank.
737 737
738 738 ``unified``
739 739 Number of lines of context to show.
740 740
741 741 ``word-diff``
742 742 Highlight changed words.
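
For example, a configuration that always uses the git diff format and shows
function context (a common combination; adjust to taste) might look like::

    [diff]
    git = True
    showfunc = True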
743 743
744 744 ``email``
745 745 ---------
746 746
747 747 Settings for extensions that send email messages.
748 748
749 749 ``from``
750 750 Optional. Email address to use in "From" header and SMTP envelope
751 751 of outgoing messages.
752 752
753 753 ``to``
754 754 Optional. Comma-separated list of recipients' email addresses.
755 755
756 756 ``cc``
757 757 Optional. Comma-separated list of carbon copy recipients'
758 758 email addresses.
759 759
760 760 ``bcc``
761 761 Optional. Comma-separated list of blind carbon copy recipients'
762 762 email addresses.
763 763
764 764 ``method``
765 765 Optional. Method to use to send email messages. If value is ``smtp``
766 766 (default), use SMTP (see the ``[smtp]`` section for configuration).
767 767 Otherwise, use as name of program to run that acts like sendmail
768 768 (takes ``-f`` option for sender, list of recipients on command line,
769 769 message on stdin). Normally, setting this to ``sendmail`` or
770 770 ``/usr/sbin/sendmail`` is enough to use sendmail to send messages.
771 771
772 772 ``charsets``
773 773 Optional. Comma-separated list of character sets considered
774 774 convenient for recipients. Addresses, headers, and parts not
775 775 containing patches of outgoing messages will be encoded in the
776 776 first character set to which conversion from local encoding
777 777 (``$HGENCODING``, ``ui.fallbackencoding``) succeeds. If correct
778 778 conversion fails, the text in question is sent as is.
779 779 (default: '')
780 780
781 781 Order of outgoing email character sets:
782 782
783 783 1. ``us-ascii``: always first, regardless of settings
784 784 2. ``email.charsets``: in order given by user
785 785 3. ``ui.fallbackencoding``: if not in email.charsets
786 786 4. ``$HGENCODING``: if not in email.charsets
787 787 5. ``utf-8``: always last, regardless of settings
788 788
789 789 Email example::
790 790
791 791 [email]
792 792 from = Joseph User <joe.user@example.com>
793 793 method = /usr/sbin/sendmail
794 794 # charsets for western Europeans
795 795 # us-ascii, utf-8 omitted, as they are tried first and last
796 796 charsets = iso-8859-1, iso-8859-15, windows-1252
797 797
798 798
799 799 ``extensions``
800 800 --------------
801 801
802 802 Mercurial has an extension mechanism for adding new features. To
803 803 enable an extension, create an entry for it in this section.
804 804
805 805 If you know that the extension is already in Python's search path,
806 806 you can give the name of the module, followed by ``=``, with nothing
807 807 after the ``=``.
808 808
809 809 Otherwise, give a name that you choose, followed by ``=``, followed by
810 810 the path to the ``.py`` file (including the file name extension) that
811 811 defines the extension.
812 812
813 813 To explicitly disable an extension that is enabled in an hgrc of
814 814 broader scope, prepend its path with ``!``, as in ``foo = !/ext/path``
815 815 or ``foo = !`` when path is not supplied.
816 816
817 817 Example for ``~/.hgrc``::
818 818
819 819 [extensions]
820 820 # (the churn extension will get loaded from Mercurial's path)
821 821 churn =
822 822 # (this extension will get loaded from the file specified)
823 823 myfeature = ~/.hgext/myfeature.py
824 824
825 825
826 826 ``format``
827 827 ----------
828 828
829 829 Configuration that controls the repository format. Newer format options are more
830 830 powerful, but incompatible with some older versions of Mercurial. Format options
831 831 are considered at repository initialization only. You need to make a new clone
832 832 for config changes to be taken into account.
833 833
834 834 For more details about repository format and version compatibility, see
835 835 https://www.mercurial-scm.org/wiki/MissingRequirement
836 836
837 837 ``usegeneraldelta``
838 838 Enable or disable the "generaldelta" repository format which improves
839 839 repository compression by allowing "revlog" to store deltas against
840 840 arbitrary revisions instead of the previously stored one. This provides
841 841 significant improvement for repositories with branches.
842 842
843 843 Repositories with this on-disk format require Mercurial version 1.9.
844 844
845 845 Enabled by default.
846 846
847 847 ``dotencode``
848 848 Enable or disable the "dotencode" repository format which enhances
849 849 the "fncache" repository format (which has to be enabled to use
850 850 dotencode) to avoid issues with filenames starting with "._" on
851 851 Mac OS X and spaces on Windows.
852 852
853 853 Repositories with this on-disk format require Mercurial version 1.7.
854 854
855 855 Enabled by default.
856 856
857 857 ``usefncache``
858 858 Enable or disable the "fncache" repository format which enhances
859 859 the "store" repository format (which has to be enabled to use
860 860 fncache) to allow longer filenames and avoid using Windows
861 861 reserved names, e.g. "nul".
862 862
863 863 Repositories with this on-disk format require Mercurial version 1.1.
864 864
865 865 Enabled by default.
866 866
867 867 ``usestore``
868 868 Enable or disable the "store" repository format which improves
869 869 compatibility with systems that fold case or otherwise mangle
870 870 filenames. Disabling this option will allow you to store longer filenames
871 871 in some situations at the expense of compatibility.
872 872
873 873 Repositories with this on-disk format require Mercurial version 0.9.4.
874 874
875 875 Enabled by default.
876 876
877 877 ``sparse-revlog``
878 878 Enable or disable the ``sparse-revlog`` delta strategy. This format improves
879 879 delta re-use inside revlog. For very branchy repositories, it results in a
880 880 smaller store. For repositories with many revisions, it also helps
881 881 performance (by using shortened delta chains).
882 882
883 883 Repositories with this on-disk format require Mercurial version 4.7.
884 884
885 885 Enabled by default.
886 886
887 887 ``revlog-compression``
888 888 Compression algorithm used by revlog. Supported values are `zlib` and
889 889 `zstd`. The `zlib` engine is the historical default of Mercurial. `zstd` is
890 890 a newer format that is usually a net win over `zlib`, operating faster at
891 better compression rates. Use `zstd` to reduce CPU usage.
891 better compression rates. Use `zstd` to reduce CPU usage. Multiple values
892 can be specified; the first available one will be used.
892 893
893 894 On some systems, the Mercurial installation may lack `zstd` support.
894 895
895 896 Default is `zlib`.
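
As a sketch using the list syntax described in the ``Syntax`` section above,
the following prefers `zstd` where the installation supports it and falls
back to `zlib` otherwise (remember that format options only apply to newly
created repositories)::

    [format]
    revlog-compression = zstd, zlib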
896 897
897 898 ``bookmarks-in-store``
898 899 Store bookmarks in .hg/store/. This means that bookmarks are shared when
899 900 using `hg share` regardless of the `-B` option.
900 901
901 902 Repositories with this on-disk format require Mercurial version 5.1.
902 903
903 904 Disabled by default.
904 905
905 906
906 907 ``graph``
907 908 ---------
908 909
909 910 Web graph view configuration. This section lets you change the display
910 911 properties of graph elements per branch, for instance to make the
911 912 ``default`` branch stand out.
912 913
913 914 Each line has the following format::
914 915
915 916 <branch>.<argument> = <value>
916 917
917 918 where ``<branch>`` is the name of the branch being
918 919 customized. Example::
919 920
920 921 [graph]
921 922 # 2px width
922 923 default.width = 2
923 924 # red color
924 925 default.color = FF0000
925 926
926 927 Supported arguments:
927 928
928 929 ``width``
929 930 Set branch edges width in pixels.
930 931
931 932 ``color``
932 933 Set branch edges color in hexadecimal RGB notation.
933 934
934 935 ``hooks``
935 936 ---------
936 937
937 938 Commands or Python functions that get automatically executed by
938 939 various actions such as starting or finishing a commit. Multiple
939 940 hooks can be run for the same action by appending a suffix to the
940 941 action. Overriding a site-wide hook can be done by changing its
941 942 value or setting it to an empty string. Hooks can be prioritized
942 943 by adding a prefix of ``priority.`` to the hook name on a new line
943 944 and setting the priority. The default priority is 0.
944 945
945 946 Example ``.hg/hgrc``::
946 947
947 948 [hooks]
948 949 # update working directory after adding changesets
949 950 changegroup.update = hg update
950 951 # do not use the site-wide hook
951 952 incoming =
952 953 incoming.email = /my/email/hook
953 954 incoming.autobuild = /my/build/hook
954 955 # force autobuild hook to run before other incoming hooks
955 956 priority.incoming.autobuild = 1
956 957
957 958 Most hooks are run with environment variables set that give useful
958 959 additional information. For each hook below, the environment variables
959 960 it is passed are listed with names in the form ``$HG_foo``. The
960 961 ``$HG_HOOKTYPE`` and ``$HG_HOOKNAME`` variables are set for all hooks.
961 962 They contain the type of hook which triggered the run and the full name
962 963 of the hook in the config, respectively. In the example above, this will
963 964 be ``$HG_HOOKTYPE=incoming`` and ``$HG_HOOKNAME=incoming.email``.
964 965
965 966 .. container:: windows
966 967
967 968 Some basic Unix syntax can be enabled for portability, including ``$VAR``
968 969 and ``${VAR}`` style variables. A ``~`` followed by ``\`` or ``/`` will
969 970 be expanded to ``%USERPROFILE%`` to simulate a subset of tilde expansion
970 971 on Unix. To use a literal ``$`` or ``~``, it must be escaped with a backslash
971 972 or placed inside strong quotes. Strong quotes will be replaced by
972 973 double quotes after processing.
973 974
974 975 This feature is enabled by adding a prefix of ``tonative.`` to the hook
975 976 name on a new line, and setting it to ``True``. For example::
976 977
977 978 [hooks]
978 979 incoming.autobuild = /my/build/hook
979 980 # enable translation to cmd.exe syntax for autobuild hook
980 981 tonative.incoming.autobuild = True
981 982
982 983 ``changegroup``
983 984 Run after a changegroup has been added via push, pull or unbundle. The ID of
984 985 the first new changeset is in ``$HG_NODE`` and last is in ``$HG_NODE_LAST``.
985 986 The URL from which changes came is in ``$HG_URL``.
986 987
987 988 ``commit``
988 989 Run after a changeset has been created in the local repository. The ID
989 990 of the newly created changeset is in ``$HG_NODE``. Parent changeset
990 991 IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
991 992
992 993 ``incoming``
993 994 Run after a changeset has been pulled, pushed, or unbundled into
994 995 the local repository. The ID of the newly arrived changeset is in
995 996 ``$HG_NODE``. The URL that was source of the changes is in ``$HG_URL``.
996 997
997 998 ``outgoing``
998 999 Run after sending changes from the local repository to another. The ID of
999 1000 first changeset sent is in ``$HG_NODE``. The source of operation is in
1000 1001 ``$HG_SOURCE``. Also see :hg:`help config.hooks.preoutgoing`.
1001 1002
1002 1003 ``post-<command>``
1003 1004 Run after successful invocations of the associated command. The
1004 1005 contents of the command line are passed as ``$HG_ARGS`` and the result
1005 1006 code in ``$HG_RESULT``. Parsed command line arguments are passed as
1006 1007 ``$HG_PATS`` and ``$HG_OPTS``. These contain string representations of
1007 1008 the python data internally passed to <command>. ``$HG_OPTS`` is a
1008 1009 dictionary of options (with unspecified options set to their defaults).
1009 1010 ``$HG_PATS`` is a list of arguments. Hook failure is ignored.
1010 1011
1011 1012 ``fail-<command>``
1012 1013 Run after a failed invocation of an associated command. The contents
1013 1014 of the command line are passed as ``$HG_ARGS``. Parsed command line
1014 1015 arguments are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain
1015 1016 string representations of the python data internally passed to
1016 1017 <command>. ``$HG_OPTS`` is a dictionary of options (with unspecified
1017 1018 options set to their defaults). ``$HG_PATS`` is a list of arguments.
1018 1019 Hook failure is ignored.
1019 1020
1020 1021 ``pre-<command>``
1021 1022 Run before executing the associated command. The contents of the
1022 1023 command line are passed as ``$HG_ARGS``. Parsed command line arguments
1023 1024 are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain string
1024 1025 representations of the data internally passed to <command>. ``$HG_OPTS``
1025 1026 is a dictionary of options (with unspecified options set to their
1026 1027 defaults). ``$HG_PATS`` is a list of arguments. If the hook returns
1027 1028 failure, the command doesn't execute and Mercurial returns the failure
1028 1029 code.
1029 1030
1030 1031 ``prechangegroup``
1031 1032 Run before a changegroup is added via push, pull or unbundle. Exit
1032 1033 status 0 allows the changegroup to proceed. A non-zero status will
1033 1034 cause the push, pull or unbundle to fail. The URL from which changes
1034 1035 will come is in ``$HG_URL``.
1035 1036
1036 1037 ``precommit``
1037 1038 Run before starting a local commit. Exit status 0 allows the
1038 1039 commit to proceed. A non-zero status will cause the commit to fail.
1039 1040 Parent changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1040 1041
1041 1042 ``prelistkeys``
1042 1043 Run before listing pushkeys (like bookmarks) in the
1043 1044 repository. A non-zero status will cause failure. The key namespace is
1044 1045 in ``$HG_NAMESPACE``.
1045 1046
1046 1047 ``preoutgoing``
1047 1048 Run before collecting changes to send from the local repository to
1048 1049 another. A non-zero status will cause failure. This lets you prevent
1049 1050 pull over HTTP or SSH. It can also prevent propagating commits (via
1050 1051 local pull, push (outbound) or bundle commands), but not completely,
1051 1052 since you can just copy files instead. The source of operation is in
1052 1053 ``$HG_SOURCE``. If "serve", the operation is happening on behalf of a remote
1053 1054 SSH or HTTP repository. If "push", "pull" or "bundle", the operation
1054 1055 is happening on behalf of a repository on the same system.
1055 1056
1056 1057 ``prepushkey``
1057 1058 Run before a pushkey (like a bookmark) is added to the
1058 1059 repository. A non-zero status will cause the key to be rejected. The
1059 1060 key namespace is in ``$HG_NAMESPACE``, the key is in ``$HG_KEY``,
1060 1061 the old value (if any) is in ``$HG_OLD``, and the new value is in
1061 1062 ``$HG_NEW``.
1062 1063
1063 1064 ``pretag``
1064 1065 Run before creating a tag. Exit status 0 allows the tag to be
1065 1066 created. A non-zero status will cause the tag to fail. The ID of the
1066 1067 changeset to tag is in ``$HG_NODE``. The name of tag is in ``$HG_TAG``. The
1067 1068 tag is local if ``$HG_LOCAL=1``, or in the repository if ``$HG_LOCAL=0``.
1068 1069
1069 1070 ``pretxnopen``
1070 1071 Run before any new repository transaction is opened. The reason for the
1071 1072 transaction will be in ``$HG_TXNNAME``, and a unique identifier for the
1072 1073 transaction will be in ``HG_TXNID``. A non-zero status will prevent the
1073 1074 transaction from being opened.
1074 1075
1075 1076 ``pretxnclose``
1076 1077 Run right before the transaction is actually finalized. Any repository change
1077 1078 will be visible to the hook program. This lets you validate the transaction
1078 1079 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1079 1080 status will cause the transaction to be rolled back. The reason for the
1080 1081 transaction opening will be in ``$HG_TXNNAME``, and a unique identifier for
1081 1082 the transaction will be in ``HG_TXNID``. The rest of the available data will
1082 1083 vary according to the transaction type. New changesets will add ``$HG_NODE``
1083 1084 (the ID of the first added changeset), ``$HG_NODE_LAST`` (the ID of the last
1084 1085 added changeset), ``$HG_URL`` and ``$HG_SOURCE`` variables. Bookmark and
1085 1086 phase changes will set ``HG_BOOKMARK_MOVED`` and ``HG_PHASES_MOVED`` to ``1``
1086 1087 respectively, etc.
1087 1088
1088 1089 ``pretxnclose-bookmark``
1089 1090 Run right before a bookmark change is actually finalized. Any repository
1090 1091 change will be visible to the hook program. This lets you validate the
1091 1092 transaction content or change it. Exit status 0 allows the commit to
1092 1093 proceed. A non-zero status will cause the transaction to be rolled back.
1093 1094 The name of the bookmark will be available in ``$HG_BOOKMARK``, the new
1094 1095 bookmark location will be available in ``$HG_NODE`` while the previous
1095 1096 location will be available in ``$HG_OLDNODE``. In case of a bookmark
1096 1097 creation ``$HG_OLDNODE`` will be empty. In case of deletion ``$HG_NODE``
1097 1098 will be empty.
1098 1099 In addition, the reason for the transaction opening will be in
1099 1100 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1100 1101 ``HG_TXNID``.
1101 1102
1102 1103 ``pretxnclose-phase``
1103 1104 Run right before a phase change is actually finalized. Any repository change
1104 1105 will be visible to the hook program. This lets you validate the transaction
1105 1106 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1106 1107 status will cause the transaction to be rolled back. The hook is called
1107 1108 multiple times, once for each revision affected by a phase change.
1108 1109 The affected node is available in ``$HG_NODE``, the new phase in ``$HG_PHASE``,
1109 1110 and the previous phase in ``$HG_OLDPHASE``. In the case of a new node, ``$HG_OLDPHASE``
1110 1111 will be empty. In addition, the reason for the transaction opening will be in
1111 1112 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1112 1113 ``HG_TXNID``. The hook is also run for newly added revisions. In this case
1113 1114 the ``$HG_OLDPHASE`` entry will be empty.
1114 1115
1115 1116 ``txnclose``
1116 1117 Run after any repository transaction has been committed. At this
1117 1118 point, the transaction can no longer be rolled back. The hook will run
1118 1119 after the lock is released. See :hg:`help config.hooks.pretxnclose` for
1119 1120 details about available variables.
1120 1121
1121 1122 ``txnclose-bookmark``
1122 1123 Run after any bookmark change has been committed. At this point, the
1123 1124 transaction can no longer be rolled back. The hook will run after the lock
1124 1125 is released. See :hg:`help config.hooks.pretxnclose-bookmark` for details
1125 1126 about available variables.
1126 1127
1127 1128 ``txnclose-phase``
1128 1129 Run after any phase change has been committed. At this point, the
1129 1130 transaction can no longer be rolled back. The hook will run after the lock
1130 1131 is released. See :hg:`help config.hooks.pretxnclose-phase` for details about
1131 1132 available variables.
1132 1133
1133 1134 ``txnabort``
1134 1135 Run when a transaction is aborted. See :hg:`help config.hooks.pretxnclose`
1135 1136 for details about available variables.
1136 1137
1137 1138 ``pretxnchangegroup``
1138 1139 Run after a changegroup has been added via push, pull or unbundle, but before
1139 1140 the transaction has been committed. The changegroup is visible to the hook
1140 1141 program. This allows validation of incoming changes before accepting them.
1141 1142 The ID of the first new changeset is in ``$HG_NODE`` and last is in
1142 1143 ``$HG_NODE_LAST``. Exit status 0 allows the transaction to commit. A non-zero
1143 1144 status will cause the transaction to be rolled back, and the push, pull or
1144 1145 unbundle will fail. The URL that was the source of changes is in ``$HG_URL``.
1145 1146
1146 1147 ``pretxncommit``
1147 1148 Run after a changeset has been created, but before the transaction is
1148 1149 committed. The changeset is visible to the hook program. This allows
1149 1150 validation of the commit message and changes. Exit status 0 allows the
1150 1151 commit to proceed. A non-zero status will cause the transaction to
1151 1152 be rolled back. The ID of the new changeset is in ``$HG_NODE``. The parent
1152 1153 changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1153 1154
1154 1155 ``preupdate``
1155 1156 Run before updating the working directory. Exit status 0 allows
1156 1157 the update to proceed. A non-zero status will prevent the update.
1157 1158 The changeset ID of first new parent is in ``$HG_PARENT1``. If updating to a
1158 1159 merge, the ID of second new parent is in ``$HG_PARENT2``.
1159 1160
1160 1161 ``listkeys``
1161 1162 Run after listing pushkeys (like bookmarks) in the repository. The
1162 1163 key namespace is in ``$HG_NAMESPACE``. ``$HG_VALUES`` is a
1163 1164 dictionary containing the keys and values.
1164 1165
1165 1166 ``pushkey``
1166 1167 Run after a pushkey (like a bookmark) is added to the
1167 1168 repository. The key namespace is in ``$HG_NAMESPACE``, the key is in
1168 1169 ``$HG_KEY``, the old value (if any) is in ``$HG_OLD``, and the new
1169 1170 value is in ``$HG_NEW``.
1170 1171
1171 1172 ``tag``
1172 1173 Run after a tag is created. The ID of the tagged changeset is in ``$HG_NODE``.
1173 1174 The name of tag is in ``$HG_TAG``. The tag is local if ``$HG_LOCAL=1``, or in
1174 1175 the repository if ``$HG_LOCAL=0``.
1175 1176
1176 1177 ``update``
1177 1178 Run after updating the working directory. The changeset ID of first
1178 1179 new parent is in ``$HG_PARENT1``. If updating to a merge, the ID of second new
1179 1180 parent is in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the
1180 1181 update failed (e.g. because conflicts were not resolved), ``$HG_ERROR=1``.
1181 1182
1182 1183 .. note::
1183 1184
1184 1185 It is generally better to use standard hooks rather than the
1185 1186 generic pre- and post- command hooks, as they are guaranteed to be
1186 1187 called in the appropriate contexts for influencing transactions.
1187 1188 Also, hooks like "commit" will be called in all contexts that
1188 1189 generate a commit (e.g. tag) and not just the commit command.
1189 1190
1190 1191 .. note::
1191 1192
1192 1193 Environment variables with empty values may not be passed to
1193 1194 hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
1194 1195 will have an empty value under Unix-like platforms for non-merge
1195 1196 changesets, while it will not be available at all under Windows.
1196 1197
1197 1198 The syntax for Python hooks is as follows::
1198 1199
1199 1200 hookname = python:modulename.submodule.callable
1200 1201 hookname = python:/path/to/python/module.py:callable
1201 1202
1202 1203 Python hooks are run within the Mercurial process. Each hook is
1203 1204 called with at least three keyword arguments: a ui object (keyword
1204 1205 ``ui``), a repository object (keyword ``repo``), and a ``hooktype``
1205 1206 keyword that tells what kind of hook is used. Arguments listed as
1206 1207 environment variables above are passed as keyword arguments, with no
1207 1208 ``HG_`` prefix, and names in lower case.
1208 1209
1209 1210 If a Python hook returns a "true" value or raises an exception, this
1210 1211 is treated as a failure.
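
As a minimal sketch (the file path, hook name and function name are made
up), a Python ``pretxncommit`` hook that rejects empty commit messages could
look like this; the ``node`` keyword argument corresponds to the
``$HG_NODE`` environment variable described above::

    # /path/to/checkmsg.py
    def checkmessage(ui, repo, hooktype, node=None, **kwargs):
        """Reject changesets whose description is empty."""
        ctx = repo[node]
        if not ctx.description().strip():
            ui.warn(b'commit message must not be empty\n')
            return True  # a "true" return value means failure
        return False

    # enabled from a configuration file with:
    # [hooks]
    # pretxncommit.checkmsg = python:/path/to/checkmsg.py:checkmessage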
1211 1212
1212 1213
1213 1214 ``hostfingerprints``
1214 1215 --------------------
1215 1216
1216 1217 (Deprecated. Use ``[hostsecurity]``'s ``fingerprints`` options instead.)
1217 1218
1218 1219 Fingerprints of the certificates of known HTTPS servers.
1219 1220
1220 1221 An HTTPS connection to a server with a fingerprint configured here will
1221 1222 only succeed if the server's certificate matches the fingerprint.
1222 1223 This is very similar to how ssh known hosts works.
1223 1224
1224 1225 The fingerprint is the SHA-1 hash value of the DER encoded certificate.
1225 1226 Multiple values can be specified (separated by spaces or commas). This can
1226 1227 be used to define both old and new fingerprints while a host transitions
1227 1228 to a new certificate.
1228 1229
1229 1230 The CA chain and web.cacerts are not used for servers with a fingerprint.
1230 1231
1231 1232 For example::
1232 1233
1233 1234 [hostfingerprints]
1234 1235 hg.intevation.de = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1235 1236 hg.intevation.org = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1236 1237
1237 1238 ``hostsecurity``
1238 1239 ----------------
1239 1240
1240 1241 Used to specify global and per-host security settings for connecting to
1241 1242 other machines.
1242 1243
1243 1244 The following options control default behavior for all hosts.
1244 1245
1245 1246 ``ciphers``
1246 1247 Defines the cryptographic ciphers to use for connections.
1247 1248
1248 1249 Value must be a valid OpenSSL Cipher List Format as documented at
1249 1250 https://www.openssl.org/docs/manmaster/apps/ciphers.html#CIPHER-LIST-FORMAT.
1250 1251
1251 1252 This setting is for advanced users only. Setting to incorrect values
1252 1253 can significantly lower connection security or decrease performance.
1253 1254 You have been warned.
1254 1255
1255 1256 This option requires Python 2.7.
1256 1257
1257 1258 ``minimumprotocol``
1258 1259 Defines the minimum channel encryption protocol to use.
1259 1260
1260 1261 By default, the highest version of TLS supported by both client and server
1261 1262 is used.
1262 1263
1263 1264 Allowed values are: ``tls1.0``, ``tls1.1``, ``tls1.2``.
1264 1265
1265 1266 When running on an old Python version, only ``tls1.0`` is allowed since
1266 1267 old versions of Python only support up to TLS 1.0.
1267 1268
1268 1269 When running a Python that supports modern TLS versions, the default is
1269 1270 ``tls1.1``. ``tls1.0`` can still be used to allow TLS 1.0. However, this
1270 1271 weakens security and should only be used as a feature of last resort if
1271 1272 a server does not support TLS 1.1+.
1272 1273
1273 1274 Options in the ``[hostsecurity]`` section can have the form
1274 1275 ``hostname``:``setting``. This allows multiple settings to be defined on a
1275 1276 per-host basis.
1276 1277
1277 1278 The following per-host settings can be defined.
1278 1279
1279 1280 ``ciphers``
1280 1281 This behaves like ``ciphers`` as described above except it only applies
1281 1282 to the host on which it is defined.
1282 1283
1283 1284 ``fingerprints``
1284 1285 A list of hashes of the DER encoded peer/remote certificate. Values have
1285 1286 the form ``algorithm``:``fingerprint``. e.g.
1286 1287 ``sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2``.
1287 1288 In addition, colons (``:``) can appear in the fingerprint part.
1288 1289
1289 1290 The following algorithms/prefixes are supported: ``sha1``, ``sha256``,
1290 1291 ``sha512``.
1291 1292
1292 1293 Use of ``sha256`` or ``sha512`` is preferred.
1293 1294
1294 1295 If a fingerprint is specified, the CA chain is not validated for this
1295 1296 host and Mercurial will require the remote certificate to match one
1296 1297 of the fingerprints specified. This means if the server updates its
1297 1298 certificate, Mercurial will abort until a new fingerprint is defined.
1298 1299 This can provide stronger security than traditional CA-based validation
1299 1300 at the expense of convenience.
1300 1301
1301 1302 This option takes precedence over ``verifycertsfile``.
1302 1303
1303 1304 ``minimumprotocol``
1304 1305 This behaves like ``minimumprotocol`` as described above except it
1305 1306 only applies to the host on which it is defined.
1306 1307
1307 1308 ``verifycertsfile``
1308 1309 Path to a file containing a list of PEM encoded certificates used to
1309 1310 verify the server certificate. Environment variables and ``~user``
1310 1311 constructs are expanded in the filename.
1311 1312
1312 1313 The server certificate or the certificate's certificate authority (CA)
1313 1314 must match a certificate from this file or certificate verification
1314 1315 will fail and connections to the server will be refused.
1315 1316
1316 1317 If defined, only certificates provided by this file will be used:
1317 1318 ``web.cacerts`` and any system/default certificates will not be
1318 1319 used.
1319 1320
1320 1321 This option has no effect if the per-host ``fingerprints`` option
1321 1322 is set.
1322 1323
1323 1324 The format of the file is as follows::
1324 1325
1325 1326 -----BEGIN CERTIFICATE-----
1326 1327 ... (certificate in base64 PEM encoding) ...
1327 1328 -----END CERTIFICATE-----
1328 1329 -----BEGIN CERTIFICATE-----
1329 1330 ... (certificate in base64 PEM encoding) ...
1330 1331 -----END CERTIFICATE-----
1331 1332
1332 1333 For example::
1333 1334
1334 1335 [hostsecurity]
1335 1336 hg.example.com:fingerprints = sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
1336 1337 hg2.example.com:fingerprints = sha1:914f1aff87249c09b6859b88b1906d30756491ca, sha1:fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1337 1338 hg3.example.com:fingerprints = sha256:9a:b0:dc:e2:75:ad:8a:b7:84:58:e5:1f:07:32:f1:87:e6:bd:24:22:af:b7:ce:8e:9c:b4:10:cf:b9:f4:0e:d2
1338 1339 foo.example.com:verifycertsfile = /etc/ssl/trusted-ca-certs.pem
1339 1340
1340 1341 To change the default minimum protocol version to TLS 1.2 but to allow TLS 1.1
1341 1342 when connecting to ``hg.example.com``::
1342 1343
1343 1344 [hostsecurity]
1344 1345 minimumprotocol = tls1.2
1345 1346 hg.example.com:minimumprotocol = tls1.1
1346 1347
1347 1348 ``http_proxy``
1348 1349 --------------
1349 1350
1350 1351 Used to access web-based Mercurial repositories through an HTTP
1351 1352 proxy.
1352 1353
1353 1354 ``host``
1354 1355 Host name and (optional) port of the proxy server, for example
1355 1356 "myproxy:8000".
1356 1357
1357 1358 ``no``
1358 1359 Optional. Comma-separated list of host names that should bypass
1359 1360 the proxy.
1360 1361
1361 1362 ``passwd``
1362 1363 Optional. Password to authenticate with at the proxy server.
1363 1364
1364 1365 ``user``
1365 1366 Optional. User name to authenticate with at the proxy server.
1366 1367
1367 1368 ``always``
1368 1369 Optional. Always use the proxy, even for localhost and any entries
1369 1370 in ``http_proxy.no``. (default: False)
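
Example (host names and credentials are hypothetical)::

    [http_proxy]
    host = myproxy:8000
    no = localhost, intranet.example.com
    user = alice
    passwd = secret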
1370 1371
1371 1372 ``http``
1372 1373 ----------
1373 1374
1374 1375 Used to configure access to Mercurial repositories via HTTP.
1375 1376
1376 1377 ``timeout``
1377 1378 If set, blocking operations will timeout after that many seconds.
1378 1379 (default: None)
1379 1380
1380 1381 ``merge``
1381 1382 ---------
1382 1383
1383 1384 This section specifies behavior during merges and updates.
1384 1385
1385 1386 ``checkignored``
1386 1387 Controls behavior when an ignored file on disk has the same name as a tracked
1387 1388 file in the changeset being merged or updated to, and has different
1388 1389 contents. Options are ``abort``, ``warn`` and ``ignore``. With ``abort``,
1389 1390 abort on such files. With ``warn``, warn on such files and back them up as
1390 1391 ``.orig``. With ``ignore``, don't print a warning and back them up as
1391 1392 ``.orig``. (default: ``abort``)
1392 1393
1393 1394 ``checkunknown``
1394 1395 Controls behavior when an unknown file that isn't ignored has the same name
1395 1396 as a tracked file in the changeset being merged or updated to, and has
1396 1397 different contents. Similar to ``merge.checkignored``, except for files that
1397 1398 are not ignored. (default: ``abort``)
1398 1399
1399 1400 ``on-failure``
1400 1401 When set to ``continue`` (the default), the merge process attempts to
1401 1402 merge all unresolved files using the chosen merge tool, regardless of
1402 1403 whether previous file merge attempts during the process succeeded or not.
1403 1404 Setting this to ``prompt`` will prompt, after any merge failure, whether to
1404 1405 continue or halt the merge process. Setting this to ``halt`` will automatically
1405 1406 halt the merge process on any merge tool failure. The merge process
1406 1407 can be restarted by using the ``resolve`` command. When a merge is
1407 1408 halted, the repository is left in a normal ``unresolved`` merge state.
1408 1409 (default: ``continue``)
1409 1410
1410 1411 ``strict-capability-check``
1411 1412 Whether the capabilities of internal merge tools are checked strictly
1412 1413 or not while examining the rules that decide which merge tool to use.
1413 1414 (default: False)
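
As an example combining the options above, the following backs up
conflicting unknown and ignored files instead of aborting, and halts the
merge process at the first merge tool failure::

    [merge]
    checkunknown = warn
    checkignored = warn
    on-failure = halt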
1414 1415
1415 1416 ``merge-patterns``
1416 1417 ------------------
1417 1418
1418 1419 This section specifies merge tools to associate with particular file
1419 1420 patterns. Tools matched here will take precedence over the default
1420 1421 merge tool. Patterns are globs by default, rooted at the repository
1421 1422 root.
1422 1423
1423 1424 Example::
1424 1425
1425 1426 [merge-patterns]
1426 1427 **.c = kdiff3
1427 1428 **.jpg = myimgmerge
1428 1429
1429 1430 ``merge-tools``
1430 1431 ---------------
1431 1432
1432 1433 This section configures external merge tools to use for file-level
1433 1434 merges. This section has likely been preconfigured at install time.
1434 1435 Use :hg:`config merge-tools` to check the existing configuration.
1435 1436 Also see :hg:`help merge-tools` for more details.
1436 1437
1437 1438 Example ``~/.hgrc``::
1438 1439
1439 1440 [merge-tools]
1440 1441 # Override stock tool location
1441 1442 kdiff3.executable = ~/bin/kdiff3
1442 1443 # Specify command line
1443 1444 kdiff3.args = $base $local $other -o $output
1444 1445 # Give higher priority
1445 1446 kdiff3.priority = 1
1446 1447
1447 1448 # Changing the priority of preconfigured tool
1448 1449 meld.priority = 0
1449 1450
1450 1451 # Disable a preconfigured tool
1451 1452 vimdiff.disabled = yes
1452 1453
1453 1454 # Define new tool
1454 1455 myHtmlTool.args = -m $local $other $base $output
1455 1456 myHtmlTool.regkey = Software\FooSoftware\HtmlMerge
1456 1457 myHtmlTool.priority = 1
1457 1458
1458 1459 Supported arguments:
1459 1460
1460 1461 ``priority``
1461 1462 The priority in which to evaluate this tool.
1462 1463 (default: 0)
1463 1464
1464 1465 ``executable``
1465 1466 Either just the name of the executable or its pathname.
1466 1467
1467 1468 .. container:: windows
1468 1469
1469 1470 On Windows, the path can use environment variables with ${ProgramFiles}
1470 1471 syntax.
1471 1472
1472 1473 (default: the tool name)
1473 1474
1474 1475 ``args``
1475 1476 The arguments to pass to the tool executable. You can refer to the
1476 1477 files being merged as well as the output file through these
1477 1478 variables: ``$base``, ``$local``, ``$other``, ``$output``.
1478 1479
1479 1480 The meaning of ``$local`` and ``$other`` can vary depending on which action is
1480 1481 being performed. During an update or merge, ``$local`` represents the original
1481 1482 state of the file, while ``$other`` represents the commit you are updating to or
1482 1483 the commit you are merging with. During a rebase, ``$local`` represents the
1483 1484 destination of the rebase, and ``$other`` represents the commit being rebased.
1484 1485
1485 1486 Some operations define custom labels to assist with identifying the revisions,
1486 1487 accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom
1487 1488 labels are not available, these will be ``local``, ``other``, and ``base``,
1488 1489 respectively.
1489 1490 (default: ``$local $base $other``)
1490 1491
1491 1492 ``premerge``
Attempt to run the internal non-interactive 3-way merge tool before
launching the external tool. Options are ``true``, ``false``, ``keep`` or
``keep-merge3``. The ``keep`` option will leave markers in the file if the
premerge fails. The ``keep-merge3`` option will do the same but include
information about the base of the merge in the markers (see
``internal:merge3`` in :hg:`help merge-tools`).
1498 1499 (default: True)
1499 1500
1500 1501 ``binary``
1501 1502 This tool can merge binary files. (default: False, unless tool
1502 1503 was selected by file pattern match)
1503 1504
1504 1505 ``symlink``
1505 1506 This tool can merge symlinks. (default: False)
1506 1507
1507 1508 ``check``
1508 1509 A list of merge success-checking options:
1509 1510
1510 1511 ``changed``
1511 1512 Ask whether merge was successful when the merged file shows no changes.
1512 1513 ``conflicts``
1513 1514 Check whether there are conflicts even though the tool reported success.
1514 1515 ``prompt``
1515 1516 Always prompt for merge success, regardless of success reported by tool.
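
For example, a tool entry combining these checks might look like the
following (``kdiff3`` is used purely as an illustrative tool name)::

  [merge-tools]
  kdiff3.check = conflicts, changed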
1516 1517
1517 1518 ``fixeol``
1518 1519 Attempt to fix up EOL changes caused by the merge tool.
1519 1520 (default: False)
1520 1521
1521 1522 ``gui``
1522 1523 This tool requires a graphical interface to run. (default: False)
1523 1524
1524 1525 ``mergemarkers``
1525 1526 Controls whether the labels passed via ``$labellocal``, ``$labelother``, and
1526 1527 ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or
1527 1528 ``basic``. If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict
1528 1529 markers generated during premerge will be ``detailed`` if either this option or
1529 1530 the corresponding option in the ``[ui]`` section is ``detailed``.
1530 1531 (default: ``basic``)
1531 1532
1532 1533 ``mergemarkertemplate``
1533 1534 This setting can be used to override ``mergemarkertemplate`` from the ``[ui]``
1534 1535 section on a per-tool basis; this applies to the ``$label``-prefixed variables
and to the conflict markers that are generated if ``premerge`` is ``keep`` or
1536 1537 ``keep-merge3``. See the corresponding variable in ``[ui]`` for more
1537 1538 information.
1538 1539
1539 1540 .. container:: windows
1540 1541
1541 1542 ``regkey``
1542 1543 Windows registry key which describes install location of this
1543 1544 tool. Mercurial will search for this key first under
1544 1545 ``HKEY_CURRENT_USER`` and then under ``HKEY_LOCAL_MACHINE``.
1545 1546 (default: None)
1546 1547
1547 1548 ``regkeyalt``
1548 1549 An alternate Windows registry key to try if the first key is not
1549 1550 found. The alternate key uses the same ``regname`` and ``regappend``
1550 1551 semantics of the primary key. The most common use for this key
1551 1552 is to search for 32bit applications on 64bit operating systems.
1552 1553 (default: None)
1553 1554
1554 1555 ``regname``
1555 1556 Name of value to read from specified registry key.
1556 1557 (default: the unnamed (default) value)
1557 1558
1558 1559 ``regappend``
1559 1560 String to append to the value read from the registry, typically
1560 1561 the executable name of the tool.
1561 1562 (default: None)
1562 1563
1563 1564 ``pager``
1564 1565 ---------
1565 1566
Settings used to control when to paginate and with what external tool. See
1567 1568 :hg:`help pager` for details.
1568 1569
1569 1570 ``pager``
1570 1571 Define the external tool used as pager.
1571 1572
1572 1573 If no pager is set, Mercurial uses the environment variable $PAGER.
If neither pager.pager nor $PAGER is set, a default pager will be
1574 1575 used, typically `less` on Unix and `more` on Windows. Example::
1575 1576
1576 1577 [pager]
1577 1578 pager = less -FRX
1578 1579
1579 1580 ``ignore``
1580 1581 List of commands to disable the pager for. Example::
1581 1582
1582 1583 [pager]
1583 1584 ignore = version, help, update
1584 1585
1585 1586 ``patch``
1586 1587 ---------
1587 1588
1588 1589 Settings used when applying patches, for instance through the 'import'
1589 1590 command or with Mercurial Queues extension.
1590 1591
1591 1592 ``eol``
When set to ``strict``, both the patch content and the patched files'
end of lines are preserved. When set to ``lf`` or ``crlf``, end of
lines in both are ignored when patching, and the resulting line endings
are normalized to either LF (Unix) or CRLF (Windows). When set to
``auto``, end of lines are again ignored while patching, but line
endings in patched files are normalized to their original setting
on a per-file basis. If the target file does not exist or has no end
of line, the patch line endings are preserved.
1600 1601 (default: strict)
1601 1602
1602 1603 ``fuzz``
1603 1604 The number of lines of 'fuzz' to allow when applying patches. This
1604 1605 controls how much context the patcher is allowed to ignore when
1605 1606 trying to apply a patch.
1606 1607 (default: 2)
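
An illustrative ``[patch]`` configuration combining the two options above
could be::

  [patch]
  eol = auto
  fuzz = 0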
1607 1608
1608 1609 ``paths``
1609 1610 ---------
1610 1611
1611 1612 Assigns symbolic names and behavior to repositories.
1612 1613
1613 1614 Options are symbolic names defining the URL or directory that is the
1614 1615 location of the repository. Example::
1615 1616
1616 1617 [paths]
1617 1618 my_server = https://example.com/my_repo
1618 1619 local_path = /home/me/repo
1619 1620
1620 1621 These symbolic names can be used from the command line. To pull
1621 1622 from ``my_server``: :hg:`pull my_server`. To push to ``local_path``:
1622 1623 :hg:`push local_path`.
1623 1624
1624 1625 Options containing colons (``:``) denote sub-options that can influence
1625 1626 behavior for that specific path. Example::
1626 1627
1627 1628 [paths]
1628 1629 my_server = https://example.com/my_path
1629 1630 my_server:pushurl = ssh://example.com/my_path
1630 1631
1631 1632 The following sub-options can be defined:
1632 1633
1633 1634 ``pushurl``
1634 1635 The URL to use for push operations. If not defined, the location
1635 1636 defined by the path's main entry is used.
1636 1637
1637 1638 ``pushrev``
1638 1639 A revset defining which revisions to push by default.
1639 1640
1640 1641 When :hg:`push` is executed without a ``-r`` argument, the revset
1641 1642 defined by this sub-option is evaluated to determine what to push.
1642 1643
1643 1644 For example, a value of ``.`` will push the working directory's
1644 1645 revision by default.
1645 1646
1646 1647 Revsets specifying bookmarks will not result in the bookmark being
1647 1648 pushed.
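
For instance, to push only the working directory's revision by default
while using a separate push URL, a configuration along these lines could
be used::

  [paths]
  default = https://example.com/my_repo
  default:pushurl = ssh://example.com/my_repo
  default:pushrev = .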
1648 1649
1649 1650 The following special named paths exist:
1650 1651
1651 1652 ``default``
1652 1653 The URL or directory to use when no source or remote is specified.
1653 1654
1654 1655 :hg:`clone` will automatically define this path to the location the
1655 1656 repository was cloned from.
1656 1657
1657 1658 ``default-push``
1658 1659 (deprecated) The URL or directory for the default :hg:`push` location.
1659 1660 ``default:pushurl`` should be used instead.
1660 1661
1661 1662 ``phases``
1662 1663 ----------
1663 1664
1664 1665 Specifies default handling of phases. See :hg:`help phases` for more
1665 1666 information about working with phases.
1666 1667
1667 1668 ``publish``
1668 1669 Controls draft phase behavior when working as a server. When true,
1669 1670 pushed changesets are set to public in both client and server and
1670 1671 pulled or cloned changesets are set to public in the client.
1671 1672 (default: True)
1672 1673
1673 1674 ``new-commit``
1674 1675 Phase of newly-created commits.
1675 1676 (default: draft)
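
For example, a repository serving work-in-progress changes might be
configured as a non-publishing server that creates new commits in the
secret phase (values are illustrative)::

  [phases]
  publish = False
  new-commit = secret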
1676 1677
1677 1678 ``checksubrepos``
1678 1679 Check the phase of the current revision of each subrepository. Allowed
1679 1680 values are "ignore", "follow" and "abort". For settings other than
1680 1681 "ignore", the phase of the current revision of each subrepository is
1681 1682 checked before committing the parent repository. If any of those phases is
1682 1683 greater than the phase of the parent repository (e.g. if a subrepo is in a
1683 1684 "secret" phase while the parent repo is in "draft" phase), the commit is
1684 1685 either aborted (if checksubrepos is set to "abort") or the higher phase is
1685 1686 used for the parent repository commit (if set to "follow").
1686 1687 (default: follow)
1687 1688
1688 1689
1689 1690 ``profiling``
1690 1691 -------------
1691 1692
1692 1693 Specifies profiling type, format, and file output. Two profilers are
1693 1694 supported: an instrumenting profiler (named ``ls``), and a sampling
1694 1695 profiler (named ``stat``).
1695 1696
1696 1697 In this section description, 'profiling data' stands for the raw data
1697 1698 collected during profiling, while 'profiling report' stands for a
1698 1699 statistical text report generated from the profiling data.
1699 1700
1700 1701 ``enabled``
1701 1702 Enable the profiler.
1702 1703 (default: false)
1703 1704
1704 1705 This is equivalent to passing ``--profile`` on the command line.
1705 1706
1706 1707 ``type``
1707 1708 The type of profiler to use.
1708 1709 (default: stat)
1709 1710
1710 1711 ``ls``
1711 1712 Use Python's built-in instrumenting profiler. This profiler
1712 1713 works on all platforms, but each line number it reports is the
1713 1714 first line of a function. This restriction makes it difficult to
1714 1715 identify the expensive parts of a non-trivial function.
1715 1716 ``stat``
1716 1717 Use a statistical profiler, statprof. This profiler is most
1717 1718 useful for profiling commands that run for longer than about 0.1
1718 1719 seconds.
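
As an illustration, enabling the sampling profiler and saving the report
to a file (the output path below is just an example) could look like::

  [profiling]
  enabled = true
  type = stat
  output = /tmp/hg-profile.txt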
1719 1720
1720 1721 ``format``
1721 1722 Profiling format. Specific to the ``ls`` instrumenting profiler.
1722 1723 (default: text)
1723 1724
1724 1725 ``text``
1725 1726 Generate a profiling report. When saving to a file, it should be
1726 1727 noted that only the report is saved, and the profiling data is
1727 1728 not kept.
1728 1729 ``kcachegrind``
1729 1730 Format profiling data for kcachegrind use: when saving to a
1730 1731 file, the generated file can directly be loaded into
1731 1732 kcachegrind.
1732 1733
1733 1734 ``statformat``
1734 1735 Profiling format for the ``stat`` profiler.
1735 1736 (default: hotpath)
1736 1737
1737 1738 ``hotpath``
1738 1739 Show a tree-based display containing the hot path of execution (where
1739 1740 most time was spent).
1740 1741 ``bymethod``
1741 1742 Show a table of methods ordered by how frequently they are active.
1742 1743 ``byline``
1743 1744 Show a table of lines in files ordered by how frequently they are active.
1744 1745 ``json``
1745 1746 Render profiling data as JSON.
1746 1747
1747 1748 ``frequency``
1748 1749 Sampling frequency. Specific to the ``stat`` sampling profiler.
1749 1750 (default: 1000)
1750 1751
1751 1752 ``output``
1752 1753 File path where profiling data or report should be saved. If the
1753 1754 file exists, it is replaced. (default: None, data is printed on
1754 1755 stderr)
1755 1756
1756 1757 ``sort``
1757 1758 Sort field. Specific to the ``ls`` instrumenting profiler.
1758 1759 One of ``callcount``, ``reccallcount``, ``totaltime`` and
1759 1760 ``inlinetime``.
1760 1761 (default: inlinetime)
1761 1762
1762 1763 ``time-track``
Control whether the stat profiler tracks ``cpu`` or ``real`` time.
1764 1765 (default: ``cpu`` on Windows, otherwise ``real``)
1765 1766
1766 1767 ``limit``
1767 1768 Number of lines to show. Specific to the ``ls`` instrumenting profiler.
1768 1769 (default: 30)
1769 1770
1770 1771 ``nested``
1771 1772 Show at most this number of lines of drill-down info after each main entry.
1772 1773 This can help explain the difference between Total and Inline.
1773 1774 Specific to the ``ls`` instrumenting profiler.
1774 1775 (default: 0)
1775 1776
1776 1777 ``showmin``
1777 1778 Minimum fraction of samples an entry must have for it to be displayed.
1778 1779 Can be specified as a float between ``0.0`` and ``1.0`` or can have a
1779 1780 ``%`` afterwards to allow values up to ``100``. e.g. ``5%``.
1780 1781
1781 1782 Only used by the ``stat`` profiler.
1782 1783
1783 1784 For the ``hotpath`` format, default is ``0.05``.
1784 1785 For the ``chrome`` format, default is ``0.005``.
1785 1786
1786 1787 The option is unused on other formats.
1787 1788
1788 1789 ``showmax``
Maximum fraction of samples an entry can have before it is ignored in
the display. The value format is the same as ``showmin``.
1791 1792
1792 1793 Only used by the ``stat`` profiler.
1793 1794
1794 1795 For the ``chrome`` format, default is ``0.999``.
1795 1796
1796 1797 The option is unused on other formats.
1797 1798
1798 1799 ``showtime``
1799 1800 Show time taken as absolute durations, in addition to percentages.
1800 1801 Only used by the ``hotpath`` format.
1801 1802 (default: true)
1802 1803
1803 1804 ``progress``
1804 1805 ------------
1805 1806
1806 1807 Mercurial commands can draw progress bars that are as informative as
1807 1808 possible. Some progress bars only offer indeterminate information, while others
1808 1809 have a definite end point.
1809 1810
1810 1811 ``debug``
1811 1812 Whether to print debug info when updating the progress bar. (default: False)
1812 1813
1813 1814 ``delay``
1814 1815 Number of seconds (float) before showing the progress bar. (default: 3)
1815 1816
1816 1817 ``changedelay``
1817 1818 Minimum delay before showing a new topic. When set to less than 3 * refresh,
1818 1819 that value will be used instead. (default: 1)
1819 1820
1820 1821 ``estimateinterval``
1821 1822 Maximum sampling interval in seconds for speed and estimated time
1822 1823 calculation. (default: 60)
1823 1824
1824 1825 ``refresh``
1825 1826 Time in seconds between refreshes of the progress bar. (default: 0.1)
1826 1827
1827 1828 ``format``
1828 1829 Format of the progress bar.
1829 1830
1830 1831 Valid entries for the format field are ``topic``, ``bar``, ``number``,
1831 1832 ``unit``, ``estimate``, ``speed``, and ``item``. ``item`` defaults to the
1832 1833 last 20 characters of the item, but this can be changed by adding either
1833 1834 ``-<num>`` which would take the last num characters, or ``+<num>`` for the
1834 1835 first num characters.
1835 1836
1836 1837 (default: topic bar number estimate)
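
For instance, a format showing the item truncated to its last 10
characters (using the ``-<num>`` suffix described above), together with
the other default fields, might be configured as::

  [progress]
  delay = 1
  format = topic bar number estimate item-10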
1837 1838
1838 1839 ``width``
1839 1840 If set, the maximum width of the progress information (that is, min(width,
1840 1841 term width) will be used).
1841 1842
1842 1843 ``clear-complete``
1843 1844 Clear the progress bar after it's done. (default: True)
1844 1845
1845 1846 ``disable``
1846 1847 If true, don't show a progress bar.
1847 1848
1848 1849 ``assume-tty``
1849 1850 If true, ALWAYS show a progress bar, unless disable is given.
1850 1851
1851 1852 ``rebase``
1852 1853 ----------
1853 1854
1854 1855 ``evolution.allowdivergence``
When true, allow the creation of divergent changesets when rebasing
obsolete changesets. (default: False)
1857 1858
1858 1859 ``revsetalias``
1859 1860 ---------------
1860 1861
1861 1862 Alias definitions for revsets. See :hg:`help revsets` for details.
1862 1863
1863 1864 ``rewrite``
1864 1865 -----------
1865 1866
1866 1867 ``backup-bundle``
1867 1868 Whether to save stripped changesets to a bundle file. (default: True)
1868 1869
1869 1870 ``update-timestamp``
If true, updates the date and time of the changeset to the current date
and time. It is currently only applicable to `hg amend`,
`hg commit --amend` and `hg uncommit`.
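
An illustrative configuration for this section could be::

  [rewrite]
  backup-bundle = true
  update-timestamp = true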
1873 1874
1874 1875 ``storage``
1875 1876 -----------
1876 1877
1877 1878 Control the strategy Mercurial uses internally to store history. Options in this
1878 1879 category impact performance and repository size.
1879 1880
1880 1881 ``revlog.optimize-delta-parent-choice``
1881 1882 When storing a merge revision, both parents will be equally considered as
1882 1883 a possible delta base. This results in better delta selection and improved
1883 1884 revlog compression. This option is enabled by default.
1884 1885
Turning this option off can result in a large increase of repository size
for repositories with many merges.
1887 1888
1888 1889 ``revlog.reuse-external-delta-parent``
1889 1890 Control the order in which delta parents are considered when adding new
1890 1891 revisions from an external source.
1891 1892 (typically: apply bundle from `hg pull` or `hg push`).
1892 1893
New revisions are usually provided as a delta against other revisions. By
default, Mercurial will try to reuse this delta first, therefore using the
same "delta parent" as the source. Directly using deltas from the source
reduces CPU usage and usually speeds up the operation. However, in some
cases, the source might have sub-optimal delta bases and forcing their
reevaluation is useful. For example, pushes from an old client could have
sub-optimal delta parents that the server wants to optimize (lack of
general delta, bad parent choices, lack of sparse-revlog, etc.).
1901 1902
This option is enabled by default. Turning it off will ensure that bad delta
parent choices from older clients do not propagate to this repository, at
the cost of a small increase in CPU consumption.
1905 1906
Note: this option only controls the order in which delta parents are
considered. Even when disabled, the existing delta from the source will be
reused if the same delta parent is selected.
1909 1910
1910 1911 ``revlog.reuse-external-delta``
1911 1912 Control the reuse of delta from external source.
1912 1913 (typically: apply bundle from `hg pull` or `hg push`).
1913 1914
New revisions are usually provided as a delta against another revision. By
default, Mercurial will not recompute the same delta again, trusting
externally provided deltas. There have been rare cases of small adjustments
to the diffing algorithm in the past, so in some rare cases recomputing
deltas provided by ancient clients can provide better results. Disabling
this option means going through a full delta recomputation for all incoming
revisions, which implies a large increase in CPU usage and will slow
operations down.
1922 1923
1923 1924 This option is enabled by default. When disabled, it also disables the
1924 1925 related ``storage.revlog.reuse-external-delta-parent`` option.
1925 1926
1926 1927 ``revlog.zlib.level``
Zlib compression level used when storing data into the repository. Accepted
values range from 1 (lowest compression) to 9 (highest compression). The
zlib default value is 6.
1930 1931
1931 1932
1932 1933 ``revlog.zstd.level``
zstd compression level used when storing data into the repository. Accepted
values range from 1 (lowest compression) to 22 (highest compression).
(default: 3)
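
For example, trading more CPU time at commit for a smaller store might be
attempted with a higher compression level (the value below is
illustrative)::

  [storage]
  revlog.zstd.level = 10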
1936 1937
1937 1938 ``server``
1938 1939 ----------
1939 1940
1940 1941 Controls generic server settings.
1941 1942
1942 1943 ``bookmarks-pushkey-compat``
Trigger the pushkey hook when bookmark updates are pushed. This config
exists for compatibility purposes. (default: True)
1945 1946
1946 1947 If you use ``pushkey`` and ``pre-pushkey`` hooks to control bookmark
1947 1948 movement we recommend you migrate them to ``txnclose-bookmark`` and
1948 1949 ``pretxnclose-bookmark``.
1949 1950
1950 1951 ``compressionengines``
1951 1952 List of compression engines and their relative priority to advertise
1952 1953 to clients.
1953 1954
1954 1955 The order of compression engines determines their priority, the first
1955 1956 having the highest priority. If a compression engine is not listed
1956 1957 here, it won't be advertised to clients.
1957 1958
1958 1959 If not set (the default), built-in defaults are used. Run
1959 1960 :hg:`debuginstall` to list available compression engines and their
1960 1961 default wire protocol priority.
1961 1962
1962 1963 Older Mercurial clients only support zlib compression and this setting
1963 1964 has no effect for legacy clients.
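
For instance, to advertise zstd before zlib (assuming the zstd engine is
available in your Mercurial build), a configuration could be::

  [server]
  compressionengines = zstd, zlib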
1964 1965
1965 1966 ``uncompressed``
1966 1967 Whether to allow clients to clone a repository using the
1967 1968 uncompressed streaming protocol. This transfers about 40% more
1968 1969 data than a regular clone, but uses less memory and CPU on both
1969 1970 server and client. Over a LAN (100 Mbps or better) or a very fast
1970 1971 WAN, an uncompressed streaming clone is a lot faster (~10x) than a
1971 1972 regular clone. Over most WAN connections (anything slower than
1972 1973 about 6 Mbps), uncompressed streaming is slower, because of the
1973 1974 extra data transfer overhead. This mode will also temporarily hold
1974 1975 the write lock while determining what data to transfer.
1975 1976 (default: True)
1976 1977
1977 1978 ``uncompressedallowsecret``
1978 1979 Whether to allow stream clones when the repository contains secret
1979 1980 changesets. (default: False)
1980 1981
1981 1982 ``preferuncompressed``
1982 1983 When set, clients will try to use the uncompressed streaming
1983 1984 protocol. (default: False)
1984 1985
1985 1986 ``disablefullbundle``
1986 1987 When set, servers will refuse attempts to do pull-based clones.
1987 1988 If this option is set, ``preferuncompressed`` and/or clone bundles
1988 1989 are highly recommended. Partial clones will still be allowed.
1989 1990 (default: False)
1990 1991
1991 1992 ``streamunbundle``
1992 1993 When set, servers will apply data sent from the client directly,
1993 1994 otherwise it will be written to a temporary file first. This option
1994 1995 effectively prevents concurrent pushes.
1995 1996
1996 1997 ``pullbundle``
1997 1998 When set, the server will check pullbundle.manifest for bundles
1998 1999 covering the requested heads and common nodes. The first matching
1999 2000 entry will be streamed to the client.
2000 2001
2001 2002 For HTTP transport, the stream will still use zlib compression
2002 2003 for older clients.
2003 2004
2004 2005 ``concurrent-push-mode``
2005 2006 Level of allowed race condition between two pushing clients.
2006 2007
- 'strict': the push is aborted if another client touched the repository
  while the push was being prepared. (default)
- 'check-related': the push is only aborted if it affects heads that were
  also affected while the push was being prepared.

This requires a compatible client (version 4.3 and later). Older clients
will use 'strict'.
2014 2015
2015 2016 ``validate``
2016 2017 Whether to validate the completeness of pushed changesets by
2017 2018 checking that all new file revisions specified in manifests are
2018 2019 present. (default: False)
2019 2020
2020 2021 ``maxhttpheaderlen``
2021 2022 Instruct HTTP clients not to send request headers longer than this
2022 2023 many bytes. (default: 1024)
2023 2024
2024 2025 ``bundle1``
2025 2026 Whether to allow clients to push and pull using the legacy bundle1
2026 2027 exchange format. (default: True)
2027 2028
2028 2029 ``bundle1gd``
2029 2030 Like ``bundle1`` but only used if the repository is using the
2030 2031 *generaldelta* storage format. (default: True)
2031 2032
2032 2033 ``bundle1.push``
2033 2034 Whether to allow clients to push using the legacy bundle1 exchange
2034 2035 format. (default: True)
2035 2036
2036 2037 ``bundle1gd.push``
2037 2038 Like ``bundle1.push`` but only used if the repository is using the
2038 2039 *generaldelta* storage format. (default: True)
2039 2040
2040 2041 ``bundle1.pull``
2041 2042 Whether to allow clients to pull using the legacy bundle1 exchange
2042 2043 format. (default: True)
2043 2044
2044 2045 ``bundle1gd.pull``
2045 2046 Like ``bundle1.pull`` but only used if the repository is using the
2046 2047 *generaldelta* storage format. (default: True)
2047 2048
2048 2049 Large repositories using the *generaldelta* storage format should
2049 2050 consider setting this option because converting *generaldelta*
2050 2051 repositories to the exchange format required by the bundle1 data
2051 2052 format can consume a lot of CPU.
2052 2053
2053 2054 ``bundle2.stream``
2054 2055 Whether to allow clients to pull using the bundle2 streaming protocol.
2055 2056 (default: True)
2056 2057
2057 2058 ``zliblevel``
2058 2059 Integer between ``-1`` and ``9`` that controls the zlib compression level
2059 2060 for wire protocol commands that send zlib compressed output (notably the
2060 2061 commands that send repository history data).
2061 2062
2062 2063 The default (``-1``) uses the default zlib compression level, which is
2063 2064 likely equivalent to ``6``. ``0`` means no compression. ``9`` means
2064 2065 maximum compression.
2065 2066
2066 2067 Setting this option allows server operators to make trade-offs between
2067 2068 bandwidth and CPU used. Lowering the compression lowers CPU utilization
2068 2069 but sends more bytes to clients.
2069 2070
2070 2071 This option only impacts the HTTP server.
2071 2072
2072 2073 ``zstdlevel``
2073 2074 Integer between ``1`` and ``22`` that controls the zstd compression level
2074 2075 for wire protocol commands. ``1`` is the minimal amount of compression and
2075 2076 ``22`` is the highest amount of compression.
2076 2077
2077 2078 The default (``3``) should be significantly faster than zlib while likely
2078 2079 delivering better compression ratios.
2079 2080
2080 2081 This option only impacts the HTTP server.
2081 2082
2082 2083 See also ``server.zliblevel``.
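
As an illustration, a server trading extra CPU for smaller transfers might
use something like the following (values are illustrative)::

  [server]
  zliblevel = 9
  zstdlevel = 12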
2083 2084
2084 2085 ``view``
2085 2086 Repository filter used when exchanging revisions with the peer.
2086 2087
2087 2088 The default view (``served``) excludes secret and hidden changesets.
2088 2089 Another useful value is ``immutable`` (no draft, secret or hidden
2089 2090 changesets). (EXPERIMENTAL)
2090 2091
2091 2092 ``smtp``
2092 2093 --------
2093 2094
2094 2095 Configuration for extensions that need to send email messages.
2095 2096
2096 2097 ``host``
2097 2098 Host name of mail server, e.g. "mail.example.com".
2098 2099
2099 2100 ``port``
2100 2101 Optional. Port to connect to on mail server. (default: 465 if
2101 2102 ``tls`` is smtps; 25 otherwise)
2102 2103
2103 2104 ``tls``
2104 2105 Optional. Method to enable TLS when connecting to mail server: starttls,
2105 2106 smtps or none. (default: none)
2106 2107
2107 2108 ``username``
2108 2109 Optional. User name for authenticating with the SMTP server.
2109 2110 (default: None)
2110 2111
2111 2112 ``password``
2112 2113 Optional. Password for authenticating with the SMTP server. If not
2113 2114 specified, interactive sessions will prompt the user for a
2114 2115 password; non-interactive sessions will fail. (default: None)
2115 2116
2116 2117 ``local_hostname``
2117 2118 Optional. The hostname that the sender can use to identify
2118 2119 itself to the MTA.
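
A minimal illustrative ``[smtp]`` configuration (host, port and account
details are placeholders) could be::

  [smtp]
  host = mail.example.com
  port = 587
  tls = starttls
  username = user@example.com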
2119 2120
2120 2121
2121 2122 ``subpaths``
2122 2123 ------------
2123 2124
2124 2125 Subrepository source URLs can go stale if a remote server changes name
2125 2126 or becomes temporarily unavailable. This section lets you define
2126 2127 rewrite rules of the form::
2127 2128
2128 2129 <pattern> = <replacement>
2129 2130
2130 2131 where ``pattern`` is a regular expression matching a subrepository
2131 2132 source URL and ``replacement`` is the replacement string used to
2132 2133 rewrite it. Groups can be matched in ``pattern`` and referenced in
2133 2134 ``replacements``. For instance::
2134 2135
2135 2136 http://server/(.*)-hg/ = http://hg.server/\1/
2136 2137
2137 2138 rewrites ``http://server/foo-hg/`` into ``http://hg.server/foo/``.
2138 2139
2139 2140 Relative subrepository paths are first made absolute, and the
2140 2141 rewrite rules are then applied on the full (absolute) path. If ``pattern``
2141 2142 doesn't match the full path, an attempt is made to apply it on the
2142 2143 relative path alone. The rules are applied in definition order.
2143 2144
2144 2145 ``subrepos``
2145 2146 ------------
2146 2147
2147 2148 This section contains options that control the behavior of the
2148 2149 subrepositories feature. See also :hg:`help subrepos`.
2149 2150
2150 2151 Security note: auditing in Mercurial is known to be insufficient to
2151 2152 prevent clone-time code execution with carefully constructed Git
subrepos. It is unknown if a similar defect is present in Subversion
2153 2154 subrepos. Both Git and Subversion subrepos are disabled by default
2154 2155 out of security concerns. These subrepo types can be enabled using
2155 2156 the respective options below.
2156 2157
2157 2158 ``allowed``
2158 2159 Whether subrepositories are allowed in the working directory.
2159 2160
2160 2161 When false, commands involving subrepositories (like :hg:`update`)
2161 2162 will fail for all subrepository types.
2162 2163 (default: true)
2163 2164
2164 2165 ``hg:allowed``
2165 2166 Whether Mercurial subrepositories are allowed in the working
2166 2167 directory. This option only has an effect if ``subrepos.allowed``
2167 2168 is true.
2168 2169 (default: true)
2169 2170
2170 2171 ``git:allowed``
2171 2172 Whether Git subrepositories are allowed in the working directory.
2172 2173 This option only has an effect if ``subrepos.allowed`` is true.
2173 2174
2174 2175 See the security note above before enabling Git subrepos.
2175 2176 (default: false)
2176 2177
2177 2178 ``svn:allowed``
2178 2179 Whether Subversion subrepositories are allowed in the working
2179 2180 directory. This option only has an effect if ``subrepos.allowed``
2180 2181 is true.
2181 2182
2182 2183 See the security note above before enabling Subversion subrepos.
2183 2184 (default: false)
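
For example, to allow Mercurial and Git subrepositories (keeping in mind
the security note above), an illustrative configuration would be::

  [subrepos]
  allowed = true
  git:allowed = true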
2184 2185
2185 2186 ``templatealias``
2186 2187 -----------------
2187 2188
2188 2189 Alias definitions for templates. See :hg:`help templates` for details.
2189 2190
2190 2191 ``templates``
2191 2192 -------------
2192 2193
2193 2194 Use the ``[templates]`` section to define template strings.
2194 2195 See :hg:`help templates` for details.
2195 2196
2196 2197 ``trusted``
2197 2198 -----------
2198 2199
2199 2200 Mercurial will not use the settings in the
2200 2201 ``.hg/hgrc`` file from a repository if it doesn't belong to a trusted
2201 2202 user or to a trusted group, as various hgrc features allow arbitrary
2202 2203 commands to be run. This issue is often encountered when configuring
2203 2204 hooks or extensions for shared repositories or servers. However,
2204 2205 the web interface will use some safe settings from the ``[web]``
2205 2206 section.
2206 2207
2207 2208 This section specifies what users and groups are trusted. The
2208 2209 current user is always trusted. To trust everybody, list a user or a
2209 2210 group with name ``*``. These settings must be placed in an
2210 2211 *already-trusted file* to take effect, such as ``$HOME/.hgrc`` of the
2211 2212 user or service running Mercurial.
2212 2213
2213 2214 ``users``
2214 2215 Comma-separated list of trusted users.
2215 2216
2216 2217 ``groups``
2217 2218 Comma-separated list of trusted groups.
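
For instance (user and group names below are placeholders)::

  [trusted]
  users = alice, bob
  groups = hgdevel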
2218 2219
2219 2220
2220 2221 ``ui``
2221 2222 ------
2222 2223
2223 2224 User interface controls.
2224 2225
2225 2226 ``archivemeta``
2226 2227 Whether to include the .hg_archival.txt file containing meta data
2227 2228 (hashes for the repository base and for tip) in archives created
2228 2229 by the :hg:`archive` command or downloaded via hgweb.
2229 2230 (default: True)
2230 2231
2231 2232 ``askusername``
2232 2233 Whether to prompt for a username when committing. If True, and
2233 2234 neither ``$HGUSER`` nor ``$EMAIL`` has been specified, then the user will
2234 2235 be prompted to enter a username. If no username is entered, the
2235 2236 default ``USER@HOST`` is used instead.
2236 2237 (default: False)
2237 2238
2238 2239 ``clonebundles``
2239 2240 Whether the "clone bundles" feature is enabled.
2240 2241
2241 2242 When enabled, :hg:`clone` may download and apply a server-advertised
2242 2243 bundle file from a URL instead of using the normal exchange mechanism.
2243 2244
2244 2245 This can likely result in faster and more reliable clones.
2245 2246
2246 2247 (default: True)
2247 2248
2248 2249 ``clonebundlefallback``
2249 2250 Whether failure to apply an advertised "clone bundle" from a server
2250 2251 should result in fallback to a regular clone.
2251 2252
2252 2253 This is disabled by default because servers advertising "clone
2253 2254 bundles" often do so to reduce server load. If advertised bundles
2254 2255 start mass failing and clients automatically fall back to a regular
2255 2256 clone, this would add significant and unexpected load to the server
2256 2257 since the server is expecting clone operations to be offloaded to
2257 2258 pre-generated bundles. Failing fast (the default behavior) ensures
2258 2259 clients don't overwhelm the server when "clone bundle" application
2259 2260 fails.
2260 2261
2261 2262 (default: False)
2262 2263
2263 2264 ``clonebundleprefers``
2264 2265 Defines preferences for which "clone bundles" to use.
2265 2266
2266 2267 Servers advertising "clone bundles" may advertise multiple available
2267 2268 bundles. Each bundle may have different attributes, such as the bundle
2268 2269 type and compression format. This option is used to prefer a particular
2269 2270 bundle over another.
2270 2271
2271 2272 The following keys are defined by Mercurial:
2272 2273
2273 2274 BUNDLESPEC
2274 2275 A bundle type specifier. These are strings passed to :hg:`bundle -t`.
2275 2276 e.g. ``gzip-v2`` or ``bzip2-v1``.
2276 2277
2277 2278 COMPRESSION
2278 2279 The compression format of the bundle. e.g. ``gzip`` and ``bzip2``.
2279 2280
2280 2281 Server operators may define custom keys.
2281 2282
2282 2283 Example values: ``COMPRESSION=bzip2``,
2283 2284 ``BUNDLESPEC=gzip-v2, COMPRESSION=gzip``.
2284 2285
2285 2286 By default, the first bundle advertised by the server is used.
2286 2287
2287 2288 ``color``
When to colorize output. Possible values are Boolean ("yes" or "no"),
"debug", or "always". (default: "yes"). "yes" will use color whenever it
seems possible. See :hg:`help color` for details.
2291 2292
2292 2293 ``commitsubrepos``
2293 2294 Whether to commit modified subrepositories when committing the
2294 2295 parent repository. If False and one subrepository has uncommitted
2295 2296 changes, abort the commit.
2296 2297 (default: False)
2297 2298
2298 2299 ``debug``
2299 2300 Print debugging information. (default: False)
2300 2301
2301 2302 ``editor``
2302 2303 The editor to use during a commit. (default: ``$EDITOR`` or ``vi``)
2303 2304
2304 2305 ``fallbackencoding``
2305 2306 Encoding to try if it's not possible to decode the changelog using
2306 2307 UTF-8. (default: ISO-8859-1)
2307 2308
2308 2309 ``graphnodetemplate``
2309 2310 The template used to print changeset nodes in an ASCII revision graph.
2310 2311 (default: ``{graphnode}``)
2311 2312
2312 2313 ``ignore``
2313 2314 A file to read per-user ignore patterns from. This file should be
2314 2315 in the same format as a repository-wide .hgignore file. Filenames
2315 2316 are relative to the repository root. This option supports hook syntax,
2316 2317 so if you want to specify multiple ignore files, you can do so by
2317 2318 setting something like ``ignore.other = ~/.hgignore2``. For details
2318 2319 of the ignore file format, see the ``hgignore(5)`` man page.
2319 2320
2320 2321 ``interactive``
Allow prompting the user. (default: True)
2322 2323
2323 2324 ``interface``
2324 2325 Select the default interface for interactive features (default: text).
2325 2326 Possible values are 'text' and 'curses'.
2326 2327
2327 2328 ``interface.chunkselector``
2328 2329 Select the interface for change recording (e.g. :hg:`commit -i`).
2329 2330 Possible values are 'text' and 'curses'.
2330 2331 This config overrides the interface specified by ui.interface.
2331 2332
2332 2333 ``large-file-limit``
2333 2334 Largest file size that gives no memory use warning.
2334 2335 Possible values are integers or 0 to disable the check.
2335 2336 (default: 10000000)
2336 2337
2337 2338 ``logtemplate``
2338 2339 Template string for commands that print changesets.
2339 2340
2340 2341 ``merge``
2341 2342 The conflict resolution program to use during a manual merge.
2342 2343 For more information on merge tools see :hg:`help merge-tools`.
2343 2344 For configuring merge tools see the ``[merge-tools]`` section.
2344 2345
2345 2346 ``mergemarkers``
2346 2347 Sets the merge conflict marker label styling. The ``detailed``
2347 2348 style uses the ``mergemarkertemplate`` setting to style the labels.
2348 2349 The ``basic`` style just uses 'local' and 'other' as the marker label.
2349 2350 One of ``basic`` or ``detailed``.
2350 2351 (default: ``basic``)
2351 2352
2352 2353 ``mergemarkertemplate``
2353 2354 The template used to print the commit description next to each conflict
2354 2355 marker during merge conflicts. See :hg:`help templates` for the template
2355 2356 format.
2356 2357
2357 2358 Defaults to showing the hash, tags, branches, bookmarks, author, and
2358 2359 the first line of the commit description.
2359 2360
2360 2361 If you use non-ASCII characters in names for tags, branches, bookmarks,
2361 2362 authors, and/or commit descriptions, you must pay attention to encodings of
2362 2363 managed files. At template expansion, non-ASCII characters use the encoding
2363 2364 specified by the ``--encoding`` global option, ``HGENCODING`` or other
2364 2365 environment variables that govern your locale. If the encoding of the merge
2365 2366 markers is different from the encoding of the merged files,
2366 2367 serious problems may occur.
2367 2368
2368 2369 Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
2369 2370
2370 2371 ``message-output``
2371 2372 Where to write status and error messages. (default: ``stdio``)
2372 2373
2373 2374 ``stderr``
2374 2375 Everything to stderr.
2375 2376 ``stdio``
2376 2377 Status to stdout, and error to stderr.
2377 2378
2378 2379 ``origbackuppath``
2379 2380 The path to a directory used to store generated .orig files. If the path is
2380 2381 not a directory, one will be created. If set, files stored in this
2381 2382 directory have the same name as the original file and do not have a .orig
2382 2383 suffix.
2383 2384
2384 2385 ``paginate``
2385 2386 Control the pagination of command output (default: True). See :hg:`help pager`
2386 2387 for details.
2387 2388
2388 2389 ``patch``
2389 2390 An optional external tool that ``hg import`` and some extensions
2390 2391 will use for applying patches. By default Mercurial uses an
2391 2392 internal patch utility. The external tool must work as the common
2392 2393 Unix ``patch`` program. In particular, it must accept a ``-p``
2393 2394 argument to strip patch headers, a ``-d`` argument to specify the
2394 2395 current directory, a file name to patch, and a patch file to take
2395 2396 from stdin.
2396 2397
2397 2398 It is possible to specify a patch tool together with extra
2398 2399 arguments. For example, setting this option to ``patch --merge``
2399 2400 will use the ``patch`` program with its 2-way merge option.
2400 2401
2401 2402 ``portablefilenames``
2402 2403 Check for portable filenames. Can be ``warn``, ``ignore`` or ``abort``.
2403 2404 (default: ``warn``)
2404 2405
2405 2406 ``warn``
2406 2407 Print a warning message on POSIX platforms, if a file with a non-portable
2407 2408 filename is added (e.g. a file with a name that can't be created on
2408 2409 Windows because it contains reserved parts like ``AUX``, reserved
2409 2410 characters like ``:``, or would cause a case collision with an existing
2410 2411 file).
2411 2412
2412 2413 ``ignore``
2413 2414 Don't print a warning.
2414 2415
2415 2416 ``abort``
2416 2417 The command is aborted.
2417 2418
2418 2419 ``true``
2419 2420 Alias for ``warn``.
2420 2421
2421 2422 ``false``
2422 2423 Alias for ``ignore``.
2423 2424
2424 2425 .. container:: windows
2425 2426
2426 2427 On Windows, this configuration option is ignored and the command aborted.
2427 2428
2428 2429 ``pre-merge-tool-output-template``
2429 2430 A template that is printed before executing an external merge tool. This can
2430 2431 be used to print out additional context that might be useful to have during
2431 2432 the conflict resolution, such as the description of the various commits
2432 2433 involved or bookmarks/tags.
2433 2434
Additional information is available in the ``local``, ``base``, and ``other``
2435 2436 dicts. For example: ``{local.label}``, ``{base.name}``, or
2436 2437 ``{other.islink}``.
2437 2438
2438 2439 ``quiet``
2439 2440 Reduce the amount of output printed.
2440 2441 (default: False)
2441 2442
2442 2443 ``relative-paths``
2443 2444 Prefer relative paths in the UI.
2444 2445
2445 2446 ``remotecmd``
2446 2447 Remote command to use for clone/push/pull operations.
2447 2448 (default: ``hg``)
2448 2449
2449 2450 ``report_untrusted``
2450 2451 Warn if a ``.hg/hgrc`` file is ignored due to not being owned by a
2451 2452 trusted user or group.
2452 2453 (default: True)
2453 2454
2454 2455 ``slash``
2455 2456 (Deprecated. Use ``slashpath`` template filter instead.)
2456 2457
2457 2458 Display paths using a slash (``/``) as the path separator. This
2458 2459 only makes a difference on systems where the default path
2459 2460 separator is not the slash character (e.g. Windows uses the
2460 2461 backslash character (``\``)).
2461 2462 (default: False)
2462 2463
2463 2464 ``statuscopies``
2464 2465 Display copies in the status command.
2465 2466
2466 2467 ``ssh``
2467 2468 Command to use for SSH connections. (default: ``ssh``)
2468 2469
2469 2470 ``ssherrorhint``
2470 2471 A hint shown to the user in the case of SSH error (e.g.
2471 2472 ``Please see http://company/internalwiki/ssh.html``)
2472 2473
2473 2474 ``strict``
2474 2475 Require exact command names, instead of allowing unambiguous
2475 2476 abbreviations. (default: False)
2476 2477
2477 2478 ``style``
2478 2479 Name of style to use for command output.
2479 2480
2480 2481 ``supportcontact``
2481 2482 A URL where users should report a Mercurial traceback. Use this if you are a
2482 2483 large organisation with its own Mercurial deployment process and crash
2483 2484 reports should be addressed to your internal support.
2484 2485
2485 2486 ``textwidth``
2486 2487 Maximum width of help text. A longer line generated by ``hg help`` or
2487 2488 ``hg subcommand --help`` will be broken after white space to get this
2488 2489 width or the terminal width, whichever comes first.
2489 2490 A non-positive value will disable this and the terminal width will be
2490 2491 used. (default: 78)
2491 2492
2492 2493 ``timeout``
2493 2494 The timeout used when a lock is held (in seconds), a negative value
2494 2495 means no timeout. (default: 600)
2495 2496
2496 2497 ``timeout.warn``
2497 2498 Time (in seconds) before a warning is printed about held lock. A negative
2498 2499 value means no warning. (default: 0)
2499 2500
2500 2501 ``traceback``
2501 2502 Mercurial always prints a traceback when an unknown exception
2502 2503 occurs. Setting this to True will make Mercurial print a traceback
2503 2504 on all exceptions, even those recognized by Mercurial (such as
2504 2505 IOError or MemoryError). (default: False)
2505 2506
2506 2507 ``tweakdefaults``
2507 2508
2508 2509 By default Mercurial's behavior changes very little from release
2509 2510 to release, but over time the recommended config settings
2510 2511 shift. Enable this config to opt in to get automatic tweaks to
2511 2512 Mercurial's behavior over time. This config setting will have no
2512 2513 effect if ``HGPLAIN`` is set or ``HGPLAINEXCEPT`` is set and does
2513 2514 not include ``tweakdefaults``. (default: False)
2514 2515
2515 2516 It currently means::
2516 2517
2517 2518 .. tweakdefaultsmarker
2518 2519
2519 2520 ``username``
2520 2521 The committer of a changeset created when running "commit".
2521 2522 Typically a person's name and email address, e.g. ``Fred Widget
2522 2523 <fred@example.com>``. Environment variables in the
2523 2524 username are expanded.
2524 2525
2525 2526 (default: ``$EMAIL`` or ``username@hostname``. If the username in
2526 2527 hgrc is empty, e.g. if the system admin set ``username =`` in the
2527 2528 system hgrc, it has to be specified manually or in a different
2528 2529 hgrc file)
2529 2530
2530 2531 ``verbose``
2531 2532 Increase the amount of output printed. (default: False)
2532 2533
2533 2534
2534 2535 ``web``
2535 2536 -------
2536 2537
2537 2538 Web interface configuration. The settings in this section apply to
2538 2539 both the builtin webserver (started by :hg:`serve`) and the script you
2539 2540 run through a webserver (``hgweb.cgi`` and the derivatives for FastCGI
2540 2541 and WSGI).
2541 2542
2542 2543 The Mercurial webserver does no authentication (it does not prompt for
2543 2544 usernames and passwords to validate *who* users are), but it does do
2544 2545 authorization (it grants or denies access for *authenticated users*
2545 2546 based on settings in this section). You must either configure your
2546 2547 webserver to do authentication for you, or disable the authorization
2547 2548 checks.
2548 2549
2549 2550 For a quick setup in a trusted environment, e.g., a private LAN, where
2550 2551 you want it to accept pushes from anybody, you can use the following
2551 2552 command line::
2552 2553
2553 2554 $ hg --config web.allow-push=* --config web.push_ssl=False serve
2554 2555
2555 2556 Note that this will allow anybody to push anything to the server and
2556 2557 that this should not be used for public servers.
2557 2558
2558 2559 The full set of options is:
2559 2560
2560 2561 ``accesslog``
2561 2562 Where to output the access log. (default: stdout)
2562 2563
2563 2564 ``address``
2564 2565 Interface address to bind to. (default: all)
2565 2566
2566 2567 ``allow-archive``
2567 2568 List of archive format (bz2, gz, zip) allowed for downloading.
2568 2569 (default: empty)
2569 2570
2570 2571 ``allowbz2``
2571 2572 (DEPRECATED) Whether to allow .tar.bz2 downloading of repository
2572 2573 revisions.
2573 2574 (default: False)
2574 2575
2575 2576 ``allowgz``
2576 2577 (DEPRECATED) Whether to allow .tar.gz downloading of repository
2577 2578 revisions.
2578 2579 (default: False)
2579 2580
2580 2581 ``allow-pull``
2581 2582 Whether to allow pulling from the repository. (default: True)
2582 2583
2583 2584 ``allow-push``
2584 2585 Whether to allow pushing to the repository. If empty or not set,
2585 2586 pushing is not allowed. If the special value ``*``, any remote
2586 2587 user can push, including unauthenticated users. Otherwise, the
2587 2588 remote user must have been authenticated, and the authenticated
2588 2589 user name must be present in this list. The contents of the
2589 2590 allow-push list are examined after the deny_push list.
2590 2591
2591 2592 ``allow_read``
2592 2593 If the user has not already been denied repository access due to
2593 2594 the contents of deny_read, this list determines whether to grant
2594 2595 repository access to the user. If this list is not empty, and the
2595 2596 user is unauthenticated or not present in the list, then access is
2596 2597 denied for the user. If the list is empty or not set, then access
2597 2598 is permitted to all users by default. Setting allow_read to the
2598 2599 special value ``*`` is equivalent to it not being set (i.e. access
2599 2600 is permitted to all users). The contents of the allow_read list are
2600 2601 examined after the deny_read list.
2601 2602
2602 2603 ``allowzip``
2603 2604 (DEPRECATED) Whether to allow .zip downloading of repository
2604 2605 revisions. This feature creates temporary files.
2605 2606 (default: False)
2606 2607
2607 2608 ``archivesubrepos``
2608 2609 Whether to recurse into subrepositories when archiving.
2609 2610 (default: False)
2610 2611
2611 2612 ``baseurl``
2612 2613 Base URL to use when publishing URLs in other locations, so
2613 2614 third-party tools like email notification hooks can construct
2614 2615 URLs. Example: ``http://hgserver/repos/``.
2615 2616
2616 2617 ``cacerts``
2617 2618 Path to file containing a list of PEM encoded certificate
2618 2619 authority certificates. Environment variables and ``~user``
2619 2620 constructs are expanded in the filename. If specified on the
2620 2621 client, then it will verify the identity of remote HTTPS servers
2621 2622 with these certificates.
2622 2623
2623 2624 To disable SSL verification temporarily, specify ``--insecure`` from
2624 2625 command line.
2625 2626
2626 2627 You can use OpenSSL's CA certificate file if your platform has
2627 2628 one. On most Linux systems this will be
2628 2629 ``/etc/ssl/certs/ca-certificates.crt``. Otherwise you will have to
2629 2630 generate this file manually. The form must be as follows::
2630 2631
2631 2632 -----BEGIN CERTIFICATE-----
2632 2633 ... (certificate in base64 PEM encoding) ...
2633 2634 -----END CERTIFICATE-----
2634 2635 -----BEGIN CERTIFICATE-----
2635 2636 ... (certificate in base64 PEM encoding) ...
2636 2637 -----END CERTIFICATE-----
2637 2638
2638 2639 ``cache``
2639 2640 Whether to support caching in hgweb. (default: True)
2640 2641
2641 2642 ``certificate``
2642 2643 Certificate to use when running :hg:`serve`.
2643 2644
2644 2645 ``collapse``
2645 2646 With ``descend`` enabled, repositories in subdirectories are shown at
2646 2647 a single level alongside repositories in the current path. With
2647 2648 ``collapse`` also enabled, repositories residing at a deeper level than
2648 2649 the current path are grouped behind navigable directory entries that
2649 2650 lead to the locations of these repositories. In effect, this setting
2650 2651 collapses each collection of repositories found within a subdirectory
2651 2652 into a single entry for that subdirectory. (default: False)
2652 2653
2653 2654 ``comparisoncontext``
2654 2655 Number of lines of context to show in side-by-side file comparison. If
2655 2656 negative or the value ``full``, whole files are shown. (default: 5)
2656 2657
2657 2658 This setting can be overridden by a ``context`` request parameter to the
2658 2659 ``comparison`` command, taking the same values.
2659 2660
2660 2661 ``contact``
2661 2662 Name or email address of the person in charge of the repository.
2662 2663 (default: ui.username or ``$EMAIL`` or "unknown" if unset or empty)
2663 2664
2664 2665 ``csp``
2665 2666 Send a ``Content-Security-Policy`` HTTP header with this value.
2666 2667
2667 2668 The value may contain a special string ``%nonce%``, which will be replaced
2668 2669 by a randomly-generated one-time use value. If the value contains
2669 2670 ``%nonce%``, ``web.cache`` will be disabled, as caching undermines the
2670 2671 one-time property of the nonce. This nonce will also be inserted into
2671 2672 ``<script>`` elements containing inline JavaScript.
2672 2673
2673 2674 Note: lots of HTML content sent by the server is derived from repository
2674 2675 data. Please consider the potential for malicious repository data to
2675 2676 "inject" itself into generated HTML content as part of your security
2676 2677 threat model.
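
As an illustration, a policy restricting scripts to those carrying the
generated nonce might look like the following (the exact policy is an
example, not a recommendation)::

  [web]
  csp = script-src 'nonce-%nonce%'; default-src 'self'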
2677 2678
2678 2679 ``deny_push``
2679 2680 Whether to deny pushing to the repository. If empty or not set,
2680 2681 push is not denied. If the special value ``*``, all remote users are
2681 2682 denied push. Otherwise, unauthenticated users are all denied, and
2682 2683 any authenticated user name present in this list is also denied. The
2683 2684 contents of the deny_push list are examined before the allow-push list.
2684 2685
2685 2686 ``deny_read``
2686 2687 Whether to deny reading/viewing of the repository. If this list is
2687 2688 not empty, unauthenticated users are all denied, and any
2688 2689 authenticated user name present in this list is also denied access to
2689 2690 the repository. If set to the special value ``*``, all remote users
2690 2691 are denied access (rarely needed ;). If deny_read is empty or not set,
2691 2692 the determination of repository access depends on the presence and
2692 2693 content of the allow_read list (see description). If both
2693 2694 deny_read and allow_read are empty or not set, then access is
2694 2695 permitted to all users by default. If the repository is being
2695 2696 served via hgwebdir, denied users will not be able to see it in
2696 2697 the list of repositories. The contents of the deny_read list have
2697 2698 priority over (are examined before) the contents of the allow_read
2698 2699 list.
2699 2700
2700 2701 ``descend``
2701 2702 hgwebdir indexes will not descend into subdirectories. Only repositories
2702 2703 directly in the current path will be shown (other repositories are still
2703 2704 available from the index corresponding to their containing path).
2704 2705
2705 2706 ``description``
2706 2707 Textual description of the repository's purpose or contents.
2707 2708 (default: "unknown")
2708 2709
2709 2710 ``encoding``
2710 2711 Character encoding name. (default: the current locale charset)
2711 2712 Example: "UTF-8".
2712 2713
2713 2714 ``errorlog``
2714 2715 Where to output the error log. (default: stderr)
2715 2716
2716 2717 ``guessmime``
2717 2718 Control MIME types for raw download of file content.
2718 2719 Set to True to let hgweb guess the content type from the file
2719 2720 extension. This will serve HTML files as ``text/html`` and might
2720 2721 allow cross-site scripting attacks when serving untrusted
2721 2722 repositories. (default: False)
2722 2723
2723 2724 ``hidden``
2724 2725 Whether to hide the repository in the hgwebdir index.
2725 2726 (default: False)
2726 2727
2727 2728 ``ipv6``
2728 2729 Whether to use IPv6. (default: False)
2729 2730
2730 2731 ``labels``
2731 2732 List of string *labels* associated with the repository.
2732 2733
2733 2734 Labels are exposed as a template keyword and can be used to customize
2734 2735 output. e.g. the ``index`` template can group or filter repositories
2735 2736 by labels and the ``summary`` template can display additional content
2736 2737 if a specific label is present.
2737 2738
2738 2739 ``logoimg``
2739 2740 File name of the logo image that some templates display on each page.
2740 2741 The file name is relative to ``staticurl``. That is, the full path to
2741 2742 the logo image is "staticurl/logoimg".
2742 2743 If unset, ``hglogo.png`` will be used.
2743 2744
2744 2745 ``logourl``
2745 2746 Base URL to use for logos. If unset, ``https://mercurial-scm.org/``
2746 2747 will be used.
2747 2748
2748 2749 ``maxchanges``
2749 2750 Maximum number of changes to list on the changelog. (default: 10)
2750 2751
2751 2752 ``maxfiles``
2752 2753 Maximum number of files to list per changeset. (default: 10)
2753 2754
2754 2755 ``maxshortchanges``
2755 2756 Maximum number of changes to list on the shortlog, graph or filelog
2756 2757 pages. (default: 60)
2757 2758
2758 2759 ``name``
2759 2760 Repository name to use in the web interface.
2760 2761 (default: current working directory)
2761 2762
2762 2763 ``port``
2763 2764 Port to listen on. (default: 8000)
2764 2765
2765 2766 ``prefix``
2766 2767 Prefix path to serve from. (default: '' (server root))
2767 2768
2768 2769 ``push_ssl``
2769 2770 Whether to require that inbound pushes be transported over SSL to
2770 2771 prevent password sniffing. (default: True)
2771 2772
2772 2773 ``refreshinterval``
2773 2774 How frequently directory listings re-scan the filesystem for new
2774 2775 repositories, in seconds. This is relevant when wildcards are used
2775 2776 to define paths. Depending on how much filesystem traversal is
2776 2777 required, refreshing may negatively impact performance.
2777 2778
2778 2779 Values less than or equal to 0 always refresh.
2779 2780 (default: 20)
2780 2781
2781 2782 ``server-header``
2782 2783 Value for HTTP ``Server`` response header.
2783 2784
2784 2785 ``static``
2785 2786 Directory where static files are served from.
2786 2787
2787 2788 ``staticurl``
2788 2789 Base URL to use for static files. If unset, static files (e.g. the
2789 2790 hgicon.png favicon) will be served by the CGI script itself. Use
2790 2791 this setting to serve them directly with the HTTP server.
2791 2792 Example: ``http://hgserver/static/``.
2792 2793
2793 2794 ``stripes``
2794 2795 How many lines a "zebra stripe" should span in multi-line output.
2795 2796 Set to 0 to disable. (default: 1)
2796 2797
2797 2798 ``style``
2798 2799 Which template map style to use. The available options are the names of
2799 2800 subdirectories in the HTML templates path. (default: ``paper``)
2800 2801 Example: ``monoblue``.
2801 2802
2802 2803 ``templates``
2803 2804 Where to find the HTML templates. The default path to the HTML templates
2804 2805 can be obtained from ``hg debuginstall``.
2805 2806
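As an illustration only, a ``web`` section combining several of the options
described above might look like this (example values, not recommendations)::

    [web]
    name = my-project
    description = Example repository served by hgweb
    style = paper
    maxchanges = 20
    staticurl = http://hgserver/static/
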
2806 2807 ``websub``
2807 2808 ----------
2808 2809
2809 2810 Web substitution filter definition. You can use this section to
2810 2811 define a set of regular expression substitution patterns which
2811 2812 let you automatically modify the hgweb server output.
2812 2813
2813 2814 The default hgweb templates only apply these substitution patterns
2814 2815 on the revision description fields. You can apply them anywhere
2815 2816 you want when you create your own templates by adding calls to the
2816 2817 "websub" filter (usually after calling the "escape" filter).
2817 2818
2818 2819 This can be used, for example, to convert issue references to links
2819 2820 to your issue tracker, or to convert "markdown-like" syntax into
2820 2821 HTML (see the examples below).
2821 2822
2822 2823 Each entry in this section names a substitution filter.
2823 2824 The value of each entry defines the substitution expression itself.
2824 2825 The websub expressions follow the old interhg extension syntax,
2825 2826 which in turn imitates the Unix sed replacement syntax::
2826 2827
2827 2828 patternname = s/SEARCH_REGEX/REPLACE_EXPRESSION/[i]
2828 2829
2829 2830 You can use any separator other than "/". The final "i" is optional
2830 2831 and indicates that the search must be case insensitive.
2831 2832
2832 2833 Examples::
2833 2834
2834 2835 [websub]
2835 2836 issues = s|issue(\d+)|<a href="http://bts.example.org/issue\1">issue\1</a>|i
2836 2837 italic = s/\b_(\S+)_\b/<i>\1<\/i>/
2837 2838 bold = s/\*\b(\S+)\b\*/<b>\1<\/b>/
2838 2839
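For a rough idea of what such a filter does, the ``issues`` pattern above is
approximately equivalent to the following Python snippet (an editorial
sketch for illustration, not hgweb's actual implementation)::

    import re

    # the trailing "i" flag makes the search case insensitive
    pattern = re.compile(r'issue(\d+)', re.IGNORECASE)
    replacement = r'<a href="http://bts.example.org/issue\1">issue\1</a>'
    print(pattern.sub(replacement, 'Fixes issue42 and Issue7'))
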
2839 2840 ``worker``
2840 2841 ----------
2841 2842
2842 2843 Parallel master/worker configuration. We currently perform working
2843 2844 directory updates in parallel on Unix-like systems, which greatly
2844 2845 helps performance.
2845 2846
2846 2847 ``enabled``
2847 2848 Whether to enable the use of workers.
2848 2849 (default: true)
2849 2850
2850 2851 ``numcpus``
2851 2852 Number of CPUs to use for parallel operations. A zero or
2852 2853 negative value is treated as ``use the default``.
2853 2854 (default: 4 or the number of CPUs on the system, whichever is larger)
2854 2855
2855 2856 ``backgroundclose``
2856 2857 Whether to enable closing file handles on background threads during certain
2857 2858 operations. Some platforms aren't very efficient at closing file
2858 2859 handles that have been written or appended to. By performing file closing
2859 2860 on background threads, file write rate can increase substantially.
2860 2861 (default: true on Windows, false elsewhere)
2861 2862
2862 2863 ``backgroundcloseminfilecount``
2863 2864 Minimum number of files required to trigger background file closing.
2864 2865 Operations not writing this many files won't start background close
2865 2866 threads.
2866 2867 (default: 2048)
2867 2868
2868 2869 ``backgroundclosemaxqueue``
2869 2870 The maximum number of opened file handles waiting to be closed in the
2870 2871 background. This option only has an effect if ``backgroundclose`` is
2871 2872 enabled.
2872 2873 (default: 384)
2873 2874
2874 2875 ``backgroundclosethreadcount``
2875 2876 Number of threads to process background file closes. Only relevant if
2876 2877 ``backgroundclose`` is enabled.
2877 2878 (default: 4)
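
As a concrete illustration, worker behaviour could be tuned with a
configuration such as the following (example values only)::

    [worker]
    enabled = True
    numcpus = 8
    backgroundclose = True
    backgroundcloseminfilecount = 1024
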
@@ -1,3793 +1,3796 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 )
25 25 from .pycompat import (
26 26 delattr,
27 27 getattr,
28 28 )
29 29 from . import (
30 30 bookmarks,
31 31 branchmap,
32 32 bundle2,
33 33 changegroup,
34 34 color,
35 35 context,
36 36 dirstate,
37 37 dirstateguard,
38 38 discovery,
39 39 encoding,
40 40 error,
41 41 exchange,
42 42 extensions,
43 43 filelog,
44 44 hook,
45 45 lock as lockmod,
46 46 match as matchmod,
47 47 merge as mergemod,
48 48 mergeutil,
49 49 namespaces,
50 50 narrowspec,
51 51 obsolete,
52 52 pathutil,
53 53 phases,
54 54 pushkey,
55 55 pycompat,
56 56 rcutil,
57 57 repoview,
58 58 revset,
59 59 revsetlang,
60 60 scmutil,
61 61 sparse,
62 62 store as storemod,
63 63 subrepoutil,
64 64 tags as tagsmod,
65 65 transaction,
66 66 txnutil,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70
71 71 from .interfaces import (
72 72 repository,
73 73 util as interfaceutil,
74 74 )
75 75
76 76 from .utils import (
77 77 hashutil,
78 78 procutil,
79 79 stringutil,
80 80 )
81 81
82 82 from .revlogutils import constants as revlogconst
83 83
84 84 release = lockmod.release
85 85 urlerr = util.urlerr
86 86 urlreq = util.urlreq
87 87
88 88 # set of (path, vfs-location) tuples. vfs-location is:
89 89 # - 'plain' for vfs relative paths
90 90 # - '' for svfs relative paths
91 91 _cachedfiles = set()
92 92
93 93
94 94 class _basefilecache(scmutil.filecache):
95 95 """All filecache usage on repo are done for logic that should be unfiltered
96 96 """
97 97
98 98 def __get__(self, repo, type=None):
99 99 if repo is None:
100 100 return self
101 101 # proxy to unfiltered __dict__ since filtered repo has no entry
102 102 unfi = repo.unfiltered()
103 103 try:
104 104 return unfi.__dict__[self.sname]
105 105 except KeyError:
106 106 pass
107 107 return super(_basefilecache, self).__get__(unfi, type)
108 108
109 109 def set(self, repo, value):
110 110 return super(_basefilecache, self).set(repo.unfiltered(), value)
111 111
112 112
113 113 class repofilecache(_basefilecache):
114 114 """filecache for files in .hg but outside of .hg/store"""
115 115
116 116 def __init__(self, *paths):
117 117 super(repofilecache, self).__init__(*paths)
118 118 for path in paths:
119 119 _cachedfiles.add((path, b'plain'))
120 120
121 121 def join(self, obj, fname):
122 122 return obj.vfs.join(fname)
123 123
124 124
125 125 class storecache(_basefilecache):
126 126 """filecache for files in the store"""
127 127
128 128 def __init__(self, *paths):
129 129 super(storecache, self).__init__(*paths)
130 130 for path in paths:
131 131 _cachedfiles.add((path, b''))
132 132
133 133 def join(self, obj, fname):
134 134 return obj.sjoin(fname)
135 135
136 136
137 137 class mixedrepostorecache(_basefilecache):
138 138 """filecache for a mix files in .hg/store and outside"""
139 139
140 140 def __init__(self, *pathsandlocations):
141 141 # scmutil.filecache only uses the path for passing back into our
142 142 # join(), so we can safely pass a list of paths and locations
143 143 super(mixedrepostorecache, self).__init__(*pathsandlocations)
144 144 _cachedfiles.update(pathsandlocations)
145 145
146 146 def join(self, obj, fnameandlocation):
147 147 fname, location = fnameandlocation
148 148 if location == b'plain':
149 149 return obj.vfs.join(fname)
150 150 else:
151 151 if location != b'':
152 152 raise error.ProgrammingError(
153 153 b'unexpected location: %s' % location
154 154 )
155 155 return obj.sjoin(fname)
156 156
157 157
158 158 def isfilecached(repo, name):
159 159 """check if a repo has already cached "name" filecache-ed property
160 160
161 161 This returns (cachedobj-or-None, iscached) tuple.
162 162 """
163 163 cacheentry = repo.unfiltered()._filecache.get(name, None)
164 164 if not cacheentry:
165 165 return None, False
166 166 return cacheentry.obj, True
167 167
168 168
169 169 class unfilteredpropertycache(util.propertycache):
170 170 """propertycache that apply to unfiltered repo only"""
171 171
172 172 def __get__(self, repo, type=None):
173 173 unfi = repo.unfiltered()
174 174 if unfi is repo:
175 175 return super(unfilteredpropertycache, self).__get__(unfi)
176 176 return getattr(unfi, self.name)
177 177
178 178
179 179 class filteredpropertycache(util.propertycache):
180 180 """propertycache that must take filtering in account"""
181 181
182 182 def cachevalue(self, obj, value):
183 183 object.__setattr__(obj, self.name, value)
184 184
185 185
186 186 def hasunfilteredcache(repo, name):
187 187 """check if a repo has an unfilteredpropertycache value for <name>"""
188 188 return name in vars(repo.unfiltered())
189 189
190 190
191 191 def unfilteredmethod(orig):
192 192 """decorate method that always need to be run on unfiltered version"""
193 193
194 194 def wrapper(repo, *args, **kwargs):
195 195 return orig(repo.unfiltered(), *args, **kwargs)
196 196
197 197 return wrapper
198 198
199 199
200 200 moderncaps = {
201 201 b'lookup',
202 202 b'branchmap',
203 203 b'pushkey',
204 204 b'known',
205 205 b'getbundle',
206 206 b'unbundle',
207 207 }
208 208 legacycaps = moderncaps.union({b'changegroupsubset'})
209 209
210 210
211 211 @interfaceutil.implementer(repository.ipeercommandexecutor)
212 212 class localcommandexecutor(object):
213 213 def __init__(self, peer):
214 214 self._peer = peer
215 215 self._sent = False
216 216 self._closed = False
217 217
218 218 def __enter__(self):
219 219 return self
220 220
221 221 def __exit__(self, exctype, excvalue, exctb):
222 222 self.close()
223 223
224 224 def callcommand(self, command, args):
225 225 if self._sent:
226 226 raise error.ProgrammingError(
227 227 b'callcommand() cannot be used after sendcommands()'
228 228 )
229 229
230 230 if self._closed:
231 231 raise error.ProgrammingError(
232 232 b'callcommand() cannot be used after close()'
233 233 )
234 234
235 235 # We don't need to support anything fancy. Just call the named
236 236 # method on the peer and return a resolved future.
237 237 fn = getattr(self._peer, pycompat.sysstr(command))
238 238
239 239 f = pycompat.futures.Future()
240 240
241 241 try:
242 242 result = fn(**pycompat.strkwargs(args))
243 243 except Exception:
244 244 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
245 245 else:
246 246 f.set_result(result)
247 247
248 248 return f
249 249
250 250 def sendcommands(self):
251 251 self._sent = True
252 252
253 253 def close(self):
254 254 self._closed = True
255 255
256 256
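
# Editorial illustration, not part of localrepo.py: the executor above is
# normally driven through the generic peer interface rather than used
# directly. Assuming "repo" is an already-opened local repository, a lookup
# of the repository heads could be sketched as follows.
def _example_heads(repo):
    peer = repo.peer()
    with peer.commandexecutor() as executor:
        # callcommand() returns a future; for the local executor it is
        # already resolved, so result() returns immediately.
        return executor.callcommand(b'heads', {}).result()
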
257 257 @interfaceutil.implementer(repository.ipeercommands)
258 258 class localpeer(repository.peer):
259 259 '''peer for a local repo; reflects only the most recent API'''
260 260
261 261 def __init__(self, repo, caps=None):
262 262 super(localpeer, self).__init__()
263 263
264 264 if caps is None:
265 265 caps = moderncaps.copy()
266 266 self._repo = repo.filtered(b'served')
267 267 self.ui = repo.ui
268 268 self._caps = repo._restrictcapabilities(caps)
269 269
270 270 # Begin of _basepeer interface.
271 271
272 272 def url(self):
273 273 return self._repo.url()
274 274
275 275 def local(self):
276 276 return self._repo
277 277
278 278 def peer(self):
279 279 return self
280 280
281 281 def canpush(self):
282 282 return True
283 283
284 284 def close(self):
285 285 self._repo.close()
286 286
287 287 # End of _basepeer interface.
288 288
289 289 # Begin of _basewirecommands interface.
290 290
291 291 def branchmap(self):
292 292 return self._repo.branchmap()
293 293
294 294 def capabilities(self):
295 295 return self._caps
296 296
297 297 def clonebundles(self):
298 298 return self._repo.tryread(b'clonebundles.manifest')
299 299
300 300 def debugwireargs(self, one, two, three=None, four=None, five=None):
301 301 """Used to test argument passing over the wire"""
302 302 return b"%s %s %s %s %s" % (
303 303 one,
304 304 two,
305 305 pycompat.bytestr(three),
306 306 pycompat.bytestr(four),
307 307 pycompat.bytestr(five),
308 308 )
309 309
310 310 def getbundle(
311 311 self, source, heads=None, common=None, bundlecaps=None, **kwargs
312 312 ):
313 313 chunks = exchange.getbundlechunks(
314 314 self._repo,
315 315 source,
316 316 heads=heads,
317 317 common=common,
318 318 bundlecaps=bundlecaps,
319 319 **kwargs
320 320 )[1]
321 321 cb = util.chunkbuffer(chunks)
322 322
323 323 if exchange.bundle2requested(bundlecaps):
324 324 # When requesting a bundle2, getbundle returns a stream to make the
325 325 # wire level function happier. We need to build a proper object
326 326 # from it in local peer.
327 327 return bundle2.getunbundler(self.ui, cb)
328 328 else:
329 329 return changegroup.getunbundler(b'01', cb, None)
330 330
331 331 def heads(self):
332 332 return self._repo.heads()
333 333
334 334 def known(self, nodes):
335 335 return self._repo.known(nodes)
336 336
337 337 def listkeys(self, namespace):
338 338 return self._repo.listkeys(namespace)
339 339
340 340 def lookup(self, key):
341 341 return self._repo.lookup(key)
342 342
343 343 def pushkey(self, namespace, key, old, new):
344 344 return self._repo.pushkey(namespace, key, old, new)
345 345
346 346 def stream_out(self):
347 347 raise error.Abort(_(b'cannot perform stream clone against local peer'))
348 348
349 349 def unbundle(self, bundle, heads, url):
350 350 """apply a bundle on a repo
351 351
352 352 This function handles the repo locking itself."""
353 353 try:
354 354 try:
355 355 bundle = exchange.readbundle(self.ui, bundle, None)
356 356 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
357 357 if util.safehasattr(ret, b'getchunks'):
358 358 # This is a bundle20 object, turn it into an unbundler.
359 359 # This little dance should be dropped eventually when the
360 360 # API is finally improved.
361 361 stream = util.chunkbuffer(ret.getchunks())
362 362 ret = bundle2.getunbundler(self.ui, stream)
363 363 return ret
364 364 except Exception as exc:
365 365 # If the exception contains output salvaged from a bundle2
366 366 # reply, we need to make sure it is printed before continuing
367 367 # to fail. So we build a bundle2 with such output and consume
368 368 # it directly.
369 369 #
370 370 # This is not very elegant but allows a "simple" solution for
371 371 # issue4594
372 372 output = getattr(exc, '_bundle2salvagedoutput', ())
373 373 if output:
374 374 bundler = bundle2.bundle20(self._repo.ui)
375 375 for out in output:
376 376 bundler.addpart(out)
377 377 stream = util.chunkbuffer(bundler.getchunks())
378 378 b = bundle2.getunbundler(self.ui, stream)
379 379 bundle2.processbundle(self._repo, b)
380 380 raise
381 381 except error.PushRaced as exc:
382 382 raise error.ResponseError(
383 383 _(b'push failed:'), stringutil.forcebytestr(exc)
384 384 )
385 385
386 386 # End of _basewirecommands interface.
387 387
388 388 # Begin of peer interface.
389 389
390 390 def commandexecutor(self):
391 391 return localcommandexecutor(self)
392 392
393 393 # End of peer interface.
394 394
395 395
396 396 @interfaceutil.implementer(repository.ipeerlegacycommands)
397 397 class locallegacypeer(localpeer):
398 398 '''peer extension which implements legacy methods too; used for tests with
399 399 restricted capabilities'''
400 400
401 401 def __init__(self, repo):
402 402 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
403 403
404 404 # Begin of baselegacywirecommands interface.
405 405
406 406 def between(self, pairs):
407 407 return self._repo.between(pairs)
408 408
409 409 def branches(self, nodes):
410 410 return self._repo.branches(nodes)
411 411
412 412 def changegroup(self, nodes, source):
413 413 outgoing = discovery.outgoing(
414 414 self._repo, missingroots=nodes, missingheads=self._repo.heads()
415 415 )
416 416 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
417 417
418 418 def changegroupsubset(self, bases, heads, source):
419 419 outgoing = discovery.outgoing(
420 420 self._repo, missingroots=bases, missingheads=heads
421 421 )
422 422 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
423 423
424 424 # End of baselegacywirecommands interface.
425 425
426 426
427 427 # Increment the sub-version when the revlog v2 format changes to lock out old
428 428 # clients.
429 429 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
430 430
431 431 # A repository with the sparserevlog feature will have delta chains that
432 432 # can spread over a larger span. Sparse reading cuts these large spans into
433 433 # pieces, so that each piece isn't too big.
434 434 # Without the sparserevlog capability, reading from the repository could use
435 435 # huge amounts of memory, because the whole span would be read at once,
436 436 # including all the intermediate revisions that aren't pertinent for the chain.
437 437 # This is why once a repository has enabled sparse-read, it becomes required.
438 438 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
439 439
440 440 # A repository with the sidedataflag requirement will allow storing extra
441 441 # information for revisions without altering their original hashes.
442 442 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
443 443
444 444 # A repository with the copies-sidedata-changeset requirement will store
445 445 # copies related information in changeset's sidedata.
446 446 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
447 447
448 448 # Functions receiving (ui, features) that extensions can register to impact
449 449 # the ability to load repositories with custom requirements. Only
450 450 # functions defined in loaded extensions are called.
451 451 #
452 452 # The function receives a set of requirement strings that the repository
453 453 # is capable of opening. Functions will typically add elements to the
454 454 # set to reflect that the extension knows how to handle those requirements.
455 455 featuresetupfuncs = set()
456 456
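
# Editorial illustration, not part of localrepo.py: an extension would
# typically register a feature-setup function from its own module (via
# "from mercurial import localrepo" and localrepo.featuresetupfuncs), so that
# repositories carrying its custom requirement remain openable. The
# requirement string below is hypothetical.
def _example_featuresetup(ui, supported):
    # advertise an extension-specific requirement
    supported.add(b'exp-myfeature')


def _example_uisetup(ui):
    # in a real extension this would be localrepo.featuresetupfuncs
    featuresetupfuncs.add(_example_featuresetup)
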
457 457
458 458 def makelocalrepository(baseui, path, intents=None):
459 459 """Create a local repository object.
460 460
461 461 Given arguments needed to construct a local repository, this function
462 462 performs various early repository loading functionality (such as
463 463 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
464 464 the repository can be opened, derives a type suitable for representing
465 465 that repository, and returns an instance of it.
466 466
467 467 The returned object conforms to the ``repository.completelocalrepository``
468 468 interface.
469 469
470 470 The repository type is derived by calling a series of factory functions
471 471 for each aspect/interface of the final repository. These are defined by
472 472 ``REPO_INTERFACES``.
473 473
474 474 Each factory function is called to produce a type implementing a specific
475 475 interface. The cumulative list of returned types will be combined into a
476 476 new type and that type will be instantiated to represent the local
477 477 repository.
478 478
479 479 The factory functions each receive various state that may be consulted
480 480 as part of deriving a type.
481 481
482 482 Extensions should wrap these factory functions to customize repository type
483 483 creation. Note that an extension's wrapped function may be called even if
484 484 that extension is not loaded for the repo being constructed. Extensions
485 485 should check if their ``__name__`` appears in the
486 486 ``extensionmodulenames`` set passed to the factory function and no-op if
487 487 not.
488 488 """
489 489 ui = baseui.copy()
490 490 # Prevent copying repo configuration.
491 491 ui.copy = baseui.copy
492 492
493 493 # Working directory VFS rooted at repository root.
494 494 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
495 495
496 496 # Main VFS for .hg/ directory.
497 497 hgpath = wdirvfs.join(b'.hg')
498 498 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
499 499
500 500 # The .hg/ path should exist and should be a directory. All other
501 501 # cases are errors.
502 502 if not hgvfs.isdir():
503 503 try:
504 504 hgvfs.stat()
505 505 except OSError as e:
506 506 if e.errno != errno.ENOENT:
507 507 raise
508 508
509 509 raise error.RepoError(_(b'repository %s not found') % path)
510 510
511 511 # .hg/requires file contains a newline-delimited list of
512 512 # features/capabilities the opener (us) must have in order to use
513 513 # the repository. This file was introduced in Mercurial 0.9.2,
514 514 # which means very old repositories may not have one. We assume
515 515 # a missing file translates to no requirements.
516 516 try:
517 517 requirements = set(hgvfs.read(b'requires').splitlines())
518 518 except IOError as e:
519 519 if e.errno != errno.ENOENT:
520 520 raise
521 521 requirements = set()
522 522
523 523 # The .hg/hgrc file may load extensions or contain config options
524 524 # that influence repository construction. Attempt to load it and
525 525 # process any new extensions that it may have pulled in.
526 526 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
527 527 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
528 528 extensions.loadall(ui)
529 529 extensions.populateui(ui)
530 530
531 531 # Set of module names of extensions loaded for this repository.
532 532 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
533 533
534 534 supportedrequirements = gathersupportedrequirements(ui)
535 535
536 536 # We first validate the requirements are known.
537 537 ensurerequirementsrecognized(requirements, supportedrequirements)
538 538
539 539 # Then we validate that the known set is reasonable to use together.
540 540 ensurerequirementscompatible(ui, requirements)
541 541
542 542 # TODO there are unhandled edge cases related to opening repositories with
543 543 # shared storage. If storage is shared, we should also test for requirements
544 544 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
545 545 # that repo, as that repo may load extensions needed to open it. This is a
546 546 # bit complicated because we don't want the other hgrc to overwrite settings
547 547 # in this hgrc.
548 548 #
549 549 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
550 550 # file when sharing repos. But if a requirement is added after the share is
551 551 # performed, thereby introducing a new requirement for the opener, we
552 552 # may not see that and could encounter a run-time error interacting with
553 553 # that shared store since it has an unknown-to-us requirement.
554 554
555 555 # At this point, we know we should be capable of opening the repository.
556 556 # Now get on with doing that.
557 557
558 558 features = set()
559 559
560 560 # The "store" part of the repository holds versioned data. How it is
561 561 # accessed is determined by various requirements. The ``shared`` or
562 562 # ``relshared`` requirements indicate the store lives in the path contained
563 563 # in the ``.hg/sharedpath`` file. This is an absolute path for
564 564 # ``shared`` and relative to ``.hg/`` for ``relshared``.
565 565 if b'shared' in requirements or b'relshared' in requirements:
566 566 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
567 567 if b'relshared' in requirements:
568 568 sharedpath = hgvfs.join(sharedpath)
569 569
570 570 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
571 571
572 572 if not sharedvfs.exists():
573 573 raise error.RepoError(
574 574 _(b'.hg/sharedpath points to nonexistent directory %s')
575 575 % sharedvfs.base
576 576 )
577 577
578 578 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
579 579
580 580 storebasepath = sharedvfs.base
581 581 cachepath = sharedvfs.join(b'cache')
582 582 else:
583 583 storebasepath = hgvfs.base
584 584 cachepath = hgvfs.join(b'cache')
585 585 wcachepath = hgvfs.join(b'wcache')
586 586
587 587 # The store has changed over time and the exact layout is dictated by
588 588 # requirements. The store interface abstracts differences across all
589 589 # of them.
590 590 store = makestore(
591 591 requirements,
592 592 storebasepath,
593 593 lambda base: vfsmod.vfs(base, cacheaudited=True),
594 594 )
595 595 hgvfs.createmode = store.createmode
596 596
597 597 storevfs = store.vfs
598 598 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
599 599
600 600 # The cache vfs is used to manage cache files.
601 601 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
602 602 cachevfs.createmode = store.createmode
603 603 # The cache vfs is used to manage cache files related to the working copy
604 604 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
605 605 wcachevfs.createmode = store.createmode
606 606
607 607 # Now resolve the type for the repository object. We do this by repeatedly
608 608 # calling a factory function to produce types for specific aspects of the
609 609 # repo's operation. The aggregate returned types are used as base classes
610 610 # for a dynamically-derived type, which will represent our new repository.
611 611
612 612 bases = []
613 613 extrastate = {}
614 614
615 615 for iface, fn in REPO_INTERFACES:
616 616 # We pass all potentially useful state to give extensions tons of
617 617 # flexibility.
618 618 typ = fn()(
619 619 ui=ui,
620 620 intents=intents,
621 621 requirements=requirements,
622 622 features=features,
623 623 wdirvfs=wdirvfs,
624 624 hgvfs=hgvfs,
625 625 store=store,
626 626 storevfs=storevfs,
627 627 storeoptions=storevfs.options,
628 628 cachevfs=cachevfs,
629 629 wcachevfs=wcachevfs,
630 630 extensionmodulenames=extensionmodulenames,
631 631 extrastate=extrastate,
632 632 baseclasses=bases,
633 633 )
634 634
635 635 if not isinstance(typ, type):
636 636 raise error.ProgrammingError(
637 637 b'unable to construct type for %s' % iface
638 638 )
639 639
640 640 bases.append(typ)
641 641
642 642 # type() allows you to use characters in type names that wouldn't be
643 643 # recognized as Python symbols in source code. We abuse that to add
644 644 # rich information about our constructed repo.
645 645 name = pycompat.sysstr(
646 646 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
647 647 )
648 648
649 649 cls = type(name, tuple(bases), {})
650 650
651 651 return cls(
652 652 baseui=baseui,
653 653 ui=ui,
654 654 origroot=path,
655 655 wdirvfs=wdirvfs,
656 656 hgvfs=hgvfs,
657 657 requirements=requirements,
658 658 supportedrequirements=supportedrequirements,
659 659 sharedpath=storebasepath,
660 660 store=store,
661 661 cachevfs=cachevfs,
662 662 wcachevfs=wcachevfs,
663 663 features=features,
664 664 intents=intents,
665 665 )
666 666
667 667
668 668 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
669 669 """Load hgrc files/content into a ui instance.
670 670
671 671 This is called during repository opening to load any additional
672 672 config files or settings relevant to the current repository.
673 673
674 674 Returns a bool indicating whether any additional configs were loaded.
675 675
676 676 Extensions should monkeypatch this function to modify how per-repo
677 677 configs are loaded. For example, an extension may wish to pull in
678 678 configs from alternate files or sources.
679 679 """
680 680 if not rcutil.use_repo_hgrc():
681 681 return False
682 682 try:
683 683 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
684 684 return True
685 685 except IOError:
686 686 return False
687 687
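# Editorial illustration, not part of localrepo.py: an extension that wants
# to pull per-repository configuration from an additional source can wrap
# loadhgrc() above. The imports belong in that hypothetical extension, and
# the extra file name 'hgrc-extra' is made up for the example.
from mercurial import extensions, localrepo


def _wrappedloadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
    loaded = orig(ui, wdirvfs, hgvfs, requirements)
    try:
        # read an extra, repo-local config file living next to .hg/hgrc
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        return True
    except IOError:
        return loaded


def _example_extsetup(ui):
    extensions.wrapfunction(localrepo, 'loadhgrc', _wrappedloadhgrc)
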
688 688
689 689 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
690 690 """Perform additional actions after .hg/hgrc is loaded.
691 691
692 692 This function is called during repository loading immediately after
693 693 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
694 694
695 695 The function can be used to validate configs, automatically add
696 696 options (including extensions) based on requirements, etc.
697 697 """
698 698
699 699 # Map of requirements to list of extensions to load automatically when
700 700 # requirement is present.
701 701 autoextensions = {
702 702 b'largefiles': [b'largefiles'],
703 703 b'lfs': [b'lfs'],
704 704 }
705 705
706 706 for requirement, names in sorted(autoextensions.items()):
707 707 if requirement not in requirements:
708 708 continue
709 709
710 710 for name in names:
711 711 if not ui.hasconfig(b'extensions', name):
712 712 ui.setconfig(b'extensions', name, b'', source=b'autoload')
713 713
714 714
715 715 def gathersupportedrequirements(ui):
716 716 """Determine the complete set of recognized requirements."""
717 717 # Start with all requirements supported by this file.
718 718 supported = set(localrepository._basesupported)
719 719
720 720 # Execute ``featuresetupfuncs`` entries if they belong to an extension
721 721 # relevant to this ui instance.
722 722 modules = {m.__name__ for n, m in extensions.extensions(ui)}
723 723
724 724 for fn in featuresetupfuncs:
725 725 if fn.__module__ in modules:
726 726 fn(ui, supported)
727 727
728 728 # Add derived requirements from registered compression engines.
729 729 for name in util.compengines:
730 730 engine = util.compengines[name]
731 731 if engine.available() and engine.revlogheader():
732 732 supported.add(b'exp-compression-%s' % name)
733 733 if engine.name() == b'zstd':
734 734 supported.add(b'revlog-compression-zstd')
735 735
736 736 return supported
737 737
738 738
739 739 def ensurerequirementsrecognized(requirements, supported):
740 740 """Validate that a set of local requirements is recognized.
741 741
742 742 Receives a set of requirements. Raises an ``error.RepoError`` if there
743 743 exists any requirement in that set that currently loaded code doesn't
744 744 recognize.
745 745
746 746 Returns a set of supported requirements.
747 747 """
748 748 missing = set()
749 749
750 750 for requirement in requirements:
751 751 if requirement in supported:
752 752 continue
753 753
754 754 if not requirement or not requirement[0:1].isalnum():
755 755 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
756 756
757 757 missing.add(requirement)
758 758
759 759 if missing:
760 760 raise error.RequirementError(
761 761 _(b'repository requires features unknown to this Mercurial: %s')
762 762 % b' '.join(sorted(missing)),
763 763 hint=_(
764 764 b'see https://mercurial-scm.org/wiki/MissingRequirement '
765 765 b'for more information'
766 766 ),
767 767 )
768 768
769 769
770 770 def ensurerequirementscompatible(ui, requirements):
771 771 """Validates that a set of recognized requirements is mutually compatible.
772 772
773 773 Some requirements may not be compatible with others or require
774 774 config options that aren't enabled. This function is called during
775 775 repository opening to ensure that the set of requirements needed
776 776 to open a repository is sane and compatible with config options.
777 777
778 778 Extensions can monkeypatch this function to perform additional
779 779 checking.
780 780
781 781 ``error.RepoError`` should be raised on failure.
782 782 """
783 783 if b'exp-sparse' in requirements and not sparse.enabled:
784 784 raise error.RepoError(
785 785 _(
786 786 b'repository is using sparse feature but '
787 787 b'sparse is not enabled; enable the '
788 788 b'"sparse" extensions to access'
789 789 )
790 790 )
791 791
792 792
793 793 def makestore(requirements, path, vfstype):
794 794 """Construct a storage object for a repository."""
795 795 if b'store' in requirements:
796 796 if b'fncache' in requirements:
797 797 return storemod.fncachestore(
798 798 path, vfstype, b'dotencode' in requirements
799 799 )
800 800
801 801 return storemod.encodedstore(path, vfstype)
802 802
803 803 return storemod.basicstore(path, vfstype)
804 804
805 805
806 806 def resolvestorevfsoptions(ui, requirements, features):
807 807 """Resolve the options to pass to the store vfs opener.
808 808
809 809 The returned dict is used to influence behavior of the storage layer.
810 810 """
811 811 options = {}
812 812
813 813 if b'treemanifest' in requirements:
814 814 options[b'treemanifest'] = True
815 815
816 816 # experimental config: format.manifestcachesize
817 817 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
818 818 if manifestcachesize is not None:
819 819 options[b'manifestcachesize'] = manifestcachesize
820 820
821 821 # In the absence of another requirement superseding a revlog-related
822 822 # requirement, we have to assume the repo is using revlog version 0.
823 823 # This revlog format is super old and we don't bother trying to parse
824 824 # opener options for it because those options wouldn't do anything
825 825 # meaningful on such old repos.
826 826 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
827 827 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
828 828 else: # explicitly mark repo as using revlogv0
829 829 options[b'revlogv0'] = True
830 830
831 831 if COPIESSDC_REQUIREMENT in requirements:
832 832 options[b'copies-storage'] = b'changeset-sidedata'
833 833 else:
834 834 writecopiesto = ui.config(b'experimental', b'copies.write-to')
835 835 copiesextramode = (b'changeset-only', b'compatibility')
836 836 if writecopiesto in copiesextramode:
837 837 options[b'copies-storage'] = b'extra'
838 838
839 839 return options
840 840
841 841
842 842 def resolverevlogstorevfsoptions(ui, requirements, features):
843 843 """Resolve opener options specific to revlogs."""
844 844
845 845 options = {}
846 846 options[b'flagprocessors'] = {}
847 847
848 848 if b'revlogv1' in requirements:
849 849 options[b'revlogv1'] = True
850 850 if REVLOGV2_REQUIREMENT in requirements:
851 851 options[b'revlogv2'] = True
852 852
853 853 if b'generaldelta' in requirements:
854 854 options[b'generaldelta'] = True
855 855
856 856 # experimental config: format.chunkcachesize
857 857 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
858 858 if chunkcachesize is not None:
859 859 options[b'chunkcachesize'] = chunkcachesize
860 860
861 861 deltabothparents = ui.configbool(
862 862 b'storage', b'revlog.optimize-delta-parent-choice'
863 863 )
864 864 options[b'deltabothparents'] = deltabothparents
865 865
866 866 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
867 867 lazydeltabase = False
868 868 if lazydelta:
869 869 lazydeltabase = ui.configbool(
870 870 b'storage', b'revlog.reuse-external-delta-parent'
871 871 )
872 872 if lazydeltabase is None:
873 873 lazydeltabase = not scmutil.gddeltaconfig(ui)
874 874 options[b'lazydelta'] = lazydelta
875 875 options[b'lazydeltabase'] = lazydeltabase
876 876
877 877 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
878 878 if 0 <= chainspan:
879 879 options[b'maxdeltachainspan'] = chainspan
880 880
881 881 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
882 882 if mmapindexthreshold is not None:
883 883 options[b'mmapindexthreshold'] = mmapindexthreshold
884 884
885 885 withsparseread = ui.configbool(b'experimental', b'sparse-read')
886 886 srdensitythres = float(
887 887 ui.config(b'experimental', b'sparse-read.density-threshold')
888 888 )
889 889 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
890 890 options[b'with-sparse-read'] = withsparseread
891 891 options[b'sparse-read-density-threshold'] = srdensitythres
892 892 options[b'sparse-read-min-gap-size'] = srmingapsize
893 893
894 894 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
895 895 options[b'sparse-revlog'] = sparserevlog
896 896 if sparserevlog:
897 897 options[b'generaldelta'] = True
898 898
899 899 sidedata = SIDEDATA_REQUIREMENT in requirements
900 900 options[b'side-data'] = sidedata
901 901
902 902 maxchainlen = None
903 903 if sparserevlog:
904 904 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
905 905 # experimental config: format.maxchainlen
906 906 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
907 907 if maxchainlen is not None:
908 908 options[b'maxchainlen'] = maxchainlen
909 909
910 910 for r in requirements:
911 911 # we allow multiple compression engine requirements to co-exist because
912 912 # strictly speaking, revlog seems to support mixed compression styles.
913 913 #
914 914 # The compression used for new entries will be "the last one"
915 915 prefix = r.startswith
916 916 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
917 917 options[b'compengine'] = r.split(b'-', 2)[2]
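# (Editorial note: with maxsplit=2, b'revlog-compression-zstd' splits into
# [b'revlog', b'compression', b'zstd'], so the engine name is recovered
# intact as the third element even if it contains b'-' itself.)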
918 918
919 919 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
920 920 if options[b'zlib.level'] is not None:
921 921 if not (0 <= options[b'zlib.level'] <= 9):
922 922 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
923 923 raise error.Abort(msg % options[b'zlib.level'])
924 924 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
925 925 if options[b'zstd.level'] is not None:
926 926 if not (0 <= options[b'zstd.level'] <= 22):
927 927 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
928 928 raise error.Abort(msg % options[b'zstd.level'])
929 929
930 930 if repository.NARROW_REQUIREMENT in requirements:
931 931 options[b'enableellipsis'] = True
932 932
933 933 if ui.configbool(b'experimental', b'rust.index'):
934 934 options[b'rust.index'] = True
935 935 if ui.configbool(b'experimental', b'exp-persistent-nodemap'):
936 936 options[b'exp-persistent-nodemap'] = True
937 937 if ui.configbool(b'experimental', b'exp-persistent-nodemap.mmap'):
938 938 options[b'exp-persistent-nodemap.mmap'] = True
939 939 if ui.configbool(b'devel', b'persistent-nodemap'):
940 940 options[b'devel-force-nodemap'] = True
941 941
942 942 return options
943 943
944 944
945 945 def makemain(**kwargs):
946 946 """Produce a type conforming to ``ilocalrepositorymain``."""
947 947 return localrepository
948 948
949 949
950 950 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
951 951 class revlogfilestorage(object):
952 952 """File storage when using revlogs."""
953 953
954 954 def file(self, path):
955 955 if path[0] == b'/':
956 956 path = path[1:]
957 957
958 958 return filelog.filelog(self.svfs, path)
959 959
960 960
961 961 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
962 962 class revlognarrowfilestorage(object):
963 963 """File storage when using revlogs and narrow files."""
964 964
965 965 def file(self, path):
966 966 if path[0] == b'/':
967 967 path = path[1:]
968 968
969 969 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
970 970
971 971
972 972 def makefilestorage(requirements, features, **kwargs):
973 973 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
974 974 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
975 975 features.add(repository.REPO_FEATURE_STREAM_CLONE)
976 976
977 977 if repository.NARROW_REQUIREMENT in requirements:
978 978 return revlognarrowfilestorage
979 979 else:
980 980 return revlogfilestorage
981 981
982 982
983 983 # List of repository interfaces and factory functions for them. Each
984 984 # will be called in order during ``makelocalrepository()`` to iteratively
985 985 # derive the final type for a local repository instance. We capture the
986 986 # function as a lambda so we don't hold a reference and the module-level
987 987 # functions can be wrapped.
988 988 REPO_INTERFACES = [
989 989 (repository.ilocalrepositorymain, lambda: makemain),
990 990 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
991 991 ]
992 992
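# Editorial illustration, not part of localrepo.py: because the factory
# functions above are looked up lazily through lambdas, an extension can wrap
# them to influence the derived repository type. The sketch below is written
# the way a hypothetical extension module might do it; the imports belong in
# that extension, and the requirement string and class are made up.
from mercurial import extensions, localrepo


class _examplefilestorage(object):
    """Placeholder for an alternative file-storage implementation."""

    def file(self, path):
        raise NotImplementedError('illustrative sketch only')


def _wrappedmakefilestorage(orig, requirements, features, **kwargs):
    if b'exp-myfeature' in requirements:
        return _examplefilestorage
    return orig(requirements, features, **kwargs)


def _example_extsetup(ui):
    extensions.wrapfunction(
        localrepo, 'makefilestorage', _wrappedmakefilestorage
    )
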
993 993
994 994 @interfaceutil.implementer(repository.ilocalrepositorymain)
995 995 class localrepository(object):
996 996 """Main class for representing local repositories.
997 997
998 998 All local repositories are instances of this class.
999 999
1000 1000 Constructed on its own, instances of this class are not usable as
1001 1001 repository objects. To obtain a usable repository object, call
1002 1002 ``hg.repository()``, ``localrepo.instance()``, or
1003 1003 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1004 1004 ``instance()`` adds support for creating new repositories.
1005 1005 ``hg.repository()`` adds more extension integration, including calling
1006 1006 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1007 1007 used.
1008 1008 """
1009 1009
1010 1010 # obsolete experimental requirements:
1011 1011 # - manifestv2: An experimental new manifest format that allowed
1012 1012 # for stem compression of long paths. Experiment ended up not
1013 1013 # being successful (repository sizes went up due to worse delta
1014 1014 # chains), and the code was deleted in 4.6.
1015 1015 supportedformats = {
1016 1016 b'revlogv1',
1017 1017 b'generaldelta',
1018 1018 b'treemanifest',
1019 1019 COPIESSDC_REQUIREMENT,
1020 1020 REVLOGV2_REQUIREMENT,
1021 1021 SIDEDATA_REQUIREMENT,
1022 1022 SPARSEREVLOG_REQUIREMENT,
1023 1023 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1024 1024 }
1025 1025 _basesupported = supportedformats | {
1026 1026 b'store',
1027 1027 b'fncache',
1028 1028 b'shared',
1029 1029 b'relshared',
1030 1030 b'dotencode',
1031 1031 b'exp-sparse',
1032 1032 b'internal-phase',
1033 1033 }
1034 1034
1035 1035 # list of prefixes for files which can be written without 'wlock'
1036 1036 # Extensions should extend this list when needed
1037 1037 _wlockfreeprefix = {
1038 1038 # We might consider requiring 'wlock' for the next
1039 1039 # two, but pretty much all the existing code assume
1040 1040 # wlock is not needed so we keep them excluded for
1041 1041 # now.
1042 1042 b'hgrc',
1043 1043 b'requires',
1044 1044 # XXX cache is a complicated business; someone
1045 1045 # should investigate this in depth at some point
1046 1046 b'cache/',
1047 1047 # XXX shouldn't dirstate be covered by the wlock?
1048 1048 b'dirstate',
1049 1049 # XXX bisect was still a bit too messy at the time
1050 1050 # this changeset was introduced. Someone should fix
1051 1051 # the remaining bit and drop this line
1052 1052 b'bisect.state',
1053 1053 }
1054 1054
1055 1055 def __init__(
1056 1056 self,
1057 1057 baseui,
1058 1058 ui,
1059 1059 origroot,
1060 1060 wdirvfs,
1061 1061 hgvfs,
1062 1062 requirements,
1063 1063 supportedrequirements,
1064 1064 sharedpath,
1065 1065 store,
1066 1066 cachevfs,
1067 1067 wcachevfs,
1068 1068 features,
1069 1069 intents=None,
1070 1070 ):
1071 1071 """Create a new local repository instance.
1072 1072
1073 1073 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1074 1074 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1075 1075 object.
1076 1076
1077 1077 Arguments:
1078 1078
1079 1079 baseui
1080 1080 ``ui.ui`` instance that ``ui`` argument was based off of.
1081 1081
1082 1082 ui
1083 1083 ``ui.ui`` instance for use by the repository.
1084 1084
1085 1085 origroot
1086 1086 ``bytes`` path to working directory root of this repository.
1087 1087
1088 1088 wdirvfs
1089 1089 ``vfs.vfs`` rooted at the working directory.
1090 1090
1091 1091 hgvfs
1092 1092 ``vfs.vfs`` rooted at .hg/
1093 1093
1094 1094 requirements
1095 1095 ``set`` of bytestrings representing repository opening requirements.
1096 1096
1097 1097 supportedrequirements
1098 1098 ``set`` of bytestrings representing repository requirements that we
1099 1099 know how to open. May be a superset of ``requirements``.
1100 1100
1101 1101 sharedpath
1102 1102 ``bytes`` Defining path to storage base directory. Points to a
1103 1103 ``.hg/`` directory somewhere.
1104 1104
1105 1105 store
1106 1106 ``store.basicstore`` (or derived) instance providing access to
1107 1107 versioned storage.
1108 1108
1109 1109 cachevfs
1110 1110 ``vfs.vfs`` used for cache files.
1111 1111
1112 1112 wcachevfs
1113 1113 ``vfs.vfs`` used for cache files related to the working copy.
1114 1114
1115 1115 features
1116 1116 ``set`` of bytestrings defining features/capabilities of this
1117 1117 instance.
1118 1118
1119 1119 intents
1120 1120 ``set`` of system strings indicating what this repo will be used
1121 1121 for.
1122 1122 """
1123 1123 self.baseui = baseui
1124 1124 self.ui = ui
1125 1125 self.origroot = origroot
1126 1126 # vfs rooted at working directory.
1127 1127 self.wvfs = wdirvfs
1128 1128 self.root = wdirvfs.base
1129 1129 # vfs rooted at .hg/. Used to access most non-store paths.
1130 1130 self.vfs = hgvfs
1131 1131 self.path = hgvfs.base
1132 1132 self.requirements = requirements
1133 1133 self.supported = supportedrequirements
1134 1134 self.sharedpath = sharedpath
1135 1135 self.store = store
1136 1136 self.cachevfs = cachevfs
1137 1137 self.wcachevfs = wcachevfs
1138 1138 self.features = features
1139 1139
1140 1140 self.filtername = None
1141 1141
1142 1142 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1143 1143 b'devel', b'check-locks'
1144 1144 ):
1145 1145 self.vfs.audit = self._getvfsward(self.vfs.audit)
1146 1146 # A list of callbacks to shape the phase if no data were found.
1147 1147 # Callbacks are in the form: func(repo, roots) --> processed root.
1148 1148 # This list is to be filled by extensions during repo setup
1149 1149 self._phasedefaults = []
1150 1150
1151 1151 color.setup(self.ui)
1152 1152
1153 1153 self.spath = self.store.path
1154 1154 self.svfs = self.store.vfs
1155 1155 self.sjoin = self.store.join
1156 1156 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1157 1157 b'devel', b'check-locks'
1158 1158 ):
1159 1159 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1160 1160 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1161 1161 else: # standard vfs
1162 1162 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1163 1163
1164 1164 self._dirstatevalidatewarned = False
1165 1165
1166 1166 self._branchcaches = branchmap.BranchMapCache()
1167 1167 self._revbranchcache = None
1168 1168 self._filterpats = {}
1169 1169 self._datafilters = {}
1170 1170 self._transref = self._lockref = self._wlockref = None
1171 1171
1172 1172 # A cache for various files under .hg/ that tracks file changes,
1173 1173 # (used by the filecache decorator)
1174 1174 #
1175 1175 # Maps a property name to its util.filecacheentry
1176 1176 self._filecache = {}
1177 1177
1178 1178 # hold sets of revisions to be filtered
1179 1179 # should be cleared when something might have changed the filter value:
1180 1180 # - new changesets,
1181 1181 # - phase change,
1182 1182 # - new obsolescence marker,
1183 1183 # - working directory parent change,
1184 1184 # - bookmark changes
1185 1185 self.filteredrevcache = {}
1186 1186
1187 1187 # post-dirstate-status hooks
1188 1188 self._postdsstatus = []
1189 1189
1190 1190 # generic mapping between names and nodes
1191 1191 self.names = namespaces.namespaces()
1192 1192
1193 1193 # Key to signature value.
1194 1194 self._sparsesignaturecache = {}
1195 1195 # Signature to cached matcher instance.
1196 1196 self._sparsematchercache = {}
1197 1197
1198 1198 self._extrafilterid = repoview.extrafilter(ui)
1199 1199
1200 1200 self.filecopiesmode = None
1201 1201 if COPIESSDC_REQUIREMENT in self.requirements:
1202 1202 self.filecopiesmode = b'changeset-sidedata'
1203 1203
1204 1204 def _getvfsward(self, origfunc):
1205 1205 """build a ward for self.vfs"""
1206 1206 rref = weakref.ref(self)
1207 1207
1208 1208 def checkvfs(path, mode=None):
1209 1209 ret = origfunc(path, mode=mode)
1210 1210 repo = rref()
1211 1211 if (
1212 1212 repo is None
1213 1213 or not util.safehasattr(repo, b'_wlockref')
1214 1214 or not util.safehasattr(repo, b'_lockref')
1215 1215 ):
1216 1216 return
1217 1217 if mode in (None, b'r', b'rb'):
1218 1218 return
1219 1219 if path.startswith(repo.path):
1220 1220 # truncate name relative to the repository (.hg)
1221 1221 path = path[len(repo.path) + 1 :]
1222 1222 if path.startswith(b'cache/'):
1223 1223 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1224 1224 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1225 1225 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1226 1226 # journal is covered by 'lock'
1227 1227 if repo._currentlock(repo._lockref) is None:
1228 1228 repo.ui.develwarn(
1229 1229 b'write with no lock: "%s"' % path,
1230 1230 stacklevel=3,
1231 1231 config=b'check-locks',
1232 1232 )
1233 1233 elif repo._currentlock(repo._wlockref) is None:
1234 1234 # rest of vfs files are covered by 'wlock'
1235 1235 #
1236 1236 # exclude special files
1237 1237 for prefix in self._wlockfreeprefix:
1238 1238 if path.startswith(prefix):
1239 1239 return
1240 1240 repo.ui.develwarn(
1241 1241 b'write with no wlock: "%s"' % path,
1242 1242 stacklevel=3,
1243 1243 config=b'check-locks',
1244 1244 )
1245 1245 return ret
1246 1246
1247 1247 return checkvfs
1248 1248
1249 1249 def _getsvfsward(self, origfunc):
1250 1250 """build a ward for self.svfs"""
1251 1251 rref = weakref.ref(self)
1252 1252
1253 1253 def checksvfs(path, mode=None):
1254 1254 ret = origfunc(path, mode=mode)
1255 1255 repo = rref()
1256 1256 if repo is None or not util.safehasattr(repo, b'_lockref'):
1257 1257 return
1258 1258 if mode in (None, b'r', b'rb'):
1259 1259 return
1260 1260 if path.startswith(repo.sharedpath):
1261 1261 # truncate name relative to the repository (.hg)
1262 1262 path = path[len(repo.sharedpath) + 1 :]
1263 1263 if repo._currentlock(repo._lockref) is None:
1264 1264 repo.ui.develwarn(
1265 1265 b'write with no lock: "%s"' % path, stacklevel=4
1266 1266 )
1267 1267 return ret
1268 1268
1269 1269 return checksvfs
1270 1270
1271 1271 def close(self):
1272 1272 self._writecaches()
1273 1273
1274 1274 def _writecaches(self):
1275 1275 if self._revbranchcache:
1276 1276 self._revbranchcache.write()
1277 1277
1278 1278 def _restrictcapabilities(self, caps):
1279 1279 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1280 1280 caps = set(caps)
1281 1281 capsblob = bundle2.encodecaps(
1282 1282 bundle2.getrepocaps(self, role=b'client')
1283 1283 )
1284 1284 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1285 1285 return caps
1286 1286
1287 1287 def _writerequirements(self):
1288 1288 scmutil.writerequires(self.vfs, self.requirements)
1289 1289
1290 1290 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1291 1291 # self -> auditor -> self._checknested -> self
1292 1292
1293 1293 @property
1294 1294 def auditor(self):
1295 1295 # This is only used by context.workingctx.match in order to
1296 1296 # detect files in subrepos.
1297 1297 return pathutil.pathauditor(self.root, callback=self._checknested)
1298 1298
1299 1299 @property
1300 1300 def nofsauditor(self):
1301 1301 # This is only used by context.basectx.match in order to detect
1302 1302 # files in subrepos.
1303 1303 return pathutil.pathauditor(
1304 1304 self.root, callback=self._checknested, realfs=False, cached=True
1305 1305 )
1306 1306
1307 1307 def _checknested(self, path):
1308 1308 """Determine if path is a legal nested repository."""
1309 1309 if not path.startswith(self.root):
1310 1310 return False
1311 1311 subpath = path[len(self.root) + 1 :]
1312 1312 normsubpath = util.pconvert(subpath)
1313 1313
1314 1314 # XXX: Checking against the current working copy is wrong in
1315 1315 # the sense that it can reject things like
1316 1316 #
1317 1317 # $ hg cat -r 10 sub/x.txt
1318 1318 #
1319 1319 # if sub/ is no longer a subrepository in the working copy
1320 1320 # parent revision.
1321 1321 #
1322 1322 # However, it can of course also allow things that would have
1323 1323 # been rejected before, such as the above cat command if sub/
1324 1324 # is a subrepository now, but was a normal directory before.
1325 1325 # The old path auditor would have rejected by mistake since it
1326 1326 # panics when it sees sub/.hg/.
1327 1327 #
1328 1328 # All in all, checking against the working copy seems sensible
1329 1329 # since we want to prevent access to nested repositories on
1330 1330 # the filesystem *now*.
1331 1331 ctx = self[None]
1332 1332 parts = util.splitpath(subpath)
1333 1333 while parts:
1334 1334 prefix = b'/'.join(parts)
1335 1335 if prefix in ctx.substate:
1336 1336 if prefix == normsubpath:
1337 1337 return True
1338 1338 else:
1339 1339 sub = ctx.sub(prefix)
1340 1340 return sub.checknested(subpath[len(prefix) + 1 :])
1341 1341 else:
1342 1342 parts.pop()
1343 1343 return False
1344 1344
1345 1345 def peer(self):
1346 1346 return localpeer(self) # not cached to avoid reference cycle
1347 1347
1348 1348 def unfiltered(self):
1349 1349 """Return unfiltered version of the repository
1350 1350
1351 1351 Intended to be overwritten by filtered repo."""
1352 1352 return self
1353 1353
1354 1354 def filtered(self, name, visibilityexceptions=None):
1355 1355 """Return a filtered version of a repository
1356 1356
1357 1357 The `name` parameter is the identifier of the requested view. This
1358 1358 will return a repoview object set "exactly" to the specified view.
1359 1359
1360 1360 This function does not apply recursive filtering to a repository. For
1361 1361 example calling `repo.filtered("served")` will return a repoview using
1362 1362 the "served" view, regardless of the initial view used by `repo`.
1363 1363
1364 1364 In other words, there is always only one level of `repoview` "filtering".
1365 1365 """
1366 1366 if self._extrafilterid is not None and b'%' not in name:
1367 1367 name = name + b'%' + self._extrafilterid
1368 1368
1369 1369 cls = repoview.newtype(self.unfiltered().__class__)
1370 1370 return cls(self, name, visibilityexceptions)
1371 1371
1372 1372 @mixedrepostorecache(
1373 1373 (b'bookmarks', b'plain'),
1374 1374 (b'bookmarks.current', b'plain'),
1375 1375 (b'bookmarks', b''),
1376 1376 (b'00changelog.i', b''),
1377 1377 )
1378 1378 def _bookmarks(self):
1379 1379 # Since the multiple files involved in the transaction cannot be
1380 1380 # written atomically (with current repository format), there is a race
1381 1381 # condition here.
1382 1382 #
1383 1383 # 1) changelog content A is read
1384 1384 # 2) outside transaction update changelog to content B
1385 1385 # 3) outside transaction update bookmark file referring to content B
1386 1386 # 4) bookmarks file content is read and filtered against changelog-A
1387 1387 #
1388 1388 # When this happens, bookmarks against nodes missing from A are dropped.
1389 1389 #
1390 1390 # Having this happen during a read is not great, but it becomes worse
1391 1391 # when it happens during a write, because the bookmarks pointing to the
1392 1392 # "unknown" nodes will be dropped for good. However, writes happen within
1393 1393 # locks. This locking makes it possible to have a race-free consistent
1394 1394 # read. For this purpose, data read from disk before locking is
1395 1395 # "invalidated" right after the locks are taken. These invalidations are
1396 1396 # "light": the `filecache` mechanism keeps the data in memory and will
1397 1397 # reuse it if the underlying files did not change. Not parsing the
1398 1398 # same data multiple times helps performance.
1399 1399 #
1400 1400 # Unfortunately, in the case described above, the files tracked by the
1401 1401 # bookmarks file cache might not have changed, but the in-memory
1402 1402 # content is still "wrong" because we used an older changelog content
1403 1403 # to process the on-disk data. So after locking, the changelog would be
1404 1404 # refreshed but `_bookmarks` would be preserved.
1405 1405 # Adding `00changelog.i` to the list of tracked files is not
1406 1406 # enough, because at the time we build the content for `_bookmarks` in
1407 1407 # (4), the changelog file has already diverged from the content used
1408 1408 # for loading `changelog` in (1)
1409 1409 #
1410 1410 # To prevent the issue, we force the changelog to be explicitly
1411 1411 # reloaded while computing `_bookmarks`. The data race can still happen
1412 1412 # without the lock (with a narrower window), but it would no longer go
1413 1413 # undetected during the lock time refresh.
1414 1414 #
1415 1415 # The new schedule is as follows:
1416 1416 #
1417 1417 # 1) filecache logic detect that `_bookmarks` needs to be computed
1418 1418 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1419 1419 # 3) We force `changelog` filecache to be tested
1420 1420 # 4) cachestat for `changelog` are captured (for changelog)
1421 1421 # 5) `_bookmarks` is computed and cached
1422 1422 #
1423 1423 # The step in (3) ensures we have a changelog at least as recent as the
1424 1424 # cache stat computed in (1). As a result at locking time:
1425 1425 # * if the changelog did not change since (1) -> we can reuse the data
1426 1426 # * otherwise -> the bookmarks get refreshed.
1427 1427 self._refreshchangelog()
1428 1428 return bookmarks.bmstore(self)
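# A rough, non-authoritative illustration of the schedule above, using plain
# os.stat() in place of the filecache machinery (every name below is
# hypothetical):
#
#   import os
#
#   def load_derived(changelog_path, bookmarks_path, reload_changelog, parse):
#       stat_before = os.stat(changelog_path)  # (2) capture dependency stat
#       reload_changelog()                     # (3) force the dependency fresh
#       with open(bookmarks_path, 'rb') as fp:
#           derived = parse(fp.read())         # (5) compute against fresh data
#       # comparing stat_before against a later os.stat() tells whether the
#       # derived data may have gone stale in the meantime
#       return stat_before, derived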
1429 1429
1430 1430 def _refreshchangelog(self):
1431 1431 """make sure the in-memory changelog matches the on-disk one"""
1432 1432 if 'changelog' in vars(self) and self.currenttransaction() is None:
1433 1433 del self.changelog
1434 1434
1435 1435 @property
1436 1436 def _activebookmark(self):
1437 1437 return self._bookmarks.active
1438 1438
1439 1439 # _phasesets depend on the changelog. What we need is to call
1440 1440 # _phasecache.invalidate() if '00changelog.i' was changed, but that
1441 1441 # can't be easily expressed in the filecache mechanism.
1442 1442 @storecache(b'phaseroots', b'00changelog.i')
1443 1443 def _phasecache(self):
1444 1444 return phases.phasecache(self, self._phasedefaults)
1445 1445
1446 1446 @storecache(b'obsstore')
1447 1447 def obsstore(self):
1448 1448 return obsolete.makestore(self.ui, self)
1449 1449
1450 1450 @storecache(b'00changelog.i')
1451 1451 def changelog(self):
1452 1452 return self.store.changelog(txnutil.mayhavepending(self.root))
1453 1453
1454 1454 @storecache(b'00manifest.i')
1455 1455 def manifestlog(self):
1456 1456 return self.store.manifestlog(self, self._storenarrowmatch)
1457 1457
1458 1458 @repofilecache(b'dirstate')
1459 1459 def dirstate(self):
1460 1460 return self._makedirstate()
1461 1461
1462 1462 def _makedirstate(self):
1463 1463 """Extension point for wrapping the dirstate per-repo."""
1464 1464 sparsematchfn = lambda: sparse.matcher(self)
1465 1465
1466 1466 return dirstate.dirstate(
1467 1467 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1468 1468 )
1469 1469
1470 1470 def _dirstatevalidate(self, node):
1471 1471 try:
1472 1472 self.changelog.rev(node)
1473 1473 return node
1474 1474 except error.LookupError:
1475 1475 if not self._dirstatevalidatewarned:
1476 1476 self._dirstatevalidatewarned = True
1477 1477 self.ui.warn(
1478 1478 _(b"warning: ignoring unknown working parent %s!\n")
1479 1479 % short(node)
1480 1480 )
1481 1481 return nullid
1482 1482
1483 1483 @storecache(narrowspec.FILENAME)
1484 1484 def narrowpats(self):
1485 1485 """matcher patterns for this repository's narrowspec
1486 1486
1487 1487 A tuple of (includes, excludes).
1488 1488 """
1489 1489 return narrowspec.load(self)
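# Purely as a sketch (the concrete pattern syntax and container types are
# owned by the narrowspec module; the values below are invented), a narrow
# clone's patterns might look like:
#
#   includes, excludes = repo.narrowpats
#   # e.g. includes -> {b'path:src', b'path:docs'}
#   #      excludes -> {b'path:src/vendored'}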
1490 1490
1491 1491 @storecache(narrowspec.FILENAME)
1492 1492 def _storenarrowmatch(self):
1493 1493 if repository.NARROW_REQUIREMENT not in self.requirements:
1494 1494 return matchmod.always()
1495 1495 include, exclude = self.narrowpats
1496 1496 return narrowspec.match(self.root, include=include, exclude=exclude)
1497 1497
1498 1498 @storecache(narrowspec.FILENAME)
1499 1499 def _narrowmatch(self):
1500 1500 if repository.NARROW_REQUIREMENT not in self.requirements:
1501 1501 return matchmod.always()
1502 1502 narrowspec.checkworkingcopynarrowspec(self)
1503 1503 include, exclude = self.narrowpats
1504 1504 return narrowspec.match(self.root, include=include, exclude=exclude)
1505 1505
1506 1506 def narrowmatch(self, match=None, includeexact=False):
1507 1507 """matcher corresponding to the repo's narrowspec
1508 1508
1509 1509 If `match` is given, then that will be intersected with the narrow
1510 1510 matcher.
1511 1511
1512 1512 If `includeexact` is True, then any exact matches from `match` will
1513 1513 be included even if they're outside the narrowspec.
1514 1514 """
1515 1515 if match:
1516 1516 if includeexact and not self._narrowmatch.always():
1517 1517 # do not exclude explicitly-specified paths so that they can
1518 1518 # be warned later on
1519 1519 em = matchmod.exact(match.files())
1520 1520 nm = matchmod.unionmatcher([self._narrowmatch, em])
1521 1521 return matchmod.intersectmatchers(match, nm)
1522 1522 return matchmod.intersectmatchers(match, self._narrowmatch)
1523 1523 return self._narrowmatch
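# A hedged usage sketch (the matcher and paths are invented): the returned
# matcher is callable, so callers intersect and then test individual files.
#
#   m = repo.narrowmatch(match=somematch, includeexact=True)
#   if m(b'src/inside-narrow.py'):
#       pass  # visible through both the narrowspec and somematch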
1524 1524
1525 1525 def setnarrowpats(self, newincludes, newexcludes):
1526 1526 narrowspec.save(self, newincludes, newexcludes)
1527 1527 self.invalidate(clearfilecache=True)
1528 1528
1529 1529 @unfilteredpropertycache
1530 1530 def _quick_access_changeid_null(self):
1531 1531 return {
1532 1532 b'null': (nullrev, nullid),
1533 1533 nullrev: (nullrev, nullid),
1534 1534 nullid: (nullrev, nullid),
1535 1535 }
1536 1536
1537 1537 @unfilteredpropertycache
1538 1538 def _quick_access_changeid_wc(self):
1539 1539 # also fast path access to the working copy parents
1540 1540 # however, only do it for filters that ensure the wc is visible.
1541 1541 quick = {}
1542 1542 cl = self.unfiltered().changelog
1543 1543 for node in self.dirstate.parents():
1544 1544 if node == nullid:
1545 1545 continue
1546 1546 rev = cl.index.get_rev(node)
1547 1547 if rev is None:
1548 1548 # unknown working copy parent case:
1549 1549 #
1550 1550 # skip the fast path and let higher code deal with it
1551 1551 continue
1552 1552 pair = (rev, node)
1553 1553 quick[rev] = pair
1554 1554 quick[node] = pair
1555 1555 # also add the parents of the parents
1556 1556 for r in cl.parentrevs(rev):
1557 1557 if r == nullrev:
1558 1558 continue
1559 1559 n = cl.node(r)
1560 1560 pair = (r, n)
1561 1561 quick[r] = pair
1562 1562 quick[n] = pair
1563 1563 p1node = self.dirstate.p1()
1564 1564 if p1node != nullid:
1565 1565 quick[b'.'] = quick[p1node]
1566 1566 return quick
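# The mapping built above is keyed redundantly, by revision number, by
# binary node, and (when the first parent is not null) by b'.', with every
# key pointing at the same (rev, node) pair. An illustrative shape, with
# made-up values:
#
#   {
#       42: (42, node_42),
#       node_42: (42, node_42),
#       b'.': (42, node_42),
#   }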
1567 1567
1568 1568 @unfilteredmethod
1569 1569 def _quick_access_changeid_invalidate(self):
1570 1570 if '_quick_access_changeid_wc' in vars(self):
1571 1571 del self.__dict__['_quick_access_changeid_wc']
1572 1572
1573 1573 @property
1574 1574 def _quick_access_changeid(self):
1575 1575 """a helper dictionary for __getitem__ calls
1576 1576
1577 1577 This contains a list of symbols we can recognise right away without
1578 1578 further processing.
1579 1579 """
1580 1580 mapping = self._quick_access_changeid_null
1581 1581 if self.filtername in repoview.filter_has_wc:
1582 1582 mapping = mapping.copy()
1583 1583 mapping.update(self._quick_access_changeid_wc)
1584 1584 return mapping
1585 1585
1586 1586 def __getitem__(self, changeid):
1587 1587 # dealing with special cases
1588 1588 if changeid is None:
1589 1589 return context.workingctx(self)
1590 1590 if isinstance(changeid, context.basectx):
1591 1591 return changeid
1592 1592
1593 1593 # dealing with multiple revisions
1594 1594 if isinstance(changeid, slice):
1595 1595 # wdirrev isn't contiguous so the slice shouldn't include it
1596 1596 return [
1597 1597 self[i]
1598 1598 for i in pycompat.xrange(*changeid.indices(len(self)))
1599 1599 if i not in self.changelog.filteredrevs
1600 1600 ]
1601 1601
1602 1602 # dealing with some special values
1603 1603 quick_access = self._quick_access_changeid.get(changeid)
1604 1604 if quick_access is not None:
1605 1605 rev, node = quick_access
1606 1606 return context.changectx(self, rev, node, maybe_filtered=False)
1607 1607 if changeid == b'tip':
1608 1608 node = self.changelog.tip()
1609 1609 rev = self.changelog.rev(node)
1610 1610 return context.changectx(self, rev, node)
1611 1611
1612 1612 # dealing with arbitrary values
1613 1613 try:
1614 1614 if isinstance(changeid, int):
1615 1615 node = self.changelog.node(changeid)
1616 1616 rev = changeid
1617 1617 elif changeid == b'.':
1618 1618 # this is a hack to delay/avoid loading obsmarkers
1619 1619 # when we know that '.' won't be hidden
1620 1620 node = self.dirstate.p1()
1621 1621 rev = self.unfiltered().changelog.rev(node)
1622 1622 elif len(changeid) == 20:
1623 1623 try:
1624 1624 node = changeid
1625 1625 rev = self.changelog.rev(changeid)
1626 1626 except error.FilteredLookupError:
1627 1627 changeid = hex(changeid) # for the error message
1628 1628 raise
1629 1629 except LookupError:
1630 1630 # check if it might have come from damaged dirstate
1631 1631 #
1632 1632 # XXX we could avoid the unfiltered if we had a recognizable
1633 1633 # exception for filtered changeset access
1634 1634 if (
1635 1635 self.local()
1636 1636 and changeid in self.unfiltered().dirstate.parents()
1637 1637 ):
1638 1638 msg = _(b"working directory has unknown parent '%s'!")
1639 1639 raise error.Abort(msg % short(changeid))
1640 1640 changeid = hex(changeid) # for the error message
1641 1641 raise
1642 1642
1643 1643 elif len(changeid) == 40:
1644 1644 node = bin(changeid)
1645 1645 rev = self.changelog.rev(node)
1646 1646 else:
1647 1647 raise error.ProgrammingError(
1648 1648 b"unsupported changeid '%s' of type %s"
1649 1649 % (changeid, pycompat.bytestr(type(changeid)))
1650 1650 )
1651 1651
1652 1652 return context.changectx(self, rev, node)
1653 1653
1654 1654 except (error.FilteredIndexError, error.FilteredLookupError):
1655 1655 raise error.FilteredRepoLookupError(
1656 1656 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1657 1657 )
1658 1658 except (IndexError, LookupError):
1659 1659 raise error.RepoLookupError(
1660 1660 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1661 1661 )
1662 1662 except error.WdirUnsupported:
1663 1663 return context.workingctx(self)
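# For reference, the changeid forms dispatched above, as a short usage
# sketch (`node` is assumed to be a 20-byte binary hash and `hexnode` its
# 40-character hex bytestring):
#
#   repo[None]      # working directory context
#   repo[0]         # integer revision number
#   repo[b'.']      # working directory parent
#   repo[b'tip']    # repository tip
#   repo[node]      # 20-byte binary node
#   repo[hexnode]   # 40-character hex node
#   repo[0:5]       # slice -> list of changectx, skipping filtered revs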
1664 1664
1665 1665 def __contains__(self, changeid):
1666 1666 """True if the given changeid exists
1667 1667
1668 1668 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1669 1669 specified.
1670 1670 """
1671 1671 try:
1672 1672 self[changeid]
1673 1673 return True
1674 1674 except error.RepoLookupError:
1675 1675 return False
1676 1676
1677 1677 def __nonzero__(self):
1678 1678 return True
1679 1679
1680 1680 __bool__ = __nonzero__
1681 1681
1682 1682 def __len__(self):
1683 1683 # no need to pay the cost of repoview.changelog
1684 1684 unfi = self.unfiltered()
1685 1685 return len(unfi.changelog)
1686 1686
1687 1687 def __iter__(self):
1688 1688 return iter(self.changelog)
1689 1689
1690 1690 def revs(self, expr, *args):
1691 1691 '''Find revisions matching a revset.
1692 1692
1693 1693 The revset is specified as a string ``expr`` that may contain
1694 1694 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1695 1695
1696 1696 Revset aliases from the configuration are not expanded. To expand
1697 1697 user aliases, consider calling ``scmutil.revrange()`` or
1698 1698 ``repo.anyrevs([expr], user=True)``.
1699 1699
1700 1700 Returns a smartset.abstractsmartset, which is a list-like interface
1701 1701 that contains integer revisions.
1702 1702 '''
1703 1703 tree = revsetlang.spectree(expr, *args)
1704 1704 return revset.makematcher(tree)(self)
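# A hedged example of the %-formatting mentioned above (assuming, per
# revsetlang.formatspec, that %d takes an integer revision and %ln a list
# of binary nodes; the values are placeholders):
#
#   subset = repo.revs(b'ancestors(%d)', 42)
#   heads = repo.revs(b'heads(%ln)', [node1, node2])
#
# The returned smartset can be iterated or probed for membership without
# materializing every revision.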
1705 1705
1706 1706 def set(self, expr, *args):
1707 1707 '''Find revisions matching a revset and emit changectx instances.
1708 1708
1709 1709 This is a convenience wrapper around ``revs()`` that iterates the
1710 1710 result and is a generator of changectx instances.
1711 1711
1712 1712 Revset aliases from the configuration are not expanded. To expand
1713 1713 user aliases, consider calling ``scmutil.revrange()``.
1714 1714 '''
1715 1715 for r in self.revs(expr, *args):
1716 1716 yield self[r]
1717 1717
1718 1718 def anyrevs(self, specs, user=False, localalias=None):
1719 1719 '''Find revisions matching one of the given revsets.
1720 1720
1721 1721 Revset aliases from the configuration are not expanded by default. To
1722 1722 expand user aliases, specify ``user=True``. To provide some local
1723 1723 definitions overriding user aliases, set ``localalias`` to
1724 1724 ``{name: definitionstring}``.
1725 1725 '''
1726 1726 if specs == [b'null']:
1727 1727 return revset.baseset([nullrev])
1728 1728 if specs == [b'.']:
1729 1729 quick_data = self._quick_access_changeid.get(b'.')
1730 1730 if quick_data is not None:
1731 1731 return revset.baseset([quick_data[0]])
1732 1732 if user:
1733 1733 m = revset.matchany(
1734 1734 self.ui,
1735 1735 specs,
1736 1736 lookup=revset.lookupfn(self),
1737 1737 localalias=localalias,
1738 1738 )
1739 1739 else:
1740 1740 m = revset.matchany(None, specs, localalias=localalias)
1741 1741 return m(self)
1742 1742
1743 1743 def url(self):
1744 1744 return b'file:' + self.root
1745 1745
1746 1746 def hook(self, name, throw=False, **args):
1747 1747 """Call a hook, passing this repo instance.
1748 1748
1749 1749 This is a convenience method to aid invoking hooks. Extensions likely
1750 1750 won't call this unless they have registered a custom hook or are
1751 1751 replacing code that is expected to call a hook.
1752 1752 """
1753 1753 return hook.hook(self.ui, self, name, throw, **args)
1754 1754
1755 1755 @filteredpropertycache
1756 1756 def _tagscache(self):
1757 1757 '''Returns a tagscache object that contains various tags related
1758 1758 caches.'''
1759 1759
1760 1760 # This simplifies its cache management by having one decorated
1761 1761 # function (this one) and the rest simply fetch things from it.
1762 1762 class tagscache(object):
1763 1763 def __init__(self):
1764 1764 # These two define the set of tags for this repository. tags
1765 1765 # maps tag name to node; tagtypes maps tag name to 'global' or
1766 1766 # 'local'. (Global tags are defined by .hgtags across all
1767 1767 # heads, and local tags are defined in .hg/localtags.)
1768 1768 # They constitute the in-memory cache of tags.
1769 1769 self.tags = self.tagtypes = None
1770 1770
1771 1771 self.nodetagscache = self.tagslist = None
1772 1772
1773 1773 cache = tagscache()
1774 1774 cache.tags, cache.tagtypes = self._findtags()
1775 1775
1776 1776 return cache
1777 1777
1778 1778 def tags(self):
1779 1779 '''return a mapping of tag to node'''
1780 1780 t = {}
1781 1781 if self.changelog.filteredrevs:
1782 1782 tags, tt = self._findtags()
1783 1783 else:
1784 1784 tags = self._tagscache.tags
1785 1785 rev = self.changelog.rev
1786 1786 for k, v in pycompat.iteritems(tags):
1787 1787 try:
1788 1788 # ignore tags to unknown nodes
1789 1789 rev(v)
1790 1790 t[k] = v
1791 1791 except (error.LookupError, ValueError):
1792 1792 pass
1793 1793 return t
1794 1794
1795 1795 def _findtags(self):
1796 1796 '''Do the hard work of finding tags. Return a pair of dicts
1797 1797 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1798 1798 maps tag name to a string like \'global\' or \'local\'.
1799 1799 Subclasses or extensions are free to add their own tags, but
1800 1800 should be aware that the returned dicts will be retained for the
1801 1801 duration of the localrepo object.'''
1802 1802
1803 1803 # XXX what tagtype should subclasses/extensions use? Currently
1804 1804 # mq and bookmarks add tags, but do not set the tagtype at all.
1805 1805 # Should each extension invent its own tag type? Should there
1806 1806 # be one tagtype for all such "virtual" tags? Or is the status
1807 1807 # quo fine?
1808 1808
1809 1809 # map tag name to (node, hist)
1810 1810 alltags = tagsmod.findglobaltags(self.ui, self)
1811 1811 # map tag name to tag type
1812 1812 tagtypes = dict((tag, b'global') for tag in alltags)
1813 1813
1814 1814 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1815 1815
1816 1816 # Build the return dicts. Have to re-encode tag names because
1817 1817 # the tags module always uses UTF-8 (in order not to lose info
1818 1818 # writing to the cache), but the rest of Mercurial wants them in
1819 1819 # local encoding.
1820 1820 tags = {}
1821 1821 for (name, (node, hist)) in pycompat.iteritems(alltags):
1822 1822 if node != nullid:
1823 1823 tags[encoding.tolocal(name)] = node
1824 1824 tags[b'tip'] = self.changelog.tip()
1825 1825 tagtypes = dict(
1826 1826 [
1827 1827 (encoding.tolocal(name), value)
1828 1828 for (name, value) in pycompat.iteritems(tagtypes)
1829 1829 ]
1830 1830 )
1831 1831 return (tags, tagtypes)
1832 1832
1833 1833 def tagtype(self, tagname):
1834 1834 '''
1835 1835 return the type of the given tag. result can be:
1836 1836
1837 1837 'local' : a local tag
1838 1838 'global' : a global tag
1839 1839 None : tag does not exist
1840 1840 '''
1841 1841
1842 1842 return self._tagscache.tagtypes.get(tagname)
1843 1843
1844 1844 def tagslist(self):
1845 1845 '''return a list of tags ordered by revision'''
1846 1846 if not self._tagscache.tagslist:
1847 1847 l = []
1848 1848 for t, n in pycompat.iteritems(self.tags()):
1849 1849 l.append((self.changelog.rev(n), t, n))
1850 1850 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1851 1851
1852 1852 return self._tagscache.tagslist
1853 1853
1854 1854 def nodetags(self, node):
1855 1855 '''return the tags associated with a node'''
1856 1856 if not self._tagscache.nodetagscache:
1857 1857 nodetagscache = {}
1858 1858 for t, n in pycompat.iteritems(self._tagscache.tags):
1859 1859 nodetagscache.setdefault(n, []).append(t)
1860 1860 for tags in pycompat.itervalues(nodetagscache):
1861 1861 tags.sort()
1862 1862 self._tagscache.nodetagscache = nodetagscache
1863 1863 return self._tagscache.nodetagscache.get(node, [])
1864 1864
1865 1865 def nodebookmarks(self, node):
1866 1866 """return the list of bookmarks pointing to the specified node"""
1867 1867 return self._bookmarks.names(node)
1868 1868
1869 1869 def branchmap(self):
1870 1870 '''returns a dictionary {branch: [branchheads]} with branchheads
1871 1871 ordered by increasing revision number'''
1872 1872 return self._branchcaches[self]
1873 1873
1874 1874 @unfilteredmethod
1875 1875 def revbranchcache(self):
1876 1876 if not self._revbranchcache:
1877 1877 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1878 1878 return self._revbranchcache
1879 1879
1880 1880 def branchtip(self, branch, ignoremissing=False):
1881 1881 '''return the tip node for a given branch
1882 1882
1883 1883 If ignoremissing is True, then this method will not raise an error.
1884 1884 This is helpful for callers that only expect None for a missing branch
1885 1885 (e.g. namespace).
1886 1886
1887 1887 '''
1888 1888 try:
1889 1889 return self.branchmap().branchtip(branch)
1890 1890 except KeyError:
1891 1891 if not ignoremissing:
1892 1892 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1893 1893 else:
1894 1894 pass
1895 1895
1896 1896 def lookup(self, key):
1897 1897 node = scmutil.revsymbol(self, key).node()
1898 1898 if node is None:
1899 1899 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1900 1900 return node
1901 1901
1902 1902 def lookupbranch(self, key):
1903 1903 if self.branchmap().hasbranch(key):
1904 1904 return key
1905 1905
1906 1906 return scmutil.revsymbol(self, key).branch()
1907 1907
1908 1908 def known(self, nodes):
1909 1909 cl = self.changelog
1910 1910 get_rev = cl.index.get_rev
1911 1911 filtered = cl.filteredrevs
1912 1912 result = []
1913 1913 for n in nodes:
1914 1914 r = get_rev(n)
1915 1915 resp = not (r is None or r in filtered)
1916 1916 result.append(resp)
1917 1917 return result
1918 1918
1919 1919 def local(self):
1920 1920 return self
1921 1921
1922 1922 def publishing(self):
1923 1923 # it's safe (and desirable) to trust the publish flag unconditionally
1924 1924 # so that we don't finalize changes shared between users via ssh or nfs
1925 1925 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1926 1926
1927 1927 def cancopy(self):
1928 1928 # so statichttprepo's override of local() works
1929 1929 if not self.local():
1930 1930 return False
1931 1931 if not self.publishing():
1932 1932 return True
1933 1933 # if publishing we can't copy if there is filtered content
1934 1934 return not self.filtered(b'visible').changelog.filteredrevs
1935 1935
1936 1936 def shared(self):
1937 1937 '''the type of shared repository (None if not shared)'''
1938 1938 if self.sharedpath != self.path:
1939 1939 return b'store'
1940 1940 return None
1941 1941
1942 1942 def wjoin(self, f, *insidef):
1943 1943 return self.vfs.reljoin(self.root, f, *insidef)
1944 1944
1945 1945 def setparents(self, p1, p2=nullid):
1946 1946 self[None].setparents(p1, p2)
1947 1947 self._quick_access_changeid_invalidate()
1948 1948
1949 1949 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1950 1950 """changeid must be a changeset revision, if specified.
1951 1951 fileid can be a file revision or node."""
1952 1952 return context.filectx(
1953 1953 self, path, changeid, fileid, changectx=changectx
1954 1954 )
1955 1955
1956 1956 def getcwd(self):
1957 1957 return self.dirstate.getcwd()
1958 1958
1959 1959 def pathto(self, f, cwd=None):
1960 1960 return self.dirstate.pathto(f, cwd)
1961 1961
1962 1962 def _loadfilter(self, filter):
1963 1963 if filter not in self._filterpats:
1964 1964 l = []
1965 1965 for pat, cmd in self.ui.configitems(filter):
1966 1966 if cmd == b'!':
1967 1967 continue
1968 1968 mf = matchmod.match(self.root, b'', [pat])
1969 1969 fn = None
1970 1970 params = cmd
1971 1971 for name, filterfn in pycompat.iteritems(self._datafilters):
1972 1972 if cmd.startswith(name):
1973 1973 fn = filterfn
1974 1974 params = cmd[len(name) :].lstrip()
1975 1975 break
1976 1976 if not fn:
1977 1977 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1978 1978 fn.__name__ = 'commandfilter'
1979 1979 # Wrap old filters not supporting keyword arguments
1980 1980 if not pycompat.getargspec(fn)[2]:
1981 1981 oldfn = fn
1982 1982 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1983 1983 fn.__name__ = 'compat-' + oldfn.__name__
1984 1984 l.append((mf, fn, params))
1985 1985 self._filterpats[filter] = l
1986 1986 return self._filterpats[filter]
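# The patterns iterated above come from the [encode]/[decode] config
# sections. An illustrative hgrc snippet (glob and command are examples
# only): a command that matches no registered data filter is run as a
# shell filter, and a value of "!" disables the entry.
#
#   [encode]
#   **.txt = LC_ALL=C tr -d '\r'
#   [decode]
#   **.txt = !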
1987 1987
1988 1988 def _filter(self, filterpats, filename, data):
1989 1989 for mf, fn, cmd in filterpats:
1990 1990 if mf(filename):
1991 1991 self.ui.debug(
1992 1992 b"filtering %s through %s\n"
1993 1993 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1994 1994 )
1995 1995 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1996 1996 break
1997 1997
1998 1998 return data
1999 1999
2000 2000 @unfilteredpropertycache
2001 2001 def _encodefilterpats(self):
2002 2002 return self._loadfilter(b'encode')
2003 2003
2004 2004 @unfilteredpropertycache
2005 2005 def _decodefilterpats(self):
2006 2006 return self._loadfilter(b'decode')
2007 2007
2008 2008 def adddatafilter(self, name, filter):
2009 2009 self._datafilters[name] = filter
2010 2010
2011 2011 def wread(self, filename):
2012 2012 if self.wvfs.islink(filename):
2013 2013 data = self.wvfs.readlink(filename)
2014 2014 else:
2015 2015 data = self.wvfs.read(filename)
2016 2016 return self._filter(self._encodefilterpats, filename, data)
2017 2017
2018 2018 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2019 2019 """write ``data`` into ``filename`` in the working directory
2020 2020
2021 2021 This returns length of written (maybe decoded) data.
2022 2022 """
2023 2023 data = self._filter(self._decodefilterpats, filename, data)
2024 2024 if b'l' in flags:
2025 2025 self.wvfs.symlink(data, filename)
2026 2026 else:
2027 2027 self.wvfs.write(
2028 2028 filename, data, backgroundclose=backgroundclose, **kwargs
2029 2029 )
2030 2030 if b'x' in flags:
2031 2031 self.wvfs.setflags(filename, False, True)
2032 2032 else:
2033 2033 self.wvfs.setflags(filename, False, False)
2034 2034 return len(data)
2035 2035
2036 2036 def wwritedata(self, filename, data):
2037 2037 return self._filter(self._decodefilterpats, filename, data)
2038 2038
2039 2039 def currenttransaction(self):
2040 2040 """return the current transaction or None if none exists"""
2041 2041 if self._transref:
2042 2042 tr = self._transref()
2043 2043 else:
2044 2044 tr = None
2045 2045
2046 2046 if tr and tr.running():
2047 2047 return tr
2048 2048 return None
2049 2049
2050 2050 def transaction(self, desc, report=None):
2051 2051 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2052 2052 b'devel', b'check-locks'
2053 2053 ):
2054 2054 if self._currentlock(self._lockref) is None:
2055 2055 raise error.ProgrammingError(b'transaction requires locking')
2056 2056 tr = self.currenttransaction()
2057 2057 if tr is not None:
2058 2058 return tr.nest(name=desc)
2059 2059
2060 2060 # abort here if the journal already exists
2061 2061 if self.svfs.exists(b"journal"):
2062 2062 raise error.RepoError(
2063 2063 _(b"abandoned transaction found"),
2064 2064 hint=_(b"run 'hg recover' to clean up transaction"),
2065 2065 )
2066 2066
2067 2067 idbase = b"%.40f#%f" % (random.random(), time.time())
2068 2068 ha = hex(hashutil.sha1(idbase).digest())
2069 2069 txnid = b'TXN:' + ha
2070 2070 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2071 2071
2072 2072 self._writejournal(desc)
2073 2073 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2074 2074 if report:
2075 2075 rp = report
2076 2076 else:
2077 2077 rp = self.ui.warn
2078 2078 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2079 2079 # we must avoid cyclic reference between repo and transaction.
2080 2080 reporef = weakref.ref(self)
2081 2081 # Code to track tag movement
2082 2082 #
2083 2083 # Since tags are all handled as file content, it is actually quite hard
2084 2084 # to track these movements from a code perspective. So we fall back to
2085 2085 # tracking at the repository level. One could envision tracking changes
2086 2086 # to the '.hgtags' file through changegroup application, but that fails to
2087 2087 # cope with cases where a transaction exposes new heads without a changegroup
2088 2088 # being involved (eg: phase movement).
2089 2089 #
2090 2090 # For now, we gate the feature behind a flag since it likely comes
2091 2091 # with performance impacts. The current code runs more often than needed
2092 2092 # and does not use caches as much as it could. The current focus is on
2093 2093 # the behavior of the feature, so we disable it by default. The flag
2094 2094 # will be removed when we are happy with the performance impact.
2095 2095 #
2096 2096 # Once this feature is no longer experimental move the following
2097 2097 # documentation to the appropriate help section:
2098 2098 #
2099 2099 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2100 2100 # tags (new or changed or deleted tags). In addition the details of
2101 2101 # these changes are made available in a file at:
2102 2102 # ``REPOROOT/.hg/changes/tags.changes``.
2103 2103 # Make sure you check for HG_TAG_MOVED before reading that file, as it
2104 2104 # might exist from a previous transaction even if no tags were touched
2105 2105 # in this one. Changes are recorded in a line-based format::
2106 2106 #
2107 2107 # <action> <hex-node> <tag-name>\n
2108 2108 #
2109 2109 # Actions are defined as follows:
2110 2110 # "-R": tag is removed,
2111 2111 # "+A": tag is added,
2112 2112 # "-M": tag is moved (old value),
2113 2113 # "+M": tag is moved (new value),
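# As a hedged illustration only (not shipped code; the hook name and the
# 'tag_moved' keyword are assumptions based on the hookargs set below), an
# in-process hook could consume that file roughly like this:
#
#   def report_tag_moves(ui, repo, hooktype, **kwargs):
#       if not kwargs.get('tag_moved'):
#           return
#       data = repo.vfs.tryread(b'changes/tags.changes')
#       for line in data.splitlines():
#           action, node, tag = line.split(b' ', 2)
#           ui.status(b'%s %s (%s)\n' % (action, tag, node[:12]))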
2114 2114 tracktags = lambda x: None
2115 2115 # experimental config: experimental.hook-track-tags
2116 2116 shouldtracktags = self.ui.configbool(
2117 2117 b'experimental', b'hook-track-tags'
2118 2118 )
2119 2119 if desc != b'strip' and shouldtracktags:
2120 2120 oldheads = self.changelog.headrevs()
2121 2121
2122 2122 def tracktags(tr2):
2123 2123 repo = reporef()
2124 2124 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2125 2125 newheads = repo.changelog.headrevs()
2126 2126 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2127 2127 # notes: we compare lists here.
2128 2128 # As we do it only once, building a set would not be cheaper
2129 2129 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2130 2130 if changes:
2131 2131 tr2.hookargs[b'tag_moved'] = b'1'
2132 2132 with repo.vfs(
2133 2133 b'changes/tags.changes', b'w', atomictemp=True
2134 2134 ) as changesfile:
2135 2135 # note: we do not register the file with the transaction
2136 2136 # because we need it to still exist when the transaction
2137 2137 # is closed (for txnclose hooks)
2138 2138 tagsmod.writediff(changesfile, changes)
2139 2139
2140 2140 def validate(tr2):
2141 2141 """will run pre-closing hooks"""
2142 2142 # XXX the transaction API is a bit lacking here so we take a hacky
2143 2143 # path for now
2144 2144 #
2145 2145 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2146 2146 # dict is copied before these run. In addition, we need the data
2147 2147 # to be available to in-memory hooks too.
2148 2148 #
2149 2149 # Moreover, we also need to make sure this runs before txnclose
2150 2150 # hooks and there is no "pending" mechanism that would execute
2151 2151 # logic only if hooks are about to run.
2152 2152 #
2153 2153 # Fixing this limitation of the transaction is also needed to track
2154 2154 # other families of changes (bookmarks, phases, obsolescence).
2155 2155 #
2156 2156 # This will have to be fixed before we remove the experimental
2157 2157 # gating.
2158 2158 tracktags(tr2)
2159 2159 repo = reporef()
2160 2160
2161 2161 singleheadopt = (b'experimental', b'single-head-per-branch')
2162 2162 singlehead = repo.ui.configbool(*singleheadopt)
2163 2163 if singlehead:
2164 2164 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2165 2165 accountclosed = singleheadsub.get(
2166 2166 b"account-closed-heads", False
2167 2167 )
2168 2168 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2169 2169 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2170 2170 for name, (old, new) in sorted(
2171 2171 tr.changes[b'bookmarks'].items()
2172 2172 ):
2173 2173 args = tr.hookargs.copy()
2174 2174 args.update(bookmarks.preparehookargs(name, old, new))
2175 2175 repo.hook(
2176 2176 b'pretxnclose-bookmark',
2177 2177 throw=True,
2178 2178 **pycompat.strkwargs(args)
2179 2179 )
2180 2180 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2181 2181 cl = repo.unfiltered().changelog
2182 2182 for rev, (old, new) in tr.changes[b'phases'].items():
2183 2183 args = tr.hookargs.copy()
2184 2184 node = hex(cl.node(rev))
2185 2185 args.update(phases.preparehookargs(node, old, new))
2186 2186 repo.hook(
2187 2187 b'pretxnclose-phase',
2188 2188 throw=True,
2189 2189 **pycompat.strkwargs(args)
2190 2190 )
2191 2191
2192 2192 repo.hook(
2193 2193 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2194 2194 )
2195 2195
2196 2196 def releasefn(tr, success):
2197 2197 repo = reporef()
2198 2198 if repo is None:
2199 2199 # If the repo has been GC'd (and this release function is being
2200 2200 # called from transaction.__del__), there's not much we can do,
2201 2201 # so just leave the unfinished transaction there and let the
2202 2202 # user run `hg recover`.
2203 2203 return
2204 2204 if success:
2205 2205 # this should be explicitly invoked here, because
2206 2206 # in-memory changes aren't written out at closing
2207 2207 # transaction, if tr.addfilegenerator (via
2208 2208 # dirstate.write or so) isn't invoked while
2209 2209 # transaction running
2210 2210 repo.dirstate.write(None)
2211 2211 else:
2212 2212 # discard all changes (including ones already written
2213 2213 # out) in this transaction
2214 2214 narrowspec.restorebackup(self, b'journal.narrowspec')
2215 2215 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2216 2216 repo.dirstate.restorebackup(None, b'journal.dirstate')
2217 2217
2218 2218 repo.invalidate(clearfilecache=True)
2219 2219
2220 2220 tr = transaction.transaction(
2221 2221 rp,
2222 2222 self.svfs,
2223 2223 vfsmap,
2224 2224 b"journal",
2225 2225 b"undo",
2226 2226 aftertrans(renames),
2227 2227 self.store.createmode,
2228 2228 validator=validate,
2229 2229 releasefn=releasefn,
2230 2230 checkambigfiles=_cachedfiles,
2231 2231 name=desc,
2232 2232 )
2233 2233 tr.changes[b'origrepolen'] = len(self)
2234 2234 tr.changes[b'obsmarkers'] = set()
2235 2235 tr.changes[b'phases'] = {}
2236 2236 tr.changes[b'bookmarks'] = {}
2237 2237
2238 2238 tr.hookargs[b'txnid'] = txnid
2239 2239 tr.hookargs[b'txnname'] = desc
2240 2240 # note: writing the fncache only during finalize means that the file is
2241 2241 # outdated when running hooks. As fncache is used for streaming clones,
2242 2242 # this is not expected to break anything that happens during the hooks.
2243 2243 tr.addfinalize(b'flush-fncache', self.store.write)
2244 2244
2245 2245 def txnclosehook(tr2):
2246 2246 """To be run if transaction is successful, will schedule a hook run
2247 2247 """
2248 2248 # Don't reference tr2 in hook() so we don't hold a reference.
2249 2249 # This reduces memory consumption when there are multiple
2250 2250 # transactions per lock. This can likely go away if issue5045
2251 2251 # fixes the function accumulation.
2252 2252 hookargs = tr2.hookargs
2253 2253
2254 2254 def hookfunc(unused_success):
2255 2255 repo = reporef()
2256 2256 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2257 2257 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2258 2258 for name, (old, new) in bmchanges:
2259 2259 args = tr.hookargs.copy()
2260 2260 args.update(bookmarks.preparehookargs(name, old, new))
2261 2261 repo.hook(
2262 2262 b'txnclose-bookmark',
2263 2263 throw=False,
2264 2264 **pycompat.strkwargs(args)
2265 2265 )
2266 2266
2267 2267 if hook.hashook(repo.ui, b'txnclose-phase'):
2268 2268 cl = repo.unfiltered().changelog
2269 2269 phasemv = sorted(tr.changes[b'phases'].items())
2270 2270 for rev, (old, new) in phasemv:
2271 2271 args = tr.hookargs.copy()
2272 2272 node = hex(cl.node(rev))
2273 2273 args.update(phases.preparehookargs(node, old, new))
2274 2274 repo.hook(
2275 2275 b'txnclose-phase',
2276 2276 throw=False,
2277 2277 **pycompat.strkwargs(args)
2278 2278 )
2279 2279
2280 2280 repo.hook(
2281 2281 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2282 2282 )
2283 2283
2284 2284 reporef()._afterlock(hookfunc)
2285 2285
2286 2286 tr.addfinalize(b'txnclose-hook', txnclosehook)
2287 2287 # Include a leading "-" to make it happen before the transaction summary
2288 2288 # reports registered via scmutil.registersummarycallback() whose names
2289 2289 # are 00-txnreport etc. That way, the caches will be warm when the
2290 2290 # callbacks run.
2291 2291 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2292 2292
2293 2293 def txnaborthook(tr2):
2294 2294 """To be run if transaction is aborted
2295 2295 """
2296 2296 reporef().hook(
2297 2297 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2298 2298 )
2299 2299
2300 2300 tr.addabort(b'txnabort-hook', txnaborthook)
2301 2301 # avoid eager cache invalidation. in-memory data should be identical
2302 2302 # to stored data if transaction has no error.
2303 2303 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2304 2304 self._transref = weakref.ref(tr)
2305 2305 scmutil.registersummarycallback(self, tr, desc)
2306 2306 return tr
2307 2307
2308 2308 def _journalfiles(self):
2309 2309 return (
2310 2310 (self.svfs, b'journal'),
2311 2311 (self.svfs, b'journal.narrowspec'),
2312 2312 (self.vfs, b'journal.narrowspec.dirstate'),
2313 2313 (self.vfs, b'journal.dirstate'),
2314 2314 (self.vfs, b'journal.branch'),
2315 2315 (self.vfs, b'journal.desc'),
2316 2316 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2317 2317 (self.svfs, b'journal.phaseroots'),
2318 2318 )
2319 2319
2320 2320 def undofiles(self):
2321 2321 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2322 2322
2323 2323 @unfilteredmethod
2324 2324 def _writejournal(self, desc):
2325 2325 self.dirstate.savebackup(None, b'journal.dirstate')
2326 2326 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2327 2327 narrowspec.savebackup(self, b'journal.narrowspec')
2328 2328 self.vfs.write(
2329 2329 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2330 2330 )
2331 2331 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2332 2332 bookmarksvfs = bookmarks.bookmarksvfs(self)
2333 2333 bookmarksvfs.write(
2334 2334 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2335 2335 )
2336 2336 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2337 2337
2338 2338 def recover(self):
2339 2339 with self.lock():
2340 2340 if self.svfs.exists(b"journal"):
2341 2341 self.ui.status(_(b"rolling back interrupted transaction\n"))
2342 2342 vfsmap = {
2343 2343 b'': self.svfs,
2344 2344 b'plain': self.vfs,
2345 2345 }
2346 2346 transaction.rollback(
2347 2347 self.svfs,
2348 2348 vfsmap,
2349 2349 b"journal",
2350 2350 self.ui.warn,
2351 2351 checkambigfiles=_cachedfiles,
2352 2352 )
2353 2353 self.invalidate()
2354 2354 return True
2355 2355 else:
2356 2356 self.ui.warn(_(b"no interrupted transaction available\n"))
2357 2357 return False
2358 2358
2359 2359 def rollback(self, dryrun=False, force=False):
2360 2360 wlock = lock = dsguard = None
2361 2361 try:
2362 2362 wlock = self.wlock()
2363 2363 lock = self.lock()
2364 2364 if self.svfs.exists(b"undo"):
2365 2365 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2366 2366
2367 2367 return self._rollback(dryrun, force, dsguard)
2368 2368 else:
2369 2369 self.ui.warn(_(b"no rollback information available\n"))
2370 2370 return 1
2371 2371 finally:
2372 2372 release(dsguard, lock, wlock)
2373 2373
2374 2374 @unfilteredmethod # Until we get smarter cache management
2375 2375 def _rollback(self, dryrun, force, dsguard):
2376 2376 ui = self.ui
2377 2377 try:
2378 2378 args = self.vfs.read(b'undo.desc').splitlines()
2379 2379 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2380 2380 if len(args) >= 3:
2381 2381 detail = args[2]
2382 2382 oldtip = oldlen - 1
2383 2383
2384 2384 if detail and ui.verbose:
2385 2385 msg = _(
2386 2386 b'repository tip rolled back to revision %d'
2387 2387 b' (undo %s: %s)\n'
2388 2388 ) % (oldtip, desc, detail)
2389 2389 else:
2390 2390 msg = _(
2391 2391 b'repository tip rolled back to revision %d (undo %s)\n'
2392 2392 ) % (oldtip, desc)
2393 2393 except IOError:
2394 2394 msg = _(b'rolling back unknown transaction\n')
2395 2395 desc = None
2396 2396
2397 2397 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2398 2398 raise error.Abort(
2399 2399 _(
2400 2400 b'rollback of last commit while not checked out '
2401 2401 b'may lose data'
2402 2402 ),
2403 2403 hint=_(b'use -f to force'),
2404 2404 )
2405 2405
2406 2406 ui.status(msg)
2407 2407 if dryrun:
2408 2408 return 0
2409 2409
2410 2410 parents = self.dirstate.parents()
2411 2411 self.destroying()
2412 2412 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2413 2413 transaction.rollback(
2414 2414 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2415 2415 )
2416 2416 bookmarksvfs = bookmarks.bookmarksvfs(self)
2417 2417 if bookmarksvfs.exists(b'undo.bookmarks'):
2418 2418 bookmarksvfs.rename(
2419 2419 b'undo.bookmarks', b'bookmarks', checkambig=True
2420 2420 )
2421 2421 if self.svfs.exists(b'undo.phaseroots'):
2422 2422 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2423 2423 self.invalidate()
2424 2424
2425 2425 has_node = self.changelog.index.has_node
2426 2426 parentgone = any(not has_node(p) for p in parents)
2427 2427 if parentgone:
2428 2428 # prevent dirstateguard from overwriting already restored one
2429 2429 dsguard.close()
2430 2430
2431 2431 narrowspec.restorebackup(self, b'undo.narrowspec')
2432 2432 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2433 2433 self.dirstate.restorebackup(None, b'undo.dirstate')
2434 2434 try:
2435 2435 branch = self.vfs.read(b'undo.branch')
2436 2436 self.dirstate.setbranch(encoding.tolocal(branch))
2437 2437 except IOError:
2438 2438 ui.warn(
2439 2439 _(
2440 2440 b'named branch could not be reset: '
2441 2441 b'current branch is still \'%s\'\n'
2442 2442 )
2443 2443 % self.dirstate.branch()
2444 2444 )
2445 2445
2446 2446 parents = tuple([p.rev() for p in self[None].parents()])
2447 2447 if len(parents) > 1:
2448 2448 ui.status(
2449 2449 _(
2450 2450 b'working directory now based on '
2451 2451 b'revisions %d and %d\n'
2452 2452 )
2453 2453 % parents
2454 2454 )
2455 2455 else:
2456 2456 ui.status(
2457 2457 _(b'working directory now based on revision %d\n') % parents
2458 2458 )
2459 2459 mergemod.mergestate.clean(self, self[b'.'].node())
2460 2460
2461 2461 # TODO: if we know which new heads may result from this rollback, pass
2462 2462 # them to destroy(), which will prevent the branchhead cache from being
2463 2463 # invalidated.
2464 2464 self.destroyed()
2465 2465 return 0
2466 2466
2467 2467 def _buildcacheupdater(self, newtransaction):
2468 2468 """called during transaction to build the callback updating cache
2469 2469
2470 2470 Lives on the repository to help extensions that might want to augment
2471 2471 this logic. For this purpose, the created transaction is passed to the
2472 2472 method.
2473 2473 """
2474 2474 # we must avoid cyclic reference between repo and transaction.
2475 2475 reporef = weakref.ref(self)
2476 2476
2477 2477 def updater(tr):
2478 2478 repo = reporef()
2479 2479 repo.updatecaches(tr)
2480 2480
2481 2481 return updater
2482 2482
2483 2483 @unfilteredmethod
2484 2484 def updatecaches(self, tr=None, full=False):
2485 2485 """warm appropriate caches
2486 2486
2487 2487 If this function is called after a transaction has closed, the transaction
2488 2488 will be available in the 'tr' argument. This can be used to selectively
2489 2489 update caches relevant to the changes in that transaction.
2490 2490
2491 2491 If 'full' is set, make sure all caches the function knows about have
2492 2492 up-to-date data, even the ones usually loaded more lazily.
2493 2493 """
2494 2494 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2495 2495 # During strip, many caches are invalid but
2496 2496 # later call to `destroyed` will refresh them.
2497 2497 return
2498 2498
2499 2499 if tr is None or tr.changes[b'origrepolen'] < len(self):
2500 2500 # accessing the 'served' branchmap should refresh all the others,
2501 2501 self.ui.debug(b'updating the branch cache\n')
2502 2502 self.filtered(b'served').branchmap()
2503 2503 self.filtered(b'served.hidden').branchmap()
2504 2504
2505 2505 if full:
2506 2506 unfi = self.unfiltered()
2507 2507 rbc = unfi.revbranchcache()
2508 2508 for r in unfi.changelog:
2509 2509 rbc.branchinfo(r)
2510 2510 rbc.write()
2511 2511
2512 2512 # ensure the working copy parents are in the manifestfulltextcache
2513 2513 for ctx in self[b'.'].parents():
2514 2514 ctx.manifest() # accessing the manifest is enough
2515 2515
2516 2516 # accessing fnode cache warms the cache
2517 2517 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2518 2518 # accessing tags warm the cache
2519 2519 self.tags()
2520 2520 self.filtered(b'served').tags()
2521 2521
2522 2522 # The `full` arg is documented as updating even the lazily-loaded
2523 2523 # caches immediately, so we're forcing a write to cause these caches
2524 2524 # to be warmed up even if they haven't explicitly been requested
2525 2525 # yet (if they've never been used by hg, they won't ever have been
2526 2526 # written, even if they're a subset of another kind of cache that
2527 2527 # *has* been used).
2528 2528 for filt in repoview.filtertable.keys():
2529 2529 filtered = self.filtered(filt)
2530 2530 filtered.branchmap().write(filtered)
2531 2531
2532 2532 def invalidatecaches(self):
2533 2533
2534 2534 if '_tagscache' in vars(self):
2535 2535 # can't use delattr on proxy
2536 2536 del self.__dict__['_tagscache']
2537 2537
2538 2538 self._branchcaches.clear()
2539 2539 self.invalidatevolatilesets()
2540 2540 self._sparsesignaturecache.clear()
2541 2541
2542 2542 def invalidatevolatilesets(self):
2543 2543 self.filteredrevcache.clear()
2544 2544 obsolete.clearobscaches(self)
2545 2545 self._quick_access_changeid_invalidate()
2546 2546
2547 2547 def invalidatedirstate(self):
2548 2548 '''Invalidates the dirstate, causing the next call to dirstate
2549 2549 to check if it was modified since the last time it was read,
2550 2550 rereading it if it has.
2551 2551
2552 2552 This is different from dirstate.invalidate() in that it doesn't always
2553 2553 reread the dirstate. Use dirstate.invalidate() if you want to
2554 2554 explicitly read the dirstate again (i.e. restore it to a previous
2555 2555 known good state).'''
2556 2556 if hasunfilteredcache(self, 'dirstate'):
2557 2557 for k in self.dirstate._filecache:
2558 2558 try:
2559 2559 delattr(self.dirstate, k)
2560 2560 except AttributeError:
2561 2561 pass
2562 2562 delattr(self.unfiltered(), 'dirstate')
2563 2563
2564 2564 def invalidate(self, clearfilecache=False):
2565 2565 '''Invalidates both store and non-store parts other than dirstate
2566 2566
2567 2567 If a transaction is running, invalidation of store is omitted,
2568 2568 because discarding in-memory changes might cause inconsistency
2569 2569 (e.g. incomplete fncache causes unintentional failure, but
2570 2570 redundant one doesn't).
2571 2571 '''
2572 2572 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2573 2573 for k in list(self._filecache.keys()):
2574 2574 # dirstate is invalidated separately in invalidatedirstate()
2575 2575 if k == b'dirstate':
2576 2576 continue
2577 2577 if (
2578 2578 k == b'changelog'
2579 2579 and self.currenttransaction()
2580 2580 and self.changelog._delayed
2581 2581 ):
2582 2582 # The changelog object may store unwritten revisions. We don't
2583 2583 # want to lose them.
2584 2584 # TODO: Solve the problem instead of working around it.
2585 2585 continue
2586 2586
2587 2587 if clearfilecache:
2588 2588 del self._filecache[k]
2589 2589 try:
2590 2590 delattr(unfiltered, k)
2591 2591 except AttributeError:
2592 2592 pass
2593 2593 self.invalidatecaches()
2594 2594 if not self.currenttransaction():
2595 2595 # TODO: Changing contents of store outside transaction
2596 2596 # causes inconsistency. We should make in-memory store
2597 2597 # changes detectable, and abort if changed.
2598 2598 self.store.invalidatecaches()
2599 2599
2600 2600 def invalidateall(self):
2601 2601 '''Fully invalidates both store and non-store parts, causing the
2602 2602 subsequent operation to reread any outside changes.'''
2603 2603 # extension should hook this to invalidate its caches
2604 2604 self.invalidate()
2605 2605 self.invalidatedirstate()
2606 2606
2607 2607 @unfilteredmethod
2608 2608 def _refreshfilecachestats(self, tr):
2609 2609 """Reload stats of cached files so that they are flagged as valid"""
2610 2610 for k, ce in self._filecache.items():
2611 2611 k = pycompat.sysstr(k)
2612 2612 if k == 'dirstate' or k not in self.__dict__:
2613 2613 continue
2614 2614 ce.refresh()
2615 2615
2616 2616 def _lock(
2617 2617 self,
2618 2618 vfs,
2619 2619 lockname,
2620 2620 wait,
2621 2621 releasefn,
2622 2622 acquirefn,
2623 2623 desc,
2624 2624 inheritchecker=None,
2625 2625 parentenvvar=None,
2626 2626 ):
2627 2627 parentlock = None
2628 2628 # the contents of parentenvvar are used by the underlying lock to
2629 2629 # determine whether it can be inherited
2630 2630 if parentenvvar is not None:
2631 2631 parentlock = encoding.environ.get(parentenvvar)
2632 2632
2633 2633 timeout = 0
2634 2634 warntimeout = 0
2635 2635 if wait:
2636 2636 timeout = self.ui.configint(b"ui", b"timeout")
2637 2637 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2638 2638 # internal config: ui.signal-safe-lock
2639 2639 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2640 2640
2641 2641 l = lockmod.trylock(
2642 2642 self.ui,
2643 2643 vfs,
2644 2644 lockname,
2645 2645 timeout,
2646 2646 warntimeout,
2647 2647 releasefn=releasefn,
2648 2648 acquirefn=acquirefn,
2649 2649 desc=desc,
2650 2650 inheritchecker=inheritchecker,
2651 2651 parentlock=parentlock,
2652 2652 signalsafe=signalsafe,
2653 2653 )
2654 2654 return l
2655 2655
2656 2656 def _afterlock(self, callback):
2657 2657 """add a callback to be run when the repository is fully unlocked
2658 2658
2659 2659 The callback will be executed when the outermost lock is released
2660 2660 (with wlock being higher level than 'lock')."""
2661 2661 for ref in (self._wlockref, self._lockref):
2662 2662 l = ref and ref()
2663 2663 if l and l.held:
2664 2664 l.postrelease.append(callback)
2665 2665 break
2666 2666 else: # no lock has been found.
2667 2667 callback(True)
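# Usage sketch: the callback receives a single boolean indicating success
# (it is called with True right away when no lock is held):
#
#   repo._afterlock(lambda success: repo.ui.note(b'fully unlocked\n'))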
2668 2668
2669 2669 def lock(self, wait=True):
2670 2670 '''Lock the repository store (.hg/store) and return a weak reference
2671 2671 to the lock. Use this before modifying the store (e.g. committing or
2672 2672 stripping). If you are opening a transaction, get a lock as well.
2673 2673
2674 2674 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2675 2675 'wlock' first to avoid a dead-lock hazard.'''
2676 2676 l = self._currentlock(self._lockref)
2677 2677 if l is not None:
2678 2678 l.lock()
2679 2679 return l
2680 2680
2681 2681 l = self._lock(
2682 2682 vfs=self.svfs,
2683 2683 lockname=b"lock",
2684 2684 wait=wait,
2685 2685 releasefn=None,
2686 2686 acquirefn=self.invalidate,
2687 2687 desc=_(b'repository %s') % self.origroot,
2688 2688 )
2689 2689 self._lockref = weakref.ref(l)
2690 2690 return l
2691 2691
2692 2692 def _wlockchecktransaction(self):
2693 2693 if self.currenttransaction() is not None:
2694 2694 raise error.LockInheritanceContractViolation(
2695 2695 b'wlock cannot be inherited in the middle of a transaction'
2696 2696 )
2697 2697
2698 2698 def wlock(self, wait=True):
2699 2699 '''Lock the non-store parts of the repository (everything under
2700 2700 .hg except .hg/store) and return a weak reference to the lock.
2701 2701
2702 2702 Use this before modifying files in .hg.
2703 2703
2704 2704 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2705 2705 'wlock' first to avoid a dead-lock hazard.'''
2706 2706 l = self._wlockref and self._wlockref()
2707 2707 if l is not None and l.held:
2708 2708 l.lock()
2709 2709 return l
2710 2710
2711 2711 # We do not need to check for non-waiting lock acquisition. Such
2712 2712 # acquisition would not cause a dead-lock as it would just fail.
2713 2713 if wait and (
2714 2714 self.ui.configbool(b'devel', b'all-warnings')
2715 2715 or self.ui.configbool(b'devel', b'check-locks')
2716 2716 ):
2717 2717 if self._currentlock(self._lockref) is not None:
2718 2718 self.ui.develwarn(b'"wlock" acquired after "lock"')
2719 2719
2720 2720 def unlock():
2721 2721 if self.dirstate.pendingparentchange():
2722 2722 self.dirstate.invalidate()
2723 2723 else:
2724 2724 self.dirstate.write(None)
2725 2725
2726 2726 self._filecache[b'dirstate'].refresh()
2727 2727
2728 2728 l = self._lock(
2729 2729 self.vfs,
2730 2730 b"wlock",
2731 2731 wait,
2732 2732 unlock,
2733 2733 self.invalidatedirstate,
2734 2734 _(b'working directory of %s') % self.origroot,
2735 2735 inheritchecker=self._wlockchecktransaction,
2736 2736 parentenvvar=b'HG_WLOCK_LOCKER',
2737 2737 )
2738 2738 self._wlockref = weakref.ref(l)
2739 2739 return l
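# The ordering rule from the docstrings above, as a short sketch (this
# mirrors the pattern commit() uses further down): always take 'wlock'
# before 'lock', and let the context managers release them in reverse order.
#
#   with repo.wlock(), repo.lock():
#       pass  # safe to modify both .hg and .hg/store here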
2740 2740
2741 2741 def _currentlock(self, lockref):
2742 2742 """Returns the lock if it's held, or None if it's not."""
2743 2743 if lockref is None:
2744 2744 return None
2745 2745 l = lockref()
2746 2746 if l is None or not l.held:
2747 2747 return None
2748 2748 return l
2749 2749
2750 2750 def currentwlock(self):
2751 2751 """Returns the wlock if it's held, or None if it's not."""
2752 2752 return self._currentlock(self._wlockref)
2753 2753
2754 2754 def _filecommit(
2755 2755 self,
2756 2756 fctx,
2757 2757 manifest1,
2758 2758 manifest2,
2759 2759 linkrev,
2760 2760 tr,
2761 2761 changelist,
2762 2762 includecopymeta,
2763 2763 ):
2764 2764 """
2765 2765 commit an individual file as part of a larger transaction
2766 2766 """
2767 2767
2768 2768 fname = fctx.path()
2769 2769 fparent1 = manifest1.get(fname, nullid)
2770 2770 fparent2 = manifest2.get(fname, nullid)
2771 2771 if isinstance(fctx, context.filectx):
2772 2772 node = fctx.filenode()
2773 2773 if node in [fparent1, fparent2]:
2774 2774 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2775 2775 if (
2776 2776 fparent1 != nullid
2777 2777 and manifest1.flags(fname) != fctx.flags()
2778 2778 ) or (
2779 2779 fparent2 != nullid
2780 2780 and manifest2.flags(fname) != fctx.flags()
2781 2781 ):
2782 2782 changelist.append(fname)
2783 2783 return node
2784 2784
2785 2785 flog = self.file(fname)
2786 2786 meta = {}
2787 2787 cfname = fctx.copysource()
2788 2788 if cfname and cfname != fname:
2789 2789 # Mark the new revision of this file as a copy of another
2790 2790 # file. This copy data will effectively act as a parent
2791 2791 # of this new revision. If this is a merge, the first
2792 2792 # parent will be the nullid (meaning "look up the copy data")
2793 2793 # and the second one will be the other parent. For example:
2794 2794 #
2795 2795 # 0 --- 1 --- 3 rev1 changes file foo
2796 2796 # \ / rev2 renames foo to bar and changes it
2797 2797 # \- 2 -/ rev3 should have bar with all changes and
2798 2798 # should record that bar descends from
2799 2799 # bar in rev2 and foo in rev1
2800 2800 #
2801 2801 # this allows this merge to succeed:
2802 2802 #
2803 2803 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2804 2804 # \ / merging rev3 and rev4 should use bar@rev2
2805 2805 # \- 2 --- 4 as the merge base
2806 2806 #
2807 2807
2808 2808 cnode = manifest1.get(cfname)
2809 2809 newfparent = fparent2
2810 2810
2811 2811 if manifest2: # branch merge
2812 2812 if fparent2 == nullid or cnode is None: # copied on remote side
2813 2813 if cfname in manifest2:
2814 2814 cnode = manifest2[cfname]
2815 2815 newfparent = fparent1
2816 2816
2817 2817 # Here, we used to search backwards through history to try to find
2818 2818 # where the file copy came from if the source of a copy was not in
2819 2819 # the parent directory. However, this doesn't actually make sense to
2820 2820 # do (what does a copy from something not in your working copy even
2821 2821 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2822 2822 # the user that copy information was dropped, so if they didn't
2823 2823 # expect this outcome it can be fixed, but this is the correct
2824 2824 # behavior in this circumstance.
2825 2825
2826 2826 if cnode:
2827 2827 self.ui.debug(
2828 2828 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2829 2829 )
2830 2830 if includecopymeta:
2831 2831 meta[b"copy"] = cfname
2832 2832 meta[b"copyrev"] = hex(cnode)
2833 2833 fparent1, fparent2 = nullid, newfparent
2834 2834 else:
2835 2835 self.ui.warn(
2836 2836 _(
2837 2837 b"warning: can't find ancestor for '%s' "
2838 2838 b"copied from '%s'!\n"
2839 2839 )
2840 2840 % (fname, cfname)
2841 2841 )
2842 2842
2843 2843 elif fparent1 == nullid:
2844 2844 fparent1, fparent2 = fparent2, nullid
2845 2845 elif fparent2 != nullid:
2846 2846 # is one parent an ancestor of the other?
2847 2847 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2848 2848 if fparent1 in fparentancestors:
2849 2849 fparent1, fparent2 = fparent2, nullid
2850 2850 elif fparent2 in fparentancestors:
2851 2851 fparent2 = nullid
2852 2852
2853 2853 # is the file changed?
2854 2854 text = fctx.data()
2855 2855 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2856 2856 changelist.append(fname)
2857 2857 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2858 2858 # are just the flags changed during merge?
2859 2859 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2860 2860 changelist.append(fname)
2861 2861
2862 2862 return fparent1
2863 2863
2864 2864 def checkcommitpatterns(self, wctx, match, status, fail):
2865 2865 """check for commit arguments that aren't committable"""
2866 2866 if match.isexact() or match.prefix():
2867 2867 matched = set(status.modified + status.added + status.removed)
2868 2868
2869 2869 for f in match.files():
2870 2870 f = self.dirstate.normalize(f)
2871 2871 if f == b'.' or f in matched or f in wctx.substate:
2872 2872 continue
2873 2873 if f in status.deleted:
2874 2874 fail(f, _(b'file not found!'))
2875 2875 # Is it a directory that exists or used to exist?
2876 2876 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2877 2877 d = f + b'/'
2878 2878 for mf in matched:
2879 2879 if mf.startswith(d):
2880 2880 break
2881 2881 else:
2882 2882 fail(f, _(b"no match under directory!"))
2883 2883 elif f not in self.dirstate:
2884 2884 fail(f, _(b"file not tracked!"))
2885 2885
2886 2886 @unfilteredmethod
2887 2887 def commit(
2888 2888 self,
2889 2889 text=b"",
2890 2890 user=None,
2891 2891 date=None,
2892 2892 match=None,
2893 2893 force=False,
2894 2894 editor=None,
2895 2895 extra=None,
2896 2896 ):
2897 2897 """Add a new revision to current repository.
2898 2898
2899 2899 Revision information is gathered from the working directory,
2900 2900 match can be used to filter the committed files. If editor is
2901 2901 supplied, it is called to get a commit message.
2902 2902 """
2903 2903 if extra is None:
2904 2904 extra = {}
2905 2905
2906 2906 def fail(f, msg):
2907 2907 raise error.Abort(b'%s: %s' % (f, msg))
2908 2908
2909 2909 if not match:
2910 2910 match = matchmod.always()
2911 2911
2912 2912 if not force:
2913 2913 match.bad = fail
2914 2914
2915 2915 # lock() for recent changelog (see issue4368)
2916 2916 with self.wlock(), self.lock():
2917 2917 wctx = self[None]
2918 2918 merge = len(wctx.parents()) > 1
2919 2919
2920 2920 if not force and merge and not match.always():
2921 2921 raise error.Abort(
2922 2922 _(
2923 2923 b'cannot partially commit a merge '
2924 2924 b'(do not specify files or patterns)'
2925 2925 )
2926 2926 )
2927 2927
2928 2928 status = self.status(match=match, clean=force)
2929 2929 if force:
2930 2930 status.modified.extend(
2931 2931 status.clean
2932 2932 ) # mq may commit clean files
2933 2933
2934 2934 # check subrepos
2935 2935 subs, commitsubs, newstate = subrepoutil.precommit(
2936 2936 self.ui, wctx, status, match, force=force
2937 2937 )
2938 2938
2939 2939 # make sure all explicit patterns are matched
2940 2940 if not force:
2941 2941 self.checkcommitpatterns(wctx, match, status, fail)
2942 2942
2943 2943 cctx = context.workingcommitctx(
2944 2944 self, status, text, user, date, extra
2945 2945 )
2946 2946
2947 2947 # internal config: ui.allowemptycommit
2948 2948 allowemptycommit = (
2949 2949 wctx.branch() != wctx.p1().branch()
2950 2950 or extra.get(b'close')
2951 2951 or merge
2952 2952 or cctx.files()
2953 2953 or self.ui.configbool(b'ui', b'allowemptycommit')
2954 2954 )
2955 2955 if not allowemptycommit:
2956 2956 return None
2957 2957
2958 2958 if merge and cctx.deleted():
2959 2959 raise error.Abort(_(b"cannot commit merge with missing files"))
2960 2960
2961 2961 ms = mergemod.mergestate.read(self)
2962 2962 mergeutil.checkunresolved(ms)
2963 2963
2964 2964 if editor:
2965 2965 cctx._text = editor(self, cctx, subs)
2966 2966 edited = text != cctx._text
2967 2967
2968 2968 # Save commit message in case this transaction gets rolled back
2969 2969 # (e.g. by a pretxncommit hook). Leave the content alone on
2970 2970 # the assumption that the user will use the same editor again.
2971 2971 msgfn = self.savecommitmessage(cctx._text)
2972 2972
2973 2973 # commit subs and write new state
2974 2974 if subs:
2975 2975 uipathfn = scmutil.getuipathfn(self)
2976 2976 for s in sorted(commitsubs):
2977 2977 sub = wctx.sub(s)
2978 2978 self.ui.status(
2979 2979 _(b'committing subrepository %s\n')
2980 2980 % uipathfn(subrepoutil.subrelpath(sub))
2981 2981 )
2982 2982 sr = sub.commit(cctx._text, user, date)
2983 2983 newstate[s] = (newstate[s][0], sr)
2984 2984 subrepoutil.writestate(self, newstate)
2985 2985
2986 2986 p1, p2 = self.dirstate.parents()
2987 2987 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2988 2988 try:
2989 2989 self.hook(
2990 2990 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2991 2991 )
2992 2992 with self.transaction(b'commit'):
2993 2993 ret = self.commitctx(cctx, True)
2994 2994 # update bookmarks, dirstate and mergestate
2995 2995 bookmarks.update(self, [p1, p2], ret)
2996 2996 cctx.markcommitted(ret)
2997 2997 ms.reset()
2998 2998 except: # re-raises
2999 2999 if edited:
3000 3000 self.ui.write(
3001 3001 _(b'note: commit message saved in %s\n') % msgfn
3002 3002 )
3003 3003 raise
3004 3004
3005 3005 def commithook(unused_success):
3005 3006 # hack for commands that use a temporary commit (e.g. histedit):
3007 3007 # the temporary commit may have been stripped before the hook runs
3008 3008 if self.changelog.hasnode(ret):
3009 3009 self.hook(
3010 3010 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3011 3011 )
3012 3012
3013 3013 self._afterlock(commithook)
3014 3014 return ret
3015 3015
3016 3016 @unfilteredmethod
3017 3017 def commitctx(self, ctx, error=False, origctx=None):
3018 3018 """Add a new revision to current repository.
3019 3019 Revision information is passed via the context argument.
3020 3020
3021 3021 ctx.files() should list all files involved in this commit, i.e.
3022 3022 modified/added/removed files. On merge, it may be wider than the
3023 3023 ctx.files() to be committed, since any file nodes derived directly
3024 3024 from p1 or p2 are excluded from the committed ctx.files().
3025 3025
3026 3026 origctx is for convert to work around the problem that bug
3027 3027 fixes to the files list in changesets change hashes. For
3028 3028 convert to be the identity, it can pass an origctx and this
3029 3029 function will use the same files list when it makes sense to
3030 3030 do so.
3031 3031 """
3032 3032
3033 3033 p1, p2 = ctx.p1(), ctx.p2()
3034 3034 user = ctx.user()
3035 3035
3036 3036 if self.filecopiesmode == b'changeset-sidedata':
3037 3037 writechangesetcopy = True
3038 3038 writefilecopymeta = True
3039 3039 writecopiesto = None
3040 3040 else:
3041 3041 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3042 3042 writefilecopymeta = writecopiesto != b'changeset-only'
3043 3043 writechangesetcopy = writecopiesto in (
3044 3044 b'changeset-only',
3045 3045 b'compatibility',
3046 3046 )
3047 3047 p1copies, p2copies = None, None
3048 3048 if writechangesetcopy:
3049 3049 p1copies = ctx.p1copies()
3050 3050 p2copies = ctx.p2copies()
3051 3051 filesadded, filesremoved = None, None
3052 3052 with self.lock(), self.transaction(b"commit") as tr:
3053 3053 trp = weakref.proxy(tr)
3054 3054
3055 3055 if ctx.manifestnode():
3056 3056 # reuse an existing manifest revision
3057 3057 self.ui.debug(b'reusing known manifest\n')
3058 3058 mn = ctx.manifestnode()
3059 3059 files = ctx.files()
3060 3060 if writechangesetcopy:
3061 3061 filesadded = ctx.filesadded()
3062 3062 filesremoved = ctx.filesremoved()
3063 3063 elif ctx.files():
3064 3064 m1ctx = p1.manifestctx()
3065 3065 m2ctx = p2.manifestctx()
3066 3066 mctx = m1ctx.copy()
3067 3067
3068 3068 m = mctx.read()
3069 3069 m1 = m1ctx.read()
3070 3070 m2 = m2ctx.read()
3071 3071
3072 3072 # check in files
3073 3073 added = []
3074 3074 changed = []
3075 3075 removed = list(ctx.removed())
3076 3076 linkrev = len(self)
3077 3077 self.ui.note(_(b"committing files:\n"))
3078 3078 uipathfn = scmutil.getuipathfn(self)
3079 3079 for f in sorted(ctx.modified() + ctx.added()):
3080 3080 self.ui.note(uipathfn(f) + b"\n")
3081 3081 try:
3082 3082 fctx = ctx[f]
3083 3083 if fctx is None:
3084 3084 removed.append(f)
3085 3085 else:
3086 3086 added.append(f)
3087 3087 m[f] = self._filecommit(
3088 3088 fctx,
3089 3089 m1,
3090 3090 m2,
3091 3091 linkrev,
3092 3092 trp,
3093 3093 changed,
3094 3094 writefilecopymeta,
3095 3095 )
3096 3096 m.setflag(f, fctx.flags())
3097 3097 except OSError:
3098 3098 self.ui.warn(
3099 3099 _(b"trouble committing %s!\n") % uipathfn(f)
3100 3100 )
3101 3101 raise
3102 3102 except IOError as inst:
3103 3103 errcode = getattr(inst, 'errno', errno.ENOENT)
3104 3104 if error or errcode and errcode != errno.ENOENT:
3105 3105 self.ui.warn(
3106 3106 _(b"trouble committing %s!\n") % uipathfn(f)
3107 3107 )
3108 3108 raise
3109 3109
3110 3110 # update manifest
3111 3111 removed = [f for f in removed if f in m1 or f in m2]
3112 3112 drop = sorted([f for f in removed if f in m])
3113 3113 for f in drop:
3114 3114 del m[f]
3115 3115 if p2.rev() != nullrev:
3116 3116
3117 3117 @util.cachefunc
3118 3118 def mas():
3119 3119 p1n = p1.node()
3120 3120 p2n = p2.node()
3121 3121 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3122 3122 if not cahs:
3123 3123 cahs = [nullrev]
3124 3124 return [self[r].manifest() for r in cahs]
3125 3125
3126 3126 def deletionfromparent(f):
3127 3127 # When a file is removed relative to p1 in a merge, this
3128 3128 # function determines whether the absence is due to a
3129 3129 # deletion from a parent, or whether the merge commit
3130 3130 # itself deletes the file. We decide this by doing a
3131 3131 # simplified three way merge of the manifest entry for
3132 3132 # the file. There are two ways we decide the merge
3133 3133 # itself didn't delete a file:
3134 3134 # - neither parent (nor the merge) contain the file
3135 3135 # - exactly one parent contains the file, and that
3136 3136 # parent has the same filelog entry as the merge
3137 3137 # ancestor (or all of them if there are two). In other
3138 3138 # words, that parent left the file unchanged while the
3139 3139 # other one deleted it.
3140 3140 # One way to think about this is that deleting a file is
3141 3141 # similar to emptying it, so the list of changed files
3142 3142 # should be similar either way. The computation
3143 3143 # described above is not done directly in _filecommit
3144 3144 # when creating the list of changed files, however
3145 3145 # it does something very similar by comparing filelog
3146 3146 # nodes.
3147 3147 if f in m1:
3148 3148 return f not in m2 and all(
3149 3149 f in ma and ma.find(f) == m1.find(f)
3150 3150 for ma in mas()
3151 3151 )
3152 3152 elif f in m2:
3153 3153 return all(
3154 3154 f in ma and ma.find(f) == m2.find(f)
3155 3155 for ma in mas()
3156 3156 )
3157 3157 else:
3158 3158 return True
3159 3159
3160 3160 removed = [f for f in removed if not deletionfromparent(f)]
3161 3161
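# A standalone, simplified sketch of the rule implemented by
# deletionfromparent() above: plain dicts mapping filename -> filenode stand
# in for manifest objects and the merge ancestors are passed explicitly.
# The names below are hypothetical illustrations, not Mercurial APIs.
def deletion_from_parent(f, m1, m2, ancestors):
    """Return True when the absence of ``f`` is inherited from a parent
    rather than introduced by the merge commit itself."""
    if f in m1:
        # p1 still has the file: the deletion comes from p2 only if p1
        # kept the ancestors' version unchanged.
        return f not in m2 and all(
            f in ma and ma[f] == m1[f] for ma in ancestors
        )
    elif f in m2:
        # symmetric case: p2 kept the ancestors' version, p1 deleted it.
        return all(f in ma and ma[f] == m2[f] for ma in ancestors)
    # neither parent has the file any more
    return True

# p2 deleted the file while p1 left it untouched, so the deletion is
# inherited from a parent and the file is filtered out of ``removed``:
assert deletion_from_parent(b'a.txt', {b'a.txt': b'n0'}, {}, [{b'a.txt': b'n0'}])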
3162 3162 files = changed + removed
3163 3163 md = None
3164 3164 if not files:
3165 3165 # if no "files" actually changed in terms of the changelog,
3166 3166 # try hard to detect unmodified manifest entry so that the
3167 3167 # exact same commit can be reproduced later on convert.
3168 3168 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3169 3169 if not files and md:
3170 3170 self.ui.debug(
3171 3171 b'not reusing manifest (no file change in '
3172 3172 b'changelog, but manifest differs)\n'
3173 3173 )
3174 3174 if files or md:
3175 3175 self.ui.note(_(b"committing manifest\n"))
3176 3176 # we're using narrowmatch here since it's already applied at
3177 3177 # other stages (such as dirstate.walk), so we're already
3178 3178 # ignoring things outside of narrowspec in most cases. The
3179 3179 # one case where we might have files outside the narrowspec
3180 3180 # at this point is merges, and we already error out in the
3181 3181 # case where the merge has files outside of the narrowspec,
3182 3182 # so this is safe.
3183 3183 mn = mctx.write(
3184 3184 trp,
3185 3185 linkrev,
3186 3186 p1.manifestnode(),
3187 3187 p2.manifestnode(),
3188 3188 added,
3189 3189 drop,
3190 3190 match=self.narrowmatch(),
3191 3191 )
3192 3192
3193 3193 if writechangesetcopy:
3194 3194 filesadded = [
3195 3195 f for f in changed if not (f in m1 or f in m2)
3196 3196 ]
3197 3197 filesremoved = removed
3198 3198 else:
3199 3199 self.ui.debug(
3200 3200 b'reusing manifest from p1 (listed files '
3201 3201 b'actually unchanged)\n'
3202 3202 )
3203 3203 mn = p1.manifestnode()
3204 3204 else:
3205 3205 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3206 3206 mn = p1.manifestnode()
3207 3207 files = []
3208 3208
3209 3209 if writecopiesto == b'changeset-only':
3210 3210 # If writing only to changeset extras, use None to indicate that
3211 3211 # no entry should be written. If writing to both, write an empty
3212 3212 # entry to prevent the reader from falling back to reading
3213 3213 # filelogs.
3214 3214 p1copies = p1copies or None
3215 3215 p2copies = p2copies or None
3216 3216 filesadded = filesadded or None
3217 3217 filesremoved = filesremoved or None
3218 3218
3219 3219 if origctx and origctx.manifestnode() == mn:
3220 3220 files = origctx.files()
3221 3221
3222 3222 # update changelog
3223 3223 self.ui.note(_(b"committing changelog\n"))
3224 3224 self.changelog.delayupdate(tr)
3225 3225 n = self.changelog.add(
3226 3226 mn,
3227 3227 files,
3228 3228 ctx.description(),
3229 3229 trp,
3230 3230 p1.node(),
3231 3231 p2.node(),
3232 3232 user,
3233 3233 ctx.date(),
3234 3234 ctx.extra().copy(),
3235 3235 p1copies,
3236 3236 p2copies,
3237 3237 filesadded,
3238 3238 filesremoved,
3239 3239 )
3240 3240 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3241 3241 self.hook(
3242 3242 b'pretxncommit',
3243 3243 throw=True,
3244 3244 node=hex(n),
3245 3245 parent1=xp1,
3246 3246 parent2=xp2,
3247 3247 )
3248 3248 # set the new commit in its proper phase
3249 3249 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3250 3250 if targetphase:
3251 3251 # retracting the boundary does not alter parent changesets.
3252 3252 # if a parent has a higher phase, the resulting phase will
3253 3253 # be compliant anyway
3254 3254 #
3255 3255 # if minimal phase was 0 we don't need to retract anything
3256 3256 phases.registernew(self, tr, targetphase, [n])
3257 3257 return n
3258 3258
3259 3259 @unfilteredmethod
3260 3260 def destroying(self):
3261 3261 '''Inform the repository that nodes are about to be destroyed.
3262 3262 Intended for use by strip and rollback, so there's a common
3263 3263 place for anything that has to be done before destroying history.
3264 3264
3265 3265 This is mostly useful for saving state that is in memory and waiting
3266 3266 to be flushed when the current lock is released. Because a call to
3267 3267 destroyed is imminent, the repo will be invalidated causing those
3268 3268 changes to stay in memory (waiting for the next unlock), or vanish
3269 3269 completely.
3270 3270 '''
3271 3271 # When using the same lock to commit and strip, the phasecache is left
3272 3272 # dirty after committing. Then when we strip, the repo is invalidated,
3273 3273 # causing those changes to disappear.
3274 3274 if '_phasecache' in vars(self):
3275 3275 self._phasecache.write()
3276 3276
3277 3277 @unfilteredmethod
3278 3278 def destroyed(self):
3279 3279 '''Inform the repository that nodes have been destroyed.
3280 3280 Intended for use by strip and rollback, so there's a common
3281 3281 place for anything that has to be done after destroying history.
3282 3282 '''
3283 3283 # When one tries to:
3284 3284 # 1) destroy nodes thus calling this method (e.g. strip)
3285 3285 # 2) use phasecache somewhere (e.g. commit)
3286 3286 #
3287 3287 # then 2) will fail because the phasecache contains nodes that were
3288 3288 # removed. We can either remove phasecache from the filecache,
3289 3289 # causing it to reload next time it is accessed, or simply filter
3290 3290 # the removed nodes now and write the updated cache.
3291 3291 self._phasecache.filterunknown(self)
3292 3292 self._phasecache.write()
3293 3293
3294 3294 # refresh all repository caches
3295 3295 self.updatecaches()
3296 3296
3297 3297 # Ensure the persistent tag cache is updated. Doing it now
3298 3298 # means that the tag cache only has to worry about destroyed
3299 3299 # heads immediately after a strip/rollback. That in turn
3300 3300 # guarantees that "cachetip == currenttip" (comparing both rev
3301 3301 # and node) always means no nodes have been added or destroyed.
3302 3302
3303 3303 # XXX this is suboptimal when qrefresh'ing: we strip the current
3304 3304 # head, refresh the tag cache, then immediately add a new head.
3305 3305 # But I think doing it this way is necessary for the "instant
3306 3306 # tag cache retrieval" case to work.
3307 3307 self.invalidate()
3308 3308
3309 3309 def status(
3310 3310 self,
3311 3311 node1=b'.',
3312 3312 node2=None,
3313 3313 match=None,
3314 3314 ignored=False,
3315 3315 clean=False,
3316 3316 unknown=False,
3317 3317 listsubrepos=False,
3318 3318 ):
3319 3319 '''a convenience method that calls node1.status(node2)'''
3320 3320 return self[node1].status(
3321 3321 node2, match, ignored, clean, unknown, listsubrepos
3322 3322 )
3323 3323
3324 3324 def addpostdsstatus(self, ps):
3325 3325 """Add a callback to run within the wlock, at the point at which status
3326 3326 fixups happen.
3327 3327
3328 3328 On status completion, callback(wctx, status) will be called with the
3329 3329 wlock held, unless the dirstate has changed from underneath or the wlock
3330 3330 couldn't be grabbed.
3331 3331
3332 3332 Callbacks should not capture and use a cached copy of the dirstate --
3333 3333 it might change in the meanwhile. Instead, they should access the
3334 3334 dirstate via wctx.repo().dirstate.
3335 3335
3336 3336 This list is emptied out after each status run -- extensions should
3337 3337 make sure to add to this list each time dirstate.status is called.
3338 3338 Extensions should also make sure they don't call this for statuses
3339 3339 that don't involve the dirstate.
3340 3340 """
3341 3341
3342 3342 # The list is located here for uniqueness reasons -- it is actually
3343 3343 # managed by the workingctx, but that isn't unique per-repo.
3344 3344 self._postdsstatus.append(ps)
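# A hypothetical usage sketch of the contract documented above: an extension
# registers a callback that runs after status fixups, reading the dirstate
# only through wctx.repo(). The callback name and the surrounding wiring are
# illustrative assumptions; only addpostdsstatus() itself is from the source.
def _warn_many_modified(wctx, status):
    ui = wctx.repo().ui
    if len(status.modified) > 1000:
        ui.warn(b'post-status: more than 1000 modified files\n')

# somewhere in the extension, before a code path that runs dirstate.status:
#     repo.addpostdsstatus(_warn_many_modified)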
3345 3345
3346 3346 def postdsstatus(self):
3347 3347 """Used by workingctx to get the list of post-dirstate-status hooks."""
3348 3348 return self._postdsstatus
3349 3349
3350 3350 def clearpostdsstatus(self):
3351 3351 """Used by workingctx to clear post-dirstate-status hooks."""
3352 3352 del self._postdsstatus[:]
3353 3353
3354 3354 def heads(self, start=None):
3355 3355 if start is None:
3356 3356 cl = self.changelog
3357 3357 headrevs = reversed(cl.headrevs())
3358 3358 return [cl.node(rev) for rev in headrevs]
3359 3359
3360 3360 heads = self.changelog.heads(start)
3361 3361 # sort the output in rev descending order
3362 3362 return sorted(heads, key=self.changelog.rev, reverse=True)
3363 3363
3364 3364 def branchheads(self, branch=None, start=None, closed=False):
3365 3365 '''return a (possibly filtered) list of heads for the given branch
3366 3366
3367 3367 Heads are returned in topological order, from newest to oldest.
3368 3368 If branch is None, use the dirstate branch.
3369 3369 If start is not None, return only heads reachable from start.
3370 3370 If closed is True, return heads that are marked as closed as well.
3371 3371 '''
3372 3372 if branch is None:
3373 3373 branch = self[None].branch()
3374 3374 branches = self.branchmap()
3375 3375 if not branches.hasbranch(branch):
3376 3376 return []
3377 3377 # the cache returns heads ordered lowest to highest
3378 3378 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3379 3379 if start is not None:
3380 3380 # filter out the heads that cannot be reached from startrev
3381 3381 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3382 3382 bheads = [h for h in bheads if h in fbheads]
3383 3383 return bheads
3384 3384
3385 3385 def branches(self, nodes):
3386 3386 if not nodes:
3387 3387 nodes = [self.changelog.tip()]
3388 3388 b = []
3389 3389 for n in nodes:
3390 3390 t = n
3391 3391 while True:
3392 3392 p = self.changelog.parents(n)
3393 3393 if p[1] != nullid or p[0] == nullid:
3394 3394 b.append((t, n, p[0], p[1]))
3395 3395 break
3396 3396 n = p[0]
3397 3397 return b
3398 3398
3399 3399 def between(self, pairs):
3400 3400 r = []
3401 3401
3402 3402 for top, bottom in pairs:
3403 3403 n, l, i = top, [], 0
3404 3404 f = 1
3405 3405
3406 3406 while n != bottom and n != nullid:
3407 3407 p = self.changelog.parents(n)[0]
3408 3408 if i == f:
3409 3409 l.append(n)
3410 3410 f = f * 2
3411 3411 n = p
3412 3412 i += 1
3413 3413
3414 3414 r.append(l)
3415 3415
3416 3416 return r
3417 3417
3418 3418 def checkpush(self, pushop):
3419 3419 """Extensions can override this function if additional checks have
3420 3420 to be performed before pushing, or call it if they override push
3421 3421 command.
3422 3422 """
3423 3423
3424 3424 @unfilteredpropertycache
3425 3425 def prepushoutgoinghooks(self):
3426 3426 """Return util.hooks consists of a pushop with repo, remote, outgoing
3427 3427 methods, which are called before pushing changesets.
3428 3428 """
3429 3429 return util.hooks()
3430 3430
3431 3431 def pushkey(self, namespace, key, old, new):
3432 3432 try:
3433 3433 tr = self.currenttransaction()
3434 3434 hookargs = {}
3435 3435 if tr is not None:
3436 3436 hookargs.update(tr.hookargs)
3437 3437 hookargs = pycompat.strkwargs(hookargs)
3438 3438 hookargs['namespace'] = namespace
3439 3439 hookargs['key'] = key
3440 3440 hookargs['old'] = old
3441 3441 hookargs['new'] = new
3442 3442 self.hook(b'prepushkey', throw=True, **hookargs)
3443 3443 except error.HookAbort as exc:
3444 3444 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3445 3445 if exc.hint:
3446 3446 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3447 3447 return False
3448 3448 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3449 3449 ret = pushkey.push(self, namespace, key, old, new)
3450 3450
3451 3451 def runhook(unused_success):
3452 3452 self.hook(
3453 3453 b'pushkey',
3454 3454 namespace=namespace,
3455 3455 key=key,
3456 3456 old=old,
3457 3457 new=new,
3458 3458 ret=ret,
3459 3459 )
3460 3460
3461 3461 self._afterlock(runhook)
3462 3462 return ret
3463 3463
3464 3464 def listkeys(self, namespace):
3465 3465 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3466 3466 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3467 3467 values = pushkey.list(self, namespace)
3468 3468 self.hook(b'listkeys', namespace=namespace, values=values)
3469 3469 return values
3470 3470
3471 3471 def debugwireargs(self, one, two, three=None, four=None, five=None):
3472 3472 '''used to test argument passing over the wire'''
3473 3473 return b"%s %s %s %s %s" % (
3474 3474 one,
3475 3475 two,
3476 3476 pycompat.bytestr(three),
3477 3477 pycompat.bytestr(four),
3478 3478 pycompat.bytestr(five),
3479 3479 )
3480 3480
3481 3481 def savecommitmessage(self, text):
3482 3482 fp = self.vfs(b'last-message.txt', b'wb')
3483 3483 try:
3484 3484 fp.write(text)
3485 3485 finally:
3486 3486 fp.close()
3487 3487 return self.pathto(fp.name[len(self.root) + 1 :])
3488 3488
3489 3489
3490 3490 # used to avoid circular references so destructors work
3491 3491 def aftertrans(files):
3492 3492 renamefiles = [tuple(t) for t in files]
3493 3493
3494 3494 def a():
3495 3495 for vfs, src, dest in renamefiles:
3496 3496 # if src and dest refer to a same file, vfs.rename is a no-op,
3497 3497 # leaving both src and dest on disk. delete dest to make sure
3498 3498 # the rename couldn't be such a no-op.
3499 3499 vfs.tryunlink(dest)
3500 3500 try:
3501 3501 vfs.rename(src, dest)
3502 3502 except OSError: # journal file does not yet exist
3503 3503 pass
3504 3504
3505 3505 return a
3506 3506
3507 3507
3508 3508 def undoname(fn):
3509 3509 base, name = os.path.split(fn)
3510 3510 assert name.startswith(b'journal')
3511 3511 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3512 3512
3513 3513
3514 3514 def instance(ui, path, create, intents=None, createopts=None):
3515 3515 localpath = util.urllocalpath(path)
3516 3516 if create:
3517 3517 createrepository(ui, localpath, createopts=createopts)
3518 3518
3519 3519 return makelocalrepository(ui, localpath, intents=intents)
3520 3520
3521 3521
3522 3522 def islocal(path):
3523 3523 return True
3524 3524
3525 3525
3526 3526 def defaultcreateopts(ui, createopts=None):
3527 3527 """Populate the default creation options for a repository.
3528 3528
3529 3529 A dictionary of explicitly requested creation options can be passed
3530 3530 in. Missing keys will be populated.
3531 3531 """
3532 3532 createopts = dict(createopts or {})
3533 3533
3534 3534 if b'backend' not in createopts:
3535 3535 # experimental config: storage.new-repo-backend
3536 3536 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3537 3537
3538 3538 return createopts
3539 3539
3540 3540
3541 3541 def newreporequirements(ui, createopts):
3542 3542 """Determine the set of requirements for a new local repository.
3543 3543
3544 3544 Extensions can wrap this function to specify custom requirements for
3545 3545 new repositories.
3546 3546 """
3547 3547 # If the repo is being created from a shared repository, we copy
3548 3548 # its requirements.
3549 3549 if b'sharedrepo' in createopts:
3550 3550 requirements = set(createopts[b'sharedrepo'].requirements)
3551 3551 if createopts.get(b'sharedrelative'):
3552 3552 requirements.add(b'relshared')
3553 3553 else:
3554 3554 requirements.add(b'shared')
3555 3555
3556 3556 return requirements
3557 3557
3558 3558 if b'backend' not in createopts:
3559 3559 raise error.ProgrammingError(
3560 3560 b'backend key not present in createopts; '
3561 3561 b'was defaultcreateopts() called?'
3562 3562 )
3563 3563
3564 3564 if createopts[b'backend'] != b'revlogv1':
3565 3565 raise error.Abort(
3566 3566 _(
3567 3567 b'unable to determine repository requirements for '
3568 3568 b'storage backend: %s'
3569 3569 )
3570 3570 % createopts[b'backend']
3571 3571 )
3572 3572
3573 3573 requirements = {b'revlogv1'}
3574 3574 if ui.configbool(b'format', b'usestore'):
3575 3575 requirements.add(b'store')
3576 3576 if ui.configbool(b'format', b'usefncache'):
3577 3577 requirements.add(b'fncache')
3578 3578 if ui.configbool(b'format', b'dotencode'):
3579 3579 requirements.add(b'dotencode')
3580 3580
3581 compengine = ui.config(b'format', b'revlog-compression')
3582 if compengine not in util.compengines:
3581 compengines = ui.configlist(b'format', b'revlog-compression')
3582 for compengine in compengines:
3583 if compengine in util.compengines:
3584 break
3585 else:
3583 3586 raise error.Abort(
3584 3587 _(
3585 b'compression engine %s defined by '
3588 b'compression engines %s defined by '
3586 3589 b'format.revlog-compression not available'
3587 3590 )
3588 % compengine,
3591 % b', '.join(b'"%s"' % e for e in compengines),
3589 3592 hint=_(
3590 3593 b'run "hg debuginstall" to list available '
3591 3594 b'compression engines'
3592 3595 ),
3593 3596 )
3594 3597
3595 3598 # zlib is the historical default and doesn't need an explicit requirement.
3596 elif compengine == b'zstd':
3599 if compengine == b'zstd':
3597 3600 requirements.add(b'revlog-compression-zstd')
3598 3601 elif compengine != b'zlib':
3599 3602 requirements.add(b'exp-compression-%s' % compengine)
3600 3603
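# A minimal standalone sketch of the selection logic introduced above:
# format.revlog-compression is now a list, and the first configured engine
# that is actually available wins; if none is available, repository creation
# aborts. 'available_engines' stands in for util.compengines and the
# exception type is simplified; both are assumptions for illustration.
def pick_compression_engine(configured, available_engines):
    for engine in configured:
        if engine in available_engines:
            return engine
    raise LookupError(
        b'compression engines %s defined by format.revlog-compression '
        b'not available' % b', '.join(b'"%s"' % e for e in configured)
    )

# e.g. "revlog-compression = zstd,zlib" on a build without zstd support
# quietly falls back to zlib:
assert pick_compression_engine([b'zstd', b'zlib'], {b'zlib', b'none'}) == b'zlib'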
3601 3604 if scmutil.gdinitconfig(ui):
3602 3605 requirements.add(b'generaldelta')
3603 3606 if ui.configbool(b'format', b'sparse-revlog'):
3604 3607 requirements.add(SPARSEREVLOG_REQUIREMENT)
3605 3608
3606 3609 # experimental config: format.exp-use-side-data
3607 3610 if ui.configbool(b'format', b'exp-use-side-data'):
3608 3611 requirements.add(SIDEDATA_REQUIREMENT)
3609 3612 # experimental config: format.exp-use-copies-side-data-changeset
3610 3613 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3611 3614 requirements.add(SIDEDATA_REQUIREMENT)
3612 3615 requirements.add(COPIESSDC_REQUIREMENT)
3613 3616 if ui.configbool(b'experimental', b'treemanifest'):
3614 3617 requirements.add(b'treemanifest')
3615 3618
3616 3619 revlogv2 = ui.config(b'experimental', b'revlogv2')
3617 3620 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3618 3621 requirements.remove(b'revlogv1')
3619 3622 # generaldelta is implied by revlogv2.
3620 3623 requirements.discard(b'generaldelta')
3621 3624 requirements.add(REVLOGV2_REQUIREMENT)
3622 3625 # experimental config: format.internal-phase
3623 3626 if ui.configbool(b'format', b'internal-phase'):
3624 3627 requirements.add(b'internal-phase')
3625 3628
3626 3629 if createopts.get(b'narrowfiles'):
3627 3630 requirements.add(repository.NARROW_REQUIREMENT)
3628 3631
3629 3632 if createopts.get(b'lfs'):
3630 3633 requirements.add(b'lfs')
3631 3634
3632 3635 if ui.configbool(b'format', b'bookmarks-in-store'):
3633 3636 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3634 3637
3635 3638 return requirements
3636 3639
3637 3640
3638 3641 def filterknowncreateopts(ui, createopts):
3639 3642 """Filters a dict of repo creation options against options that are known.
3640 3643
3641 3644 Receives a dict of repo creation options and returns a dict of those
3642 3645 options that we don't know how to handle.
3643 3646
3644 3647 This function is called as part of repository creation. If the
3645 3648 returned dict contains any items, repository creation will not
3646 3649 be allowed, as it means there was a request to create a repository
3647 3650 with options not recognized by loaded code.
3648 3651
3649 3652 Extensions can wrap this function to filter out creation options
3650 3653 they know how to handle.
3651 3654 """
3652 3655 known = {
3653 3656 b'backend',
3654 3657 b'lfs',
3655 3658 b'narrowfiles',
3656 3659 b'sharedrepo',
3657 3660 b'sharedrelative',
3658 3661 b'shareditems',
3659 3662 b'shallowfilestore',
3660 3663 }
3661 3664
3662 3665 return {k: v for k, v in createopts.items() if k not in known}
3663 3666
3664 3667
3665 3668 def createrepository(ui, path, createopts=None):
3666 3669 """Create a new repository in a vfs.
3667 3670
3668 3671 ``path`` path to the new repo's working directory.
3669 3672 ``createopts`` options for the new repository.
3670 3673
3671 3674 The following keys for ``createopts`` are recognized:
3672 3675
3673 3676 backend
3674 3677 The storage backend to use.
3675 3678 lfs
3676 3679 Repository will be created with ``lfs`` requirement. The lfs extension
3677 3680 will automatically be loaded when the repository is accessed.
3678 3681 narrowfiles
3679 3682 Set up repository to support narrow file storage.
3680 3683 sharedrepo
3681 3684 Repository object from which storage should be shared.
3682 3685 sharedrelative
3683 3686 Boolean indicating if the path to the shared repo should be
3684 3687 stored as relative. By default, the pointer to the "parent" repo
3685 3688 is stored as an absolute path.
3686 3689 shareditems
3687 3690 Set of items to share to the new repository (in addition to storage).
3688 3691 shallowfilestore
3689 3692 Indicates that storage for files should be shallow (not all ancestor
3690 3693 revisions are known).
3691 3694 """
3692 3695 createopts = defaultcreateopts(ui, createopts=createopts)
3693 3696
3694 3697 unknownopts = filterknowncreateopts(ui, createopts)
3695 3698
3696 3699 if not isinstance(unknownopts, dict):
3697 3700 raise error.ProgrammingError(
3698 3701 b'filterknowncreateopts() did not return a dict'
3699 3702 )
3700 3703
3701 3704 if unknownopts:
3702 3705 raise error.Abort(
3703 3706 _(
3704 3707 b'unable to create repository because of unknown '
3705 3708 b'creation option: %s'
3706 3709 )
3707 3710 % b', '.join(sorted(unknownopts)),
3708 3711 hint=_(b'is a required extension not loaded?'),
3709 3712 )
3710 3713
3711 3714 requirements = newreporequirements(ui, createopts=createopts)
3712 3715
3713 3716 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3714 3717
3715 3718 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3716 3719 if hgvfs.exists():
3717 3720 raise error.RepoError(_(b'repository %s already exists') % path)
3718 3721
3719 3722 if b'sharedrepo' in createopts:
3720 3723 sharedpath = createopts[b'sharedrepo'].sharedpath
3721 3724
3722 3725 if createopts.get(b'sharedrelative'):
3723 3726 try:
3724 3727 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3725 3728 except (IOError, ValueError) as e:
3726 3729 # ValueError is raised on Windows if the drive letters differ
3727 3730 # on each path.
3728 3731 raise error.Abort(
3729 3732 _(b'cannot calculate relative path'),
3730 3733 hint=stringutil.forcebytestr(e),
3731 3734 )
3732 3735
3733 3736 if not wdirvfs.exists():
3734 3737 wdirvfs.makedirs()
3735 3738
3736 3739 hgvfs.makedir(notindexed=True)
3737 3740 if b'sharedrepo' not in createopts:
3738 3741 hgvfs.mkdir(b'cache')
3739 3742 hgvfs.mkdir(b'wcache')
3740 3743
3741 3744 if b'store' in requirements and b'sharedrepo' not in createopts:
3742 3745 hgvfs.mkdir(b'store')
3743 3746
3744 3747 # We create an invalid changelog outside the store so very old
3745 3748 # Mercurial versions (which didn't know about the requirements
3746 3749 # file) encounter an error on reading the changelog. This
3747 3750 # effectively locks out old clients and prevents them from
3748 3751 # mucking with a repo in an unknown format.
3749 3752 #
3750 3753 # The revlog header has version 2, which won't be recognized by
3751 3754 # such old clients.
3752 3755 hgvfs.append(
3753 3756 b'00changelog.i',
3754 3757 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3755 3758 b'layout',
3756 3759 )
3757 3760
3758 3761 scmutil.writerequires(hgvfs, requirements)
3759 3762
3760 3763 # Write out file telling readers where to find the shared store.
3761 3764 if b'sharedrepo' in createopts:
3762 3765 hgvfs.write(b'sharedpath', sharedpath)
3763 3766
3764 3767 if createopts.get(b'shareditems'):
3765 3768 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3766 3769 hgvfs.write(b'shared', shared)
3767 3770
3768 3771
3769 3772 def poisonrepository(repo):
3770 3773 """Poison a repository instance so it can no longer be used."""
3771 3774 # Perform any cleanup on the instance.
3772 3775 repo.close()
3773 3776
3774 3777 # Our strategy is to replace the type of the object with one that
3775 3778 # has all attribute lookups result in error.
3776 3779 #
3777 3780 # But we have to allow the close() method because some constructors
3778 3781 # of repos call close() on repo references.
3779 3782 class poisonedrepository(object):
3780 3783 def __getattribute__(self, item):
3781 3784 if item == 'close':
3782 3785 return object.__getattribute__(self, item)
3783 3786
3784 3787 raise error.ProgrammingError(
3785 3788 b'repo instances should not be used after unshare'
3786 3789 )
3787 3790
3788 3791 def close(self):
3789 3792 pass
3790 3793
3791 3794 # We may have a repoview, which intercepts __setattr__. So be sure
3792 3795 # we operate at the lowest level possible.
3793 3796 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,1393 +1,1400 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from .i18n import _
13 13 from .pycompat import getattr
14 14 from . import (
15 15 changelog,
16 16 copies,
17 17 error,
18 18 filelog,
19 19 hg,
20 20 localrepo,
21 21 manifest,
22 22 pycompat,
23 23 revlog,
24 24 scmutil,
25 25 util,
26 26 vfs as vfsmod,
27 27 )
28 28
29 29 from .utils import compression
30 30
31 31 # list of requirements that request a clone of all revlog if added/removed
32 32 RECLONES_REQUIREMENTS = {
33 33 b'generaldelta',
34 34 localrepo.SPARSEREVLOG_REQUIREMENT,
35 35 }
36 36
37 37
38 38 def requiredsourcerequirements(repo):
39 39 """Obtain requirements required to be present to upgrade a repo.
40 40
41 41 An upgrade will not be allowed if the repository doesn't have the
42 42 requirements returned by this function.
43 43 """
44 44 return {
45 45 # Introduced in Mercurial 0.9.2.
46 46 b'revlogv1',
47 47 # Introduced in Mercurial 0.9.2.
48 48 b'store',
49 49 }
50 50
51 51
52 52 def blocksourcerequirements(repo):
53 53 """Obtain requirements that will prevent an upgrade from occurring.
54 54
55 55 An upgrade cannot be performed if the source repository contains a
56 56 requirement in the returned set.
57 57 """
58 58 return {
59 59 # The upgrade code does not yet support these experimental features.
60 60 # This is an artificial limitation.
61 61 b'treemanifest',
62 62 # This was a precursor to generaldelta and was never enabled by default.
63 63 # It should (hopefully) not exist in the wild.
64 64 b'parentdelta',
65 65 # Upgrade should operate on the actual store, not the shared link.
66 66 b'shared',
67 67 }
68 68
69 69
70 70 def supportremovedrequirements(repo):
71 71 """Obtain requirements that can be removed during an upgrade.
72 72
73 73 If an upgrade were to create a repository that dropped a requirement,
74 74 the dropped requirement must appear in the returned set for the upgrade
75 75 to be allowed.
76 76 """
77 77 supported = {
78 78 localrepo.SPARSEREVLOG_REQUIREMENT,
79 79 localrepo.SIDEDATA_REQUIREMENT,
80 80 localrepo.COPIESSDC_REQUIREMENT,
81 81 }
82 82 for name in compression.compengines:
83 83 engine = compression.compengines[name]
84 84 if engine.available() and engine.revlogheader():
85 85 supported.add(b'exp-compression-%s' % name)
86 86 if engine.name() == b'zstd':
87 87 supported.add(b'revlog-compression-zstd')
88 88 return supported
89 89
90 90
91 91 def supporteddestrequirements(repo):
92 92 """Obtain requirements that upgrade supports in the destination.
93 93
94 94 If the result of the upgrade would create requirements not in this set,
95 95 the upgrade is disallowed.
96 96
97 97 Extensions should monkeypatch this to add their custom requirements.
98 98 """
99 99 supported = {
100 100 b'dotencode',
101 101 b'fncache',
102 102 b'generaldelta',
103 103 b'revlogv1',
104 104 b'store',
105 105 localrepo.SPARSEREVLOG_REQUIREMENT,
106 106 localrepo.SIDEDATA_REQUIREMENT,
107 107 localrepo.COPIESSDC_REQUIREMENT,
108 108 }
109 109 for name in compression.compengines:
110 110 engine = compression.compengines[name]
111 111 if engine.available() and engine.revlogheader():
112 112 supported.add(b'exp-compression-%s' % name)
113 113 if engine.name() == b'zstd':
114 114 supported.add(b'revlog-compression-zstd')
115 115 return supported
116 116
117 117
118 118 def allowednewrequirements(repo):
119 119 """Obtain requirements that can be added to a repository during upgrade.
120 120
121 121 This is used to disallow proposed requirements from being added when
122 122 they weren't present before.
123 123
124 124 We use a list of allowed requirement additions instead of a list of known
125 125 bad additions because the whitelist approach is safer and will prevent
126 126 future, unknown requirements from accidentally being added.
127 127 """
128 128 supported = {
129 129 b'dotencode',
130 130 b'fncache',
131 131 b'generaldelta',
132 132 localrepo.SPARSEREVLOG_REQUIREMENT,
133 133 localrepo.SIDEDATA_REQUIREMENT,
134 134 localrepo.COPIESSDC_REQUIREMENT,
135 135 }
136 136 for name in compression.compengines:
137 137 engine = compression.compengines[name]
138 138 if engine.available() and engine.revlogheader():
139 139 supported.add(b'exp-compression-%s' % name)
140 140 if engine.name() == b'zstd':
141 141 supported.add(b'revlog-compression-zstd')
142 142 return supported
143 143
144 144
145 145 def preservedrequirements(repo):
146 146 return set()
147 147
148 148
149 149 deficiency = b'deficiency'
150 150 optimisation = b'optimization'
151 151
152 152
153 153 class improvement(object):
154 154 """Represents an improvement that can be made as part of an upgrade.
155 155
156 156 The following attributes are defined on each instance:
157 157
158 158 name
159 159 Machine-readable string uniquely identifying this improvement. It
160 160 will be mapped to an action later in the upgrade process.
161 161
162 162 type
163 163 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
164 164 problem. An optimization is an action (sometimes optional) that
165 165 can be taken to further improve the state of the repository.
166 166
167 167 description
168 168 Message intended for humans explaining the improvement in more detail,
169 169 including the implications of it. For ``deficiency`` types, should be
170 170 worded in the present tense. For ``optimisation`` types, should be
171 171 worded in the future tense.
172 172
173 173 upgrademessage
174 174 Message intended for humans explaining what an upgrade addressing this
175 175 issue will do. Should be worded in the future tense.
176 176 """
177 177
178 178 def __init__(self, name, type, description, upgrademessage):
179 179 self.name = name
180 180 self.type = type
181 181 self.description = description
182 182 self.upgrademessage = upgrademessage
183 183
184 184 def __eq__(self, other):
185 185 if not isinstance(other, improvement):
186 186 # This is what Python tells us to do
187 187 return NotImplemented
188 188 return self.name == other.name
189 189
190 190 def __ne__(self, other):
191 191 return not (self == other)
192 192
193 193 def __hash__(self):
194 194 return hash(self.name)
195 195
196 196
197 197 allformatvariant = []
198 198
199 199
200 200 def registerformatvariant(cls):
201 201 allformatvariant.append(cls)
202 202 return cls
203 203
204 204
205 205 class formatvariant(improvement):
206 206 """an improvement subclass dedicated to repository format"""
207 207
208 208 type = deficiency
209 209 ### The following attributes should be defined for each class:
210 210
211 211 # machine-readable string uniquely identifying this improvement. it will be
212 212 # mapped to an action later in the upgrade process.
213 213 name = None
214 214
215 215 # message intended for humans explaining the improvement in more detail,
216 216 # including the implications of it. For ``deficiency`` types, it should
217 217 # be worded in the present tense.
218 218 description = None
219 219
220 220 # message intended for humans explaining what an upgrade addressing this
221 221 # issue will do. should be worded in the future tense.
222 222 upgrademessage = None
223 223
224 224 # value of current Mercurial default for new repository
225 225 default = None
226 226
227 227 def __init__(self):
228 228 raise NotImplementedError()
229 229
230 230 @staticmethod
231 231 def fromrepo(repo):
232 232 """current value of the variant in the repository"""
233 233 raise NotImplementedError()
234 234
235 235 @staticmethod
236 236 def fromconfig(repo):
237 237 """current value of the variant in the configuration"""
238 238 raise NotImplementedError()
239 239
240 240
241 241 class requirementformatvariant(formatvariant):
242 242 """formatvariant based on a 'requirement' name.
243 243
244 244 Many format variants are controlled by a 'requirement'. We define a small
245 245 subclass to factor the code.
246 246 """
247 247
248 248 # the requirement that control this format variant
249 249 _requirement = None
250 250
251 251 @staticmethod
252 252 def _newreporequirements(ui):
253 253 return localrepo.newreporequirements(
254 254 ui, localrepo.defaultcreateopts(ui)
255 255 )
256 256
257 257 @classmethod
258 258 def fromrepo(cls, repo):
259 259 assert cls._requirement is not None
260 260 return cls._requirement in repo.requirements
261 261
262 262 @classmethod
263 263 def fromconfig(cls, repo):
264 264 assert cls._requirement is not None
265 265 return cls._requirement in cls._newreporequirements(repo.ui)
266 266
267 267
268 268 @registerformatvariant
269 269 class fncache(requirementformatvariant):
270 270 name = b'fncache'
271 271
272 272 _requirement = b'fncache'
273 273
274 274 default = True
275 275
276 276 description = _(
277 277 b'long and reserved filenames may not work correctly; '
278 278 b'repository performance is sub-optimal'
279 279 )
280 280
281 281 upgrademessage = _(
282 282 b'repository will be more resilient to storing '
283 283 b'certain paths and performance of certain '
284 284 b'operations should be improved'
285 285 )
286 286
287 287
288 288 @registerformatvariant
289 289 class dotencode(requirementformatvariant):
290 290 name = b'dotencode'
291 291
292 292 _requirement = b'dotencode'
293 293
294 294 default = True
295 295
296 296 description = _(
297 297 b'storage of filenames beginning with a period or '
298 298 b'space may not work correctly'
299 299 )
300 300
301 301 upgrademessage = _(
302 302 b'repository will be better able to store files '
303 303 b'beginning with a space or period'
304 304 )
305 305
306 306
307 307 @registerformatvariant
308 308 class generaldelta(requirementformatvariant):
309 309 name = b'generaldelta'
310 310
311 311 _requirement = b'generaldelta'
312 312
313 313 default = True
314 314
315 315 description = _(
316 316 b'deltas within internal storage are unable to '
317 317 b'choose optimal revisions; repository is larger and '
318 318 b'slower than it could be; interaction with other '
319 319 b'repositories may require extra network and CPU '
320 320 b'resources, making "hg push" and "hg pull" slower'
321 321 )
322 322
323 323 upgrademessage = _(
324 324 b'repository storage will be able to create '
325 325 b'optimal deltas; new repository data will be '
326 326 b'smaller and read times should decrease; '
327 327 b'interacting with other repositories using this '
328 328 b'storage model should require less network and '
329 329 b'CPU resources, making "hg push" and "hg pull" '
330 330 b'faster'
331 331 )
332 332
333 333
334 334 @registerformatvariant
335 335 class sparserevlog(requirementformatvariant):
336 336 name = b'sparserevlog'
337 337
338 338 _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
339 339
340 340 default = True
341 341
342 342 description = _(
343 343 b'in order to limit disk reading and memory usage on older '
344 344 b'versions, the span of a delta chain from its root to its '
345 345 b'end is limited, regardless of the relevant data in this span. '
346 346 b'This can severely limit the ability of Mercurial to build good '
347 347 b'delta chains, resulting in much more storage space being '
348 348 b'used and limited reusability of on-disk deltas during '
349 349 b'exchange.'
350 350 )
351 351
352 352 upgrademessage = _(
353 353 b'Revlog supports delta chains with more unused data '
354 354 b'between payloads. These gaps will be skipped at read '
355 355 b'time. This allows for better delta chains, producing '
356 356 b'better compression and faster exchange with the server.'
357 357 )
358 358
359 359
360 360 @registerformatvariant
361 361 class sidedata(requirementformatvariant):
362 362 name = b'sidedata'
363 363
364 364 _requirement = localrepo.SIDEDATA_REQUIREMENT
365 365
366 366 default = False
367 367
368 368 description = _(
369 369 b'Allows storage of extra data alongside a revision, '
370 370 b'unlocking various caching options.'
371 371 )
372 372
373 373 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
374 374
375 375
376 376 @registerformatvariant
377 377 class copiessdc(requirementformatvariant):
378 378 name = b'copies-sdc'
379 379
380 380 _requirement = localrepo.COPIESSDC_REQUIREMENT
381 381
382 382 default = False
383 383
384 384 description = _(b'Stores copies information alongside changesets.')
385 385
386 386 upgrademessage = _(
387 387 b'Allows using a more efficient algorithm to deal with ' b'copy tracing.'
388 388 )
389 389
390 390
391 391 @registerformatvariant
392 392 class removecldeltachain(formatvariant):
393 393 name = b'plain-cl-delta'
394 394
395 395 default = True
396 396
397 397 description = _(
398 398 b'changelog storage is using deltas instead of '
399 399 b'raw entries; changelog reading and any '
400 400 b'operation relying on changelog data are slower '
401 401 b'than they could be'
402 402 )
403 403
404 404 upgrademessage = _(
405 405 b'changelog storage will be reformatted to '
406 406 b'store raw entries; changelog reading will be '
407 407 b'faster; changelog size may be reduced'
408 408 )
409 409
410 410 @staticmethod
411 411 def fromrepo(repo):
412 412 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
413 413 # changelogs with deltas.
414 414 cl = repo.changelog
415 415 chainbase = cl.chainbase
416 416 return all(rev == chainbase(rev) for rev in cl)
417 417
418 418 @staticmethod
419 419 def fromconfig(repo):
420 420 return True
421 421
422 422
423 423 @registerformatvariant
424 424 class compressionengine(formatvariant):
425 425 name = b'compression'
426 426 default = b'zlib'
427 427
428 428 description = _(
429 429 b'Compression algorithm used to compress data. '
430 430 b'Some engines are faster than others'
431 431 )
432 432
433 433 upgrademessage = _(
434 434 b'revlog content will be recompressed with the new algorithm.'
435 435 )
436 436
437 437 @classmethod
438 438 def fromrepo(cls, repo):
439 439 # we allow multiple compression engine requirements to co-exist because,
440 440 # strictly speaking, revlogs seem to support mixed compression styles.
441 441 #
442 442 # The compression used for new entries will be "the last one"
443 443 compression = b'zlib'
444 444 for req in repo.requirements:
445 445 prefix = req.startswith
446 446 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
447 447 compression = req.split(b'-', 2)[2]
448 448 return compression
449 449
450 450 @classmethod
451 451 def fromconfig(cls, repo):
452 return repo.ui.config(b'format', b'revlog-compression')
452 compengines = repo.ui.configlist(b'format', b'revlog-compression')
453 # return the first valid value as the selection code would do
454 for comp in compengines:
455 if comp in util.compengines:
456 return comp
457
458 # no valid compression engine found; display them all for clarity
459 return b','.join(compengines)
453 460
454 461
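# A standalone sketch of the fromconfig() behaviour above: report the first
# configured engine that the running binary supports, otherwise join the
# whole configured list so the (invalid) configuration is still visible to
# the user. 'available_engines' stands in for util.compengines and is an
# assumption for illustration.
def compression_from_config(configured, available_engines):
    for comp in configured:
        if comp in available_engines:
            return comp
    # no valid compression engine found; display them all for clarity
    return b','.join(configured)

assert compression_from_config([b'zstd', b'zlib'], {b'zlib'}) == b'zlib'
assert compression_from_config([b'lz4'], {b'zlib'}) == b'lz4'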
455 462 @registerformatvariant
456 463 class compressionlevel(formatvariant):
457 464 name = b'compression-level'
458 465 default = b'default'
459 466
460 467 description = _(b'compression level')
461 468
462 469 upgrademessage = _(b'revlog content will be recompressed')
463 470
464 471 @classmethod
465 472 def fromrepo(cls, repo):
466 473 comp = compressionengine.fromrepo(repo)
467 474 level = None
468 475 if comp == b'zlib':
469 476 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
470 477 elif comp == b'zstd':
471 478 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
472 479 if level is None:
473 480 return b'default'
474 481 return bytes(level)
475 482
476 483 @classmethod
477 484 def fromconfig(cls, repo):
478 485 comp = compressionengine.fromconfig(repo)
479 486 level = None
480 487 if comp == b'zlib':
481 488 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
482 489 elif comp == b'zstd':
483 490 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
484 491 if level is None:
485 492 return b'default'
486 493 return bytes(level)
487 494
488 495
489 496 def finddeficiencies(repo):
490 497 """returns a list of deficiencies that the repo suffer from"""
491 498 deficiencies = []
492 499
493 500 # We could detect lack of revlogv1 and store here, but they were added
494 501 # in 0.9.2 and we don't support upgrading repos without these
495 502 # requirements, so let's not bother.
496 503
497 504 for fv in allformatvariant:
498 505 if not fv.fromrepo(repo):
499 506 deficiencies.append(fv)
500 507
501 508 return deficiencies
502 509
503 510
504 511 # search without '-' to support older form on newer client.
505 512 #
506 513 # We don't enforce backward compatibility for debug commands so this
507 514 # might eventually be dropped. However, having to use two different
508 515 # forms in scripts when comparing results is annoying enough to add
509 516 # backward compatibility for a while.
510 517 legacy_opts_map = {
511 518 b'redeltaparent': b're-delta-parent',
512 519 b'redeltamultibase': b're-delta-multibase',
513 520 b'redeltaall': b're-delta-all',
514 521 b'redeltafulladd': b're-delta-fulladd',
515 522 }
516 523
517 524
518 525 def findoptimizations(repo):
519 526 """Determine optimisation that could be used during upgrade"""
520 527 # These are unconditionally added. There is logic later that figures out
521 528 # which ones to apply.
522 529 optimizations = []
523 530
524 531 optimizations.append(
525 532 improvement(
526 533 name=b're-delta-parent',
527 534 type=optimisation,
528 535 description=_(
529 536 b'deltas within internal storage will be recalculated to '
530 537 b'choose an optimal base revision where this was not '
531 538 b'already done; the size of the repository may shrink and '
532 539 b'various operations may become faster; the first time '
533 540 b'this optimization is performed could slow down upgrade '
534 541 b'execution considerably; subsequent invocations should '
535 542 b'not run noticeably slower'
536 543 ),
537 544 upgrademessage=_(
538 545 b'deltas within internal storage will choose a new '
539 546 b'base revision if needed'
540 547 ),
541 548 )
542 549 )
543 550
544 551 optimizations.append(
545 552 improvement(
546 553 name=b're-delta-multibase',
547 554 type=optimisation,
548 555 description=_(
549 556 b'deltas within internal storage will be recalculated '
550 557 b'against multiple base revision and the smallest '
551 558 b'difference will be used; the size of the repository may '
552 559 b'shrink significantly when there are many merges; this '
553 560 b'optimization will slow down execution in proportion to '
554 561 b'the number of merges in the repository and the amount '
555 562 b'of files in the repository; this slow down should not '
556 563 b'be significant unless there are tens of thousands of '
557 564 b'files and thousands of merges'
558 565 ),
559 566 upgrademessage=_(
560 567 b'deltas within internal storage will choose an '
561 568 b'optimal delta by computing deltas against multiple '
562 569 b'parents; may slow down execution time '
563 570 b'significantly'
564 571 ),
565 572 )
566 573 )
567 574
568 575 optimizations.append(
569 576 improvement(
570 577 name=b're-delta-all',
571 578 type=optimisation,
572 579 description=_(
573 580 b'deltas within internal storage will always be '
574 581 b'recalculated without reusing prior deltas; this will '
575 582 b'likely make execution run several times slower; this '
576 583 b'optimization is typically not needed'
577 584 ),
578 585 upgrademessage=_(
579 586 b'deltas within internal storage will be fully '
580 587 b'recomputed; this will likely drastically slow down '
581 588 b'execution time'
582 589 ),
583 590 )
584 591 )
585 592
586 593 optimizations.append(
587 594 improvement(
588 595 name=b're-delta-fulladd',
589 596 type=optimisation,
590 597 description=_(
591 598 b'every revision will be re-added as if it was new '
592 599 b'content. It will go through the full storage '
593 600 b'mechanism giving extensions a chance to process it '
594 601 b'(eg. lfs). This is similar to "re-delta-all" but even '
595 602 b'slower since more logic is involved.'
596 603 ),
597 604 upgrademessage=_(
598 605 b'each revision will be added as new content to the '
599 606 b'internal storage; this will likely drastically slow '
600 607 b'down execution time, but some extensions might need '
601 608 b'it'
602 609 ),
603 610 )
604 611 )
605 612
606 613 return optimizations
607 614
608 615
609 616 def determineactions(repo, deficiencies, sourcereqs, destreqs):
610 617 """Determine upgrade actions that will be performed.
611 618
612 619 Given a list of improvements as returned by ``finddeficiencies`` and
613 620 ``findoptimizations``, determine the list of upgrade actions that
614 621 will be performed.
615 622
616 623 The role of this function is to filter improvements if needed, apply
617 624 recommended optimizations from the improvements list that make sense,
618 625 etc.
619 626
620 627 Returns a list of action names.
621 628 """
622 629 newactions = []
623 630
624 631 knownreqs = supporteddestrequirements(repo)
625 632
626 633 for d in deficiencies:
627 634 name = d.name
628 635
629 636 # If the action is a requirement that doesn't show up in the
630 637 # destination requirements, prune the action.
631 638 if name in knownreqs and name not in destreqs:
632 639 continue
633 640
634 641 newactions.append(d)
635 642
636 643 # FUTURE consider adding some optimizations here for certain transitions.
637 644 # e.g. adding generaldelta could schedule parent redeltas.
638 645
639 646 return newactions
640 647
641 648
642 649 def _revlogfrompath(repo, path):
643 650 """Obtain a revlog from a repo path.
644 651
645 652 An instance of the appropriate class is returned.
646 653 """
647 654 if path == b'00changelog.i':
648 655 return changelog.changelog(repo.svfs)
649 656 elif path.endswith(b'00manifest.i'):
650 657 mandir = path[: -len(b'00manifest.i')]
651 658 return manifest.manifestrevlog(repo.svfs, tree=mandir)
652 659 else:
653 660 # reverse of "/".join(("data", path + ".i"))
654 661 return filelog.filelog(repo.svfs, path[5:-2])
655 662
656 663
657 664 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
658 665 """copy all relevant files for `oldrl` into `destrepo` store
659 666
660 667 Files are copied "as is" without any transformation. The copy is performed
661 668 without extra checks. Callers are responsible for making sure the copied
662 669 content is compatible with format of the destination repository.
663 670 """
664 671 oldrl = getattr(oldrl, '_revlog', oldrl)
665 672 newrl = _revlogfrompath(destrepo, unencodedname)
666 673 newrl = getattr(newrl, '_revlog', newrl)
667 674
668 675 oldvfs = oldrl.opener
669 676 newvfs = newrl.opener
670 677 oldindex = oldvfs.join(oldrl.indexfile)
671 678 newindex = newvfs.join(newrl.indexfile)
672 679 olddata = oldvfs.join(oldrl.datafile)
673 680 newdata = newvfs.join(newrl.datafile)
674 681
675 682 with newvfs(newrl.indexfile, b'w'):
676 683 pass # create all the directories
677 684
678 685 util.copyfile(oldindex, newindex)
679 686 copydata = oldrl.opener.exists(oldrl.datafile)
680 687 if copydata:
681 688 util.copyfile(olddata, newdata)
682 689
683 690 if not (
684 691 unencodedname.endswith(b'00changelog.i')
685 692 or unencodedname.endswith(b'00manifest.i')
686 693 ):
687 694 destrepo.svfs.fncache.add(unencodedname)
688 695 if copydata:
689 696 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
690 697
691 698
692 699 UPGRADE_CHANGELOG = object()
693 700 UPGRADE_MANIFEST = object()
694 701 UPGRADE_FILELOG = object()
695 702
696 703 UPGRADE_ALL_REVLOGS = frozenset(
697 704 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG]
698 705 )
699 706
700 707
701 708 def getsidedatacompanion(srcrepo, dstrepo):
702 709 sidedatacompanion = None
703 710 removedreqs = srcrepo.requirements - dstrepo.requirements
704 711 addedreqs = dstrepo.requirements - srcrepo.requirements
705 712 if localrepo.SIDEDATA_REQUIREMENT in removedreqs:
706 713
707 714 def sidedatacompanion(rl, rev):
708 715 rl = getattr(rl, '_revlog', rl)
709 716 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
710 717 return True, (), {}
711 718 return False, (), {}
712 719
713 720 elif localrepo.COPIESSDC_REQUIREMENT in addedreqs:
714 721 sidedatacompanion = copies.getsidedataadder(srcrepo, dstrepo)
715 722 elif localrepo.COPIESSDC_REQUIREMENT in removedreqs:
716 723 sidedatacompanion = copies.getsidedataremover(srcrepo, dstrepo)
717 724 return sidedatacompanion
718 725
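For readability, a comment-level summary of the dispatch above (this merely restates the branches; the returned callable follows the companion protocol expected by revlog.clone):

# sidedata requirement removed        -> companion marks revisions whose
#                                        stored sidedata should be dropped
# copies-sidedata requirement added   -> copies.getsidedataadder(srcrepo, dstrepo)
# copies-sidedata requirement removed -> copies.getsidedataremover(srcrepo, dstrepo)
# no sidedata-related change          -> None (clone leaves sidedata untouched)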
719 726
720 727 def matchrevlog(revlogfilter, entry):
721 728 """check is a revlog is selected for cloning
722 729
723 730 The store entry is checked against the passed filter"""
724 731 if entry.endswith(b'00changelog.i'):
725 732 return UPGRADE_CHANGELOG in revlogfilter
726 733 elif entry.endswith(b'00manifest.i'):
727 734 return UPGRADE_MANIFEST in revlogfilter
728 735 return UPGRADE_FILELOG in revlogfilter
729 736
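A few hypothetical calls showing how the filter above behaves (the entries are example store paths):

# matchrevlog({UPGRADE_CHANGELOG}, b'00changelog.i')     -> True
# matchrevlog({UPGRADE_CHANGELOG}, b'data/foo.txt.i')    -> False
# matchrevlog({UPGRADE_FILELOG}, b'data/foo.txt.i')      -> True
# matchrevlog(UPGRADE_ALL_REVLOGS, b'dir/00manifest.i')  -> True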
730 737
731 738 def _clonerevlogs(
732 739 ui,
733 740 srcrepo,
734 741 dstrepo,
735 742 tr,
736 743 deltareuse,
737 744 forcedeltabothparents,
738 745 revlogs=UPGRADE_ALL_REVLOGS,
739 746 ):
740 747 """Copy revlogs between 2 repos."""
741 748 revcount = 0
742 749 srcsize = 0
743 750 srcrawsize = 0
744 751 dstsize = 0
745 752 fcount = 0
746 753 frevcount = 0
747 754 fsrcsize = 0
748 755 frawsize = 0
749 756 fdstsize = 0
750 757 mcount = 0
751 758 mrevcount = 0
752 759 msrcsize = 0
753 760 mrawsize = 0
754 761 mdstsize = 0
755 762 crevcount = 0
756 763 csrcsize = 0
757 764 crawsize = 0
758 765 cdstsize = 0
759 766
760 767 alldatafiles = list(srcrepo.store.walk())
761 768
762 769 # Perform a pass to collect metadata. This validates we can open all
763 770 # source files and allows a unified progress bar to be displayed.
764 771 for unencoded, encoded, size in alldatafiles:
765 772 if unencoded.endswith(b'.d'):
766 773 continue
767 774
768 775 rl = _revlogfrompath(srcrepo, unencoded)
769 776
770 777 info = rl.storageinfo(
771 778 exclusivefiles=True,
772 779 revisionscount=True,
773 780 trackedsize=True,
774 781 storedsize=True,
775 782 )
776 783
777 784 revcount += info[b'revisionscount'] or 0
778 785 datasize = info[b'storedsize'] or 0
779 786 rawsize = info[b'trackedsize'] or 0
780 787
781 788 srcsize += datasize
782 789 srcrawsize += rawsize
783 790
784 791 # This is for the separate progress bars.
785 792 if isinstance(rl, changelog.changelog):
786 793 crevcount += len(rl)
787 794 csrcsize += datasize
788 795 crawsize += rawsize
789 796 elif isinstance(rl, manifest.manifestrevlog):
790 797 mcount += 1
791 798 mrevcount += len(rl)
792 799 msrcsize += datasize
793 800 mrawsize += rawsize
794 801 elif isinstance(rl, filelog.filelog):
795 802 fcount += 1
796 803 frevcount += len(rl)
797 804 fsrcsize += datasize
798 805 frawsize += rawsize
799 806 else:
800 807 raise error.ProgrammingError(b'unknown revlog type')
801 808
802 809 if not revcount:
803 810 return
804 811
805 812 ui.write(
806 813 _(
807 814 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
808 815 b'%d in changelog)\n'
809 816 )
810 817 % (revcount, frevcount, mrevcount, crevcount)
811 818 )
812 819 ui.write(
813 820 _(b'migrating %s in store; %s tracked data\n')
814 821 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
815 822 )
816 823
817 824 # Used to keep track of progress.
818 825 progress = None
819 826
820 827 def oncopiedrevision(rl, rev, node):
821 828 progress.increment()
822 829
823 830 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
824 831
825 832 # Do the actual copying.
826 833 # FUTURE this operation can be farmed off to worker processes.
827 834 seen = set()
828 835 for unencoded, encoded, size in alldatafiles:
829 836 if unencoded.endswith(b'.d'):
830 837 continue
831 838
832 839 oldrl = _revlogfrompath(srcrepo, unencoded)
833 840
834 841 if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
835 842 ui.write(
836 843 _(
837 844 b'finished migrating %d manifest revisions across %d '
838 845 b'manifests; change in size: %s\n'
839 846 )
840 847 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
841 848 )
842 849
843 850 ui.write(
844 851 _(
845 852 b'migrating changelog containing %d revisions '
846 853 b'(%s in store; %s tracked data)\n'
847 854 )
848 855 % (
849 856 crevcount,
850 857 util.bytecount(csrcsize),
851 858 util.bytecount(crawsize),
852 859 )
853 860 )
854 861 seen.add(b'c')
855 862 progress = srcrepo.ui.makeprogress(
856 863 _(b'changelog revisions'), total=crevcount
857 864 )
858 865 elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
859 866 ui.write(
860 867 _(
861 868 b'finished migrating %d filelog revisions across %d '
862 869 b'filelogs; change in size: %s\n'
863 870 )
864 871 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
865 872 )
866 873
867 874 ui.write(
868 875 _(
869 876 b'migrating %d manifests containing %d revisions '
870 877 b'(%s in store; %s tracked data)\n'
871 878 )
872 879 % (
873 880 mcount,
874 881 mrevcount,
875 882 util.bytecount(msrcsize),
876 883 util.bytecount(mrawsize),
877 884 )
878 885 )
879 886 seen.add(b'm')
880 887 if progress:
881 888 progress.complete()
882 889 progress = srcrepo.ui.makeprogress(
883 890 _(b'manifest revisions'), total=mrevcount
884 891 )
885 892 elif b'f' not in seen:
886 893 ui.write(
887 894 _(
888 895 b'migrating %d filelogs containing %d revisions '
889 896 b'(%s in store; %s tracked data)\n'
890 897 )
891 898 % (
892 899 fcount,
893 900 frevcount,
894 901 util.bytecount(fsrcsize),
895 902 util.bytecount(frawsize),
896 903 )
897 904 )
898 905 seen.add(b'f')
899 906 if progress:
900 907 progress.complete()
901 908 progress = srcrepo.ui.makeprogress(
902 909 _(b'file revisions'), total=frevcount
903 910 )
904 911
905 912 if matchrevlog(revlogs, unencoded):
906 913 ui.note(
907 914 _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
908 915 )
909 916 newrl = _revlogfrompath(dstrepo, unencoded)
910 917 oldrl.clone(
911 918 tr,
912 919 newrl,
913 920 addrevisioncb=oncopiedrevision,
914 921 deltareuse=deltareuse,
915 922 forcedeltabothparents=forcedeltabothparents,
916 923 sidedatacompanion=sidedatacompanion,
917 924 )
918 925 else:
919 926 msg = _(b'blindly copying %s containing %i revisions\n')
920 927 ui.note(msg % (unencoded, len(oldrl)))
921 928 _copyrevlog(tr, dstrepo, oldrl, unencoded)
922 929
923 930 newrl = _revlogfrompath(dstrepo, unencoded)
924 931
925 932 info = newrl.storageinfo(storedsize=True)
926 933 datasize = info[b'storedsize'] or 0
927 934
928 935 dstsize += datasize
929 936
930 937 if isinstance(newrl, changelog.changelog):
931 938 cdstsize += datasize
932 939 elif isinstance(newrl, manifest.manifestrevlog):
933 940 mdstsize += datasize
934 941 else:
935 942 fdstsize += datasize
936 943
937 944 progress.complete()
938 945
939 946 ui.write(
940 947 _(
941 948 b'finished migrating %d changelog revisions; change in size: '
942 949 b'%s\n'
943 950 )
944 951 % (crevcount, util.bytecount(cdstsize - csrcsize))
945 952 )
946 953
947 954 ui.write(
948 955 _(
949 956 b'finished migrating %d total revisions; total change in store '
950 957 b'size: %s\n'
951 958 )
952 959 % (revcount, util.bytecount(dstsize - srcsize))
953 960 )
954 961
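As a usage sketch (a hypothetical invocation; in this module the call is driven by `_upgraderepo` below), cloning only the changelog while reusing stored deltas would look roughly like:

with dstrepo.transaction(b'upgrade') as tr:
    _clonerevlogs(
        ui,
        srcrepo,
        dstrepo,
        tr,
        deltareuse=revlog.revlog.DELTAREUSEALWAYS,
        forcedeltabothparents=False,
        revlogs={UPGRADE_CHANGELOG},
    )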
955 962
956 963 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
957 964 """Determine whether to copy a store file during upgrade.
958 965
959 966 This function is called when migrating store files from ``srcrepo`` to
960 967 ``dstrepo`` as part of upgrading a repository.
961 968
962 969 Args:
963 970 srcrepo: repo we are copying from
964 971 dstrepo: repo we are copying to
965 972 requirements: set of requirements for ``dstrepo``
966 973 path: store file being examined
967 974 mode: the ``ST_MODE`` file type of ``path``
968 975 st: ``stat`` data structure for ``path``
969 976
970 977 Function should return ``True`` if the file is to be copied.
971 978 """
972 979 # Skip revlogs.
973 980 if path.endswith((b'.i', b'.d')):
974 981 return False
975 982 # Skip transaction related files.
976 983 if path.startswith(b'undo'):
977 984 return False
978 985 # Only copy regular files.
979 986 if mode != stat.S_IFREG:
980 987 return False
981 988 # Skip other skipped files.
982 989 if path in (b'lock', b'fncache'):
983 990 return False
984 991
985 992 return True
986 993
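To make the rules above concrete, a few hypothetical store entries and the verdict each would get (assuming a regular-file mode):

# b'data/foo.txt.i'   -> False  (revlog; migrated by _clonerevlogs instead)
# b'undo.backupfiles' -> False  (transaction-related)
# b'lock', b'fncache' -> False  (skipped explicitly)
# b'phaseroots'       -> True   (ordinary store file, copied verbatim)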
987 994
988 995 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
989 996 """Hook point for extensions to perform additional actions during upgrade.
990 997
991 998 This function is called after revlogs and store files have been copied but
992 999 before the new store is swapped into the original location.
993 1000 """
994 1001
995 1002
996 1003 def _upgraderepo(
997 1004 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
998 1005 ):
999 1006 """Do the low-level work of upgrading a repository.
1000 1007
1001 1008 The upgrade is effectively performed as a copy between a source
1002 1009 repository and a temporary destination repository.
1003 1010
1004 1011 The source repository is unmodified for as long as possible so the
1005 1012 upgrade can abort at any time without causing loss of service for
1006 1013 readers and without corrupting the source repository.
1007 1014 """
1008 1015 assert srcrepo.currentwlock()
1009 1016 assert dstrepo.currentwlock()
1010 1017
1011 1018 ui.write(
1012 1019 _(
1013 1020 b'(it is safe to interrupt this process any time before '
1014 1021 b'data migration completes)\n'
1015 1022 )
1016 1023 )
1017 1024
1018 1025 if b're-delta-all' in actions:
1019 1026 deltareuse = revlog.revlog.DELTAREUSENEVER
1020 1027 elif b're-delta-parent' in actions:
1021 1028 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1022 1029 elif b're-delta-multibase' in actions:
1023 1030 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1024 1031 elif b're-delta-fulladd' in actions:
1025 1032 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1026 1033 else:
1027 1034 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1028 1035
1029 1036 with dstrepo.transaction(b'upgrade') as tr:
1030 1037 _clonerevlogs(
1031 1038 ui,
1032 1039 srcrepo,
1033 1040 dstrepo,
1034 1041 tr,
1035 1042 deltareuse,
1036 1043 b're-delta-multibase' in actions,
1037 1044 revlogs=revlogs,
1038 1045 )
1039 1046
1040 1047 # Now copy other files in the store directory.
1041 1048 # The sorted() makes execution deterministic.
1042 1049 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1043 1050 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1044 1051 continue
1045 1052
1046 1053 srcrepo.ui.write(_(b'copying %s\n') % p)
1047 1054 src = srcrepo.store.rawvfs.join(p)
1048 1055 dst = dstrepo.store.rawvfs.join(p)
1049 1056 util.copyfile(src, dst, copystat=True)
1050 1057
1051 1058 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1052 1059
1053 1060 ui.write(_(b'data fully migrated to temporary repository\n'))
1054 1061
1055 1062 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1056 1063 backupvfs = vfsmod.vfs(backuppath)
1057 1064
1058 1065 # Make a backup of requires file first, as it is the first to be modified.
1059 1066 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1060 1067
1061 1068 # We install an arbitrary requirement that clients must not support
1062 1069 # as a mechanism to lock out new clients during the data swap. This is
1063 1070 # better than allowing a client to continue while the repository is in
1064 1071 # an inconsistent state.
1065 1072 ui.write(
1066 1073 _(
1067 1074 b'marking source repository as being upgraded; clients will be '
1068 1075 b'unable to read from repository\n'
1069 1076 )
1070 1077 )
1071 1078 scmutil.writerequires(
1072 1079 srcrepo.vfs, srcrepo.requirements | {b'upgradeinprogress'}
1073 1080 )
1074 1081
1075 1082 ui.write(_(b'starting in-place swap of repository data\n'))
1076 1083 ui.write(_(b'replaced files will be backed up at %s\n') % backuppath)
1077 1084
1078 1085 # Now swap in the new store directory. Doing it as a rename should make
1079 1086 # the operation nearly instantaneous and atomic (at least in well-behaved
1080 1087 # environments).
1081 1088 ui.write(_(b'replacing store...\n'))
1082 1089 tstart = util.timer()
1083 1090 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1084 1091 util.rename(dstrepo.spath, srcrepo.spath)
1085 1092 elapsed = util.timer() - tstart
1086 1093 ui.write(
1087 1094 _(
1088 1095 b'store replacement complete; repository was inconsistent for '
1089 1096 b'%0.1fs\n'
1090 1097 )
1091 1098 % elapsed
1092 1099 )
1093 1100
1094 1101 # We first write the requirements file. Any new requirements will lock
1095 1102 # out legacy clients.
1096 1103 ui.write(
1097 1104 _(
1098 1105 b'finalizing requirements file and making repository readable '
1099 1106 b'again\n'
1100 1107 )
1101 1108 )
1102 1109 scmutil.writerequires(srcrepo.vfs, requirements)
1103 1110
1104 1111 # The lock file from the old store won't be removed because nothing has a
1105 1112 # reference to its new location. So clean it up manually. Alternatively, we
1106 1113 # could update srcrepo.svfs and other variables to point to the new
1107 1114 # location. This is simpler.
1108 1115 backupvfs.unlink(b'store/lock')
1109 1116
1110 1117 return backuppath
1111 1118
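A comment-level summary of the delta-reuse selection at the top of this function (restating the if/elif chain; the constants are the revlog.revlog attributes referenced above):

# 're-delta-all'       -> DELTAREUSENEVER    (recompute every delta)
# 're-delta-parent'    -> DELTAREUSESAMEREVS
# 're-delta-multibase' -> DELTAREUSESAMEREVS, plus forcedeltabothparents
#                         through the _clonerevlogs call
# 're-delta-fulladd'   -> DELTAREUSEFULLADD
# otherwise            -> DELTAREUSEALWAYS   (reuse stored deltas as-is)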
1112 1119
1113 1120 def upgraderepo(
1114 1121 ui,
1115 1122 repo,
1116 1123 run=False,
1117 1124 optimize=None,
1118 1125 backup=True,
1119 1126 manifest=None,
1120 1127 changelog=None,
1121 1128 ):
1122 1129 """Upgrade a repository in place."""
1123 1130 if optimize is None:
1124 1131 optimize = []
1125 1132 optimize = set(legacy_opts_map.get(o, o) for o in optimize)
1126 1133 repo = repo.unfiltered()
1127 1134
1128 1135 revlogs = set(UPGRADE_ALL_REVLOGS)
1129 1136 specentries = ((b'c', changelog), (b'm', manifest))
1130 1137 specified = [(y, x) for (y, x) in specentries if x is not None]
1131 1138 if specified:
1132 1139 # the user limited which revlogs should be recloned
1133 1140 if any(x for y, x in specified):
1134 1141 revlogs = set()
1135 1142 for r, enabled in specified:
1136 1143 if enabled:
1137 1144 if r == b'c':
1138 1145 revlogs.add(UPGRADE_CHANGELOG)
1139 1146 elif r == b'm':
1140 1147 revlogs.add(UPGRADE_MANIFEST)
1141 1148 else:
1142 1149 # none are enabled
1143 1150 for r, __ in specified:
1144 1151 if r == b'c':
1145 1152 revlogs.discard(UPGRADE_CHANGELOG)
1146 1153 elif r == b'm':
1147 1154 revlogs.discard(UPGRADE_MANIFEST)
1148 1155
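For illustration, the flag handling above maps to the following revlog selections (the argument values are hypothetical):

# changelog=None,  manifest=None -> UPGRADE_ALL_REVLOGS
# changelog=True,  manifest=None -> {UPGRADE_CHANGELOG}
# changelog=False, manifest=None -> {UPGRADE_MANIFEST, UPGRADE_FILELOG}
# changelog=True,  manifest=True -> {UPGRADE_CHANGELOG, UPGRADE_MANIFEST}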
1149 1156 # Ensure the repository can be upgraded.
1150 1157 missingreqs = requiredsourcerequirements(repo) - repo.requirements
1151 1158 if missingreqs:
1152 1159 raise error.Abort(
1153 1160 _(b'cannot upgrade repository; requirement missing: %s')
1154 1161 % _(b', ').join(sorted(missingreqs))
1155 1162 )
1156 1163
1157 1164 blockedreqs = blocksourcerequirements(repo) & repo.requirements
1158 1165 if blockedreqs:
1159 1166 raise error.Abort(
1160 1167 _(
1161 1168 b'cannot upgrade repository; unsupported source '
1162 1169 b'requirement: %s'
1163 1170 )
1164 1171 % _(b', ').join(sorted(blockedreqs))
1165 1172 )
1166 1173
1167 1174 # FUTURE there is potentially a need to control the wanted requirements via
1168 1175 # command arguments or via an extension hook point.
1169 1176 newreqs = localrepo.newreporequirements(
1170 1177 repo.ui, localrepo.defaultcreateopts(repo.ui)
1171 1178 )
1172 1179 newreqs.update(preservedrequirements(repo))
1173 1180
1174 1181 noremovereqs = (
1175 1182 repo.requirements - newreqs - supportremovedrequirements(repo)
1176 1183 )
1177 1184 if noremovereqs:
1178 1185 raise error.Abort(
1179 1186 _(
1180 1187 b'cannot upgrade repository; requirement would be '
1181 1188 b'removed: %s'
1182 1189 )
1183 1190 % _(b', ').join(sorted(noremovereqs))
1184 1191 )
1185 1192
1186 1193 noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
1187 1194 if noaddreqs:
1188 1195 raise error.Abort(
1189 1196 _(
1190 1197 b'cannot upgrade repository; do not support adding '
1191 1198 b'requirement: %s'
1192 1199 )
1193 1200 % _(b', ').join(sorted(noaddreqs))
1194 1201 )
1195 1202
1196 1203 unsupportedreqs = newreqs - supporteddestrequirements(repo)
1197 1204 if unsupportedreqs:
1198 1205 raise error.Abort(
1199 1206 _(
1200 1207 b'cannot upgrade repository; do not support '
1201 1208 b'destination requirement: %s'
1202 1209 )
1203 1210 % _(b', ').join(sorted(unsupportedreqs))
1204 1211 )
1205 1212
1206 1213 # Find and validate all improvements that can be made.
1207 1214 alloptimizations = findoptimizations(repo)
1208 1215
1209 1216 # Apply and validate arguments.
1210 1217 optimizations = []
1211 1218 for o in alloptimizations:
1212 1219 if o.name in optimize:
1213 1220 optimizations.append(o)
1214 1221 optimize.discard(o.name)
1215 1222
1216 1223 if optimize: # anything left is unknown
1217 1224 raise error.Abort(
1218 1225 _(b'unknown optimization action requested: %s')
1219 1226 % b', '.join(sorted(optimize)),
1220 1227 hint=_(b'run without arguments to see valid optimizations'),
1221 1228 )
1222 1229
1223 1230 deficiencies = finddeficiencies(repo)
1224 1231 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
1225 1232 actions.extend(
1226 1233 o
1227 1234 for o in sorted(optimizations)
1228 1235 # determineactions could have added optimizations
1229 1236 if o not in actions
1230 1237 )
1231 1238
1232 1239 removedreqs = repo.requirements - newreqs
1233 1240 addedreqs = newreqs - repo.requirements
1234 1241
1235 1242 if revlogs != UPGRADE_ALL_REVLOGS:
1236 1243 incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
1237 1244 if incompatible:
1238 1245 msg = _(
1239 1246 b'ignoring revlogs selection flags, format requirements '
1240 1247 b'change: %s\n'
1241 1248 )
1242 1249 ui.warn(msg % b', '.join(sorted(incompatible)))
1243 1250 revlogs = UPGRADE_ALL_REVLOGS
1244 1251
1245 1252 def write_labeled(l, label):
1246 1253 first = True
1247 1254 for r in sorted(l):
1248 1255 if not first:
1249 1256 ui.write(b', ')
1250 1257 ui.write(r, label=label)
1251 1258 first = False
1252 1259
1253 1260 def printrequirements():
1254 1261 ui.write(_(b'requirements\n'))
1255 1262 ui.write(_(b' preserved: '))
1256 1263 write_labeled(
1257 1264 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
1258 1265 )
1259 1266 ui.write((b'\n'))
1260 1267 removed = repo.requirements - newreqs
1261 1268 if removed:
1262 1269 ui.write(_(b' removed: '))
1263 1270 write_labeled(removed, "upgrade-repo.requirement.removed")
1264 1271 ui.write((b'\n'))
1265 1272 added = newreqs - repo.requirements
1266 1273 if added:
1267 1274 ui.write(_(b' added: '))
1268 1275 write_labeled(added, "upgrade-repo.requirement.added")
1269 1276 ui.write((b'\n'))
1270 1277 ui.write(b'\n')
1271 1278
1272 1279 def printupgradeactions():
1273 1280 for a in actions:
1274 1281 ui.write(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
1275 1282
1276 1283 if not run:
1277 1284 fromconfig = []
1278 1285 onlydefault = []
1279 1286
1280 1287 for d in deficiencies:
1281 1288 if d.fromconfig(repo):
1282 1289 fromconfig.append(d)
1283 1290 elif d.default:
1284 1291 onlydefault.append(d)
1285 1292
1286 1293 if fromconfig or onlydefault:
1287 1294
1288 1295 if fromconfig:
1289 1296 ui.write(
1290 1297 _(
1291 1298 b'repository lacks features recommended by '
1292 1299 b'current config options:\n\n'
1293 1300 )
1294 1301 )
1295 1302 for i in fromconfig:
1296 1303 ui.write(b'%s\n %s\n\n' % (i.name, i.description))
1297 1304
1298 1305 if onlydefault:
1299 1306 ui.write(
1300 1307 _(
1301 1308 b'repository lacks features used by the default '
1302 1309 b'config options:\n\n'
1303 1310 )
1304 1311 )
1305 1312 for i in onlydefault:
1306 1313 ui.write(b'%s\n %s\n\n' % (i.name, i.description))
1307 1314
1308 1315 ui.write(b'\n')
1309 1316 else:
1310 1317 ui.write(
1311 1318 _(
1312 1319 b'(no feature deficiencies found in existing '
1313 1320 b'repository)\n'
1314 1321 )
1315 1322 )
1316 1323
1317 1324 ui.write(
1318 1325 _(
1319 1326 b'performing an upgrade with "--run" will make the following '
1320 1327 b'changes:\n\n'
1321 1328 )
1322 1329 )
1323 1330
1324 1331 printrequirements()
1325 1332 printupgradeactions()
1326 1333
1327 1334 unusedoptimize = [i for i in alloptimizations if i not in actions]
1328 1335
1329 1336 if unusedoptimize:
1330 1337 ui.write(
1331 1338 _(
1332 1339 b'additional optimizations are available by specifying '
1333 1340 b'"--optimize <name>":\n\n'
1334 1341 )
1335 1342 )
1336 1343 for i in unusedoptimize:
1337 1344 ui.write(_(b'%s\n %s\n\n') % (i.name, i.description))
1338 1345 return
1339 1346
1340 1347 # Else we're in the run=true case.
1341 1348 ui.write(_(b'upgrade will perform the following actions:\n\n'))
1342 1349 printrequirements()
1343 1350 printupgradeactions()
1344 1351
1345 1352 upgradeactions = [a.name for a in actions]
1346 1353
1347 1354 ui.write(_(b'beginning upgrade...\n'))
1348 1355 with repo.wlock(), repo.lock():
1349 1356 ui.write(_(b'repository locked and read-only\n'))
1350 1357 # Our strategy for upgrading the repository is to create a new,
1351 1358 # temporary repository, write data to it, then do a swap of the
1352 1359 # data. There are less heavyweight ways to do this, but it is easier
1353 1360 # to create a new repo object than to instantiate all the components
1354 1361 # (like the store) separately.
1355 1362 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
1356 1363 backuppath = None
1357 1364 try:
1358 1365 ui.write(
1359 1366 _(
1360 1367 b'creating temporary repository to stage migrated '
1361 1368 b'data: %s\n'
1362 1369 )
1363 1370 % tmppath
1364 1371 )
1365 1372
1366 1373 # clone ui without using ui.copy because repo.ui is protected
1367 1374 repoui = repo.ui.__class__(repo.ui)
1368 1375 dstrepo = hg.repository(repoui, path=tmppath, create=True)
1369 1376
1370 1377 with dstrepo.wlock(), dstrepo.lock():
1371 1378 backuppath = _upgraderepo(
1372 1379 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
1373 1380 )
1374 1381 if not (backup or backuppath is None):
1375 1382 ui.write(_(b'removing old repository content%s\n') % backuppath)
1376 1383 repo.vfs.rmtree(backuppath, forcibly=True)
1377 1384 backuppath = None
1378 1385
1379 1386 finally:
1380 1387 ui.write(_(b'removing temporary repository %s\n') % tmppath)
1381 1388 repo.vfs.rmtree(tmppath, forcibly=True)
1382 1389
1383 1390 if backuppath:
1384 1391 ui.warn(
1385 1392 _(b'copy of old repository backed up at %s\n') % backuppath
1386 1393 )
1387 1394 ui.warn(
1388 1395 _(
1389 1396 b'the old repository will not be deleted; remove '
1390 1397 b'it to free up disk space once the upgraded '
1391 1398 b'repository is verified\n'
1392 1399 )
1393 1400 )
@@ -1,199 +1,204 b''
1 1 A new repository uses zlib storage, which doesn't need a requirement
2 2
3 3 $ hg init default
4 4 $ cd default
5 5 $ cat .hg/requires
6 6 dotencode
7 7 fncache
8 8 generaldelta
9 9 revlogv1
10 10 sparserevlog
11 11 store
12 12 testonly-simplestore (reposimplestore !)
13 13
14 14 $ touch foo
15 15 $ hg -q commit -A -m 'initial commit with a lot of repeated repeated repeated text to trigger compression'
16 16 $ hg debugrevlog -c | grep 0x78
17 17 0x78 (x) : 1 (100.00%)
18 18 0x78 (x) : 110 (100.00%)
19 19
20 20 $ cd ..
21 21
22 22 Unknown compression engine to format.compression aborts
23 23
24 24 $ hg --config format.revlog-compression=unknown init unknown
25 abort: compression engine unknown defined by format.revlog-compression not available
25 abort: compression engines "unknown" defined by format.revlog-compression not available
26 26 (run "hg debuginstall" to list available compression engines)
27 27 [255]
28 28
29 An unknown compression engine in a list with a known one works fine
30
31 $ hg --config format.revlog-compression=zlib,unknown init zlib-before-unknown
32 $ hg --config format.revlog-compression=unknown,zlib init unknown-before-zlib
33
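For context, the list form exercised here can also be set persistently in a configuration file; a hedged example (engine order is illustrative; the intent is that the first available engine in the list is used, which is why an unknown name alongside a known one is tolerated):

  [format]
  revlog-compression = zstd, zlib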
29 34 A requirement specifying an unknown compression engine results in bail
30 35
31 36 $ hg init unknownrequirement
32 37 $ cd unknownrequirement
33 38 $ echo exp-compression-unknown >> .hg/requires
34 39 $ hg log
35 40 abort: repository requires features unknown to this Mercurial: exp-compression-unknown!
36 41 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
37 42 [255]
38 43
39 44 $ cd ..
40 45
41 46 #if zstd
42 47
43 48 $ hg --config format.revlog-compression=zstd init zstd
44 49 $ cd zstd
45 50 $ cat .hg/requires
46 51 dotencode
47 52 fncache
48 53 generaldelta
49 54 revlog-compression-zstd
50 55 revlogv1
51 56 sparserevlog
52 57 store
53 58 testonly-simplestore (reposimplestore !)
54 59
55 60 $ touch foo
56 61 $ hg -q commit -A -m 'initial commit with a lot of repeated repeated repeated text'
57 62
58 63 $ hg debugrevlog -c | grep 0x28
59 64 0x28 : 1 (100.00%)
60 65 0x28 : 98 (100.00%)
61 66
62 67 $ cd ..
63 68
64 69 Specifying a new format.compression on an existing repo won't introduce data
65 70 with that engine or a requirement
66 71
67 72 $ cd default
68 73 $ touch bar
69 74 $ hg --config format.revlog-compression=zstd -q commit -A -m 'add bar with a lot of repeated repeated repeated text'
70 75
71 76 $ cat .hg/requires
72 77 dotencode
73 78 fncache
74 79 generaldelta
75 80 revlogv1
76 81 sparserevlog
77 82 store
78 83 testonly-simplestore (reposimplestore !)
79 84
80 85 $ hg debugrevlog -c | grep 0x78
81 86 0x78 (x) : 2 (100.00%)
82 87 0x78 (x) : 199 (100.00%)
83 88
84 89 #endif
85 90
86 91 checking zlib options
87 92 =====================
88 93
89 94 $ hg init zlib-level-default
90 95 $ hg init zlib-level-1
91 96 $ cat << EOF >> zlib-level-1/.hg/hgrc
92 97 > [storage]
93 98 > revlog.zlib.level=1
94 99 > EOF
95 100 $ hg init zlib-level-9
96 101 $ cat << EOF >> zlib-level-9/.hg/hgrc
97 102 > [storage]
98 103 > revlog.zlib.level=9
99 104 > EOF
100 105
101 106
102 107 $ commitone() {
103 108 > repo=$1
104 109 > cp $RUNTESTDIR/bundles/issue4438-r1.hg $repo/a
105 110 > hg -R $repo add $repo/a
106 111 > hg -R $repo commit -m some-commit
107 112 > }
108 113
109 114 $ for repo in zlib-level-default zlib-level-1 zlib-level-9; do
110 115 > commitone $repo
111 116 > done
112 117
113 118 $ $RUNTESTDIR/f -s */.hg/store/data/*
114 119 default/.hg/store/data/foo.i: size=64 (pure !)
115 120 zlib-level-1/.hg/store/data/a.i: size=4146
116 121 zlib-level-9/.hg/store/data/a.i: size=4138
117 122 zlib-level-default/.hg/store/data/a.i: size=4138
118 123
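The size differences above are simply zlib's compression-level tradeoff; a standalone Python sketch (unrelated to Mercurial's storage code) shows the same effect:

    import zlib
    data = b'some repeated repeated repeated text ' * 200
    print(len(zlib.compress(data, 1)))   # faster, larger output
    print(len(zlib.compress(data, 9)))   # slower, smaller output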
119 124 Test error cases
120 125
121 126 $ hg init zlib-level-invalid
122 127 $ cat << EOF >> zlib-level-invalid/.hg/hgrc
123 128 > [storage]
124 129 > revlog.zlib.level=foobar
125 130 > EOF
126 131 $ commitone zlib-level-invalid
127 132 abort: storage.revlog.zlib.level is not a valid integer ('foobar')
128 133 abort: storage.revlog.zlib.level is not a valid integer ('foobar')
129 134 [255]
130 135
131 136 $ hg init zlib-level-out-of-range
132 137 $ cat << EOF >> zlib-level-out-of-range/.hg/hgrc
133 138 > [storage]
134 139 > revlog.zlib.level=42
135 140 > EOF
136 141
137 142 $ commitone zlib-level-out-of-range
138 143 abort: invalid value for `storage.revlog.zlib.level` config: 42
139 144 abort: invalid value for `storage.revlog.zlib.level` config: 42
140 145 [255]
141 146
142 147 #if zstd
143 148
144 149 checking zstd options
145 150 =====================
146 151
147 152 $ hg init zstd-level-default --config format.revlog-compression=zstd
148 153 $ hg init zstd-level-1 --config format.revlog-compression=zstd
149 154 $ cat << EOF >> zstd-level-1/.hg/hgrc
150 155 > [storage]
151 156 > revlog.zstd.level=1
152 157 > EOF
153 158 $ hg init zstd-level-22 --config format.revlog-compression=zstd
154 159 $ cat << EOF >> zstd-level-22/.hg/hgrc
155 160 > [storage]
156 161 > revlog.zstd.level=22
157 162 > EOF
158 163
159 164
160 165 $ commitone() {
161 166 > repo=$1
162 167 > cp $RUNTESTDIR/bundles/issue4438-r1.hg $repo/a
163 168 > hg -R $repo add $repo/a
164 169 > hg -R $repo commit -m some-commit
165 170 > }
166 171
167 172 $ for repo in zstd-level-default zstd-level-1 zstd-level-22; do
168 173 > commitone $repo
169 174 > done
170 175
171 176 $ $RUNTESTDIR/f -s zstd-*/.hg/store/data/*
172 177 zstd-level-1/.hg/store/data/a.i: size=4114
173 178 zstd-level-22/.hg/store/data/a.i: size=4091
174 179 zstd-level-default/\.hg/store/data/a\.i: size=(4094|4102) (re)
175 180
176 181 Test error cases
177 182
178 183 $ hg init zstd-level-invalid --config format.revlog-compression=zstd
179 184 $ cat << EOF >> zstd-level-invalid/.hg/hgrc
180 185 > [storage]
181 186 > revlog.zstd.level=foobar
182 187 > EOF
183 188 $ commitone zstd-level-invalid
184 189 abort: storage.revlog.zstd.level is not a valid integer ('foobar')
185 190 abort: storage.revlog.zstd.level is not a valid integer ('foobar')
186 191 [255]
187 192
188 193 $ hg init zstd-level-out-of-range --config format.revlog-compression=zstd
189 194 $ cat << EOF >> zstd-level-out-of-range/.hg/hgrc
190 195 > [storage]
191 196 > revlog.zstd.level=42
192 197 > EOF
193 198
194 199 $ commitone zstd-level-out-of-range
195 200 abort: invalid value for `storage.revlog.zstd.level` config: 42
196 201 abort: invalid value for `storage.revlog.zstd.level` config: 42
197 202 [255]
198 203
199 204 #endif