##// END OF EJS Templates
delta-find: add a way to control the number of bases tested at the same time...
marmoute -
r50552:f5f113f1 default
parent child Browse files
Show More
@@ -1,2896 +1,2901 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import functools
9 import functools
10 import re
10 import re
11
11
12 from . import (
12 from . import (
13 encoding,
13 encoding,
14 error,
14 error,
15 )
15 )
16
16
17
17
18 def loadconfigtable(ui, extname, configtable):
18 def loadconfigtable(ui, extname, configtable):
19 """update config item known to the ui with the extension ones"""
19 """update config item known to the ui with the extension ones"""
20 for section, items in sorted(configtable.items()):
20 for section, items in sorted(configtable.items()):
21 knownitems = ui._knownconfig.setdefault(section, itemregister())
21 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 knownkeys = set(knownitems)
22 knownkeys = set(knownitems)
23 newkeys = set(items)
23 newkeys = set(items)
24 for key in sorted(knownkeys & newkeys):
24 for key in sorted(knownkeys & newkeys):
25 msg = b"extension '%s' overwrite config item '%s.%s'"
25 msg = b"extension '%s' overwrite config item '%s.%s'"
26 msg %= (extname, section, key)
26 msg %= (extname, section, key)
27 ui.develwarn(msg, config=b'warn-config')
27 ui.develwarn(msg, config=b'warn-config')
28
28
29 knownitems.update(items)
29 knownitems.update(items)
30
30
31
31
32 class configitem:
32 class configitem:
33 """represent a known config item
33 """represent a known config item
34
34
35 :section: the official config section where to find this item,
35 :section: the official config section where to find this item,
36 :name: the official name within the section,
36 :name: the official name within the section,
37 :default: default value for this item,
37 :default: default value for this item,
38 :alias: optional list of tuples as alternatives,
38 :alias: optional list of tuples as alternatives,
39 :generic: this is a generic definition, match name using regular expression.
39 :generic: this is a generic definition, match name using regular expression.
40 """
40 """
41
41
42 def __init__(
42 def __init__(
43 self,
43 self,
44 section,
44 section,
45 name,
45 name,
46 default=None,
46 default=None,
47 alias=(),
47 alias=(),
48 generic=False,
48 generic=False,
49 priority=0,
49 priority=0,
50 experimental=False,
50 experimental=False,
51 ):
51 ):
52 self.section = section
52 self.section = section
53 self.name = name
53 self.name = name
54 self.default = default
54 self.default = default
55 self.alias = list(alias)
55 self.alias = list(alias)
56 self.generic = generic
56 self.generic = generic
57 self.priority = priority
57 self.priority = priority
58 self.experimental = experimental
58 self.experimental = experimental
59 self._re = None
59 self._re = None
60 if generic:
60 if generic:
61 self._re = re.compile(self.name)
61 self._re = re.compile(self.name)
62
62
63
63
64 class itemregister(dict):
64 class itemregister(dict):
65 """A specialized dictionary that can handle wild-card selection"""
65 """A specialized dictionary that can handle wild-card selection"""
66
66
67 def __init__(self):
67 def __init__(self):
68 super(itemregister, self).__init__()
68 super(itemregister, self).__init__()
69 self._generics = set()
69 self._generics = set()
70
70
71 def update(self, other):
71 def update(self, other):
72 super(itemregister, self).update(other)
72 super(itemregister, self).update(other)
73 self._generics.update(other._generics)
73 self._generics.update(other._generics)
74
74
75 def __setitem__(self, key, item):
75 def __setitem__(self, key, item):
76 super(itemregister, self).__setitem__(key, item)
76 super(itemregister, self).__setitem__(key, item)
77 if item.generic:
77 if item.generic:
78 self._generics.add(item)
78 self._generics.add(item)
79
79
80 def get(self, key):
80 def get(self, key):
81 baseitem = super(itemregister, self).get(key)
81 baseitem = super(itemregister, self).get(key)
82 if baseitem is not None and not baseitem.generic:
82 if baseitem is not None and not baseitem.generic:
83 return baseitem
83 return baseitem
84
84
85 # search for a matching generic item
85 # search for a matching generic item
86 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
86 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
87 for item in generics:
87 for item in generics:
88 # we use 'match' instead of 'search' to make the matching simpler
88 # we use 'match' instead of 'search' to make the matching simpler
89 # for people unfamiliar with regular expression. Having the match
89 # for people unfamiliar with regular expression. Having the match
90 # rooted to the start of the string will produce less surprising
90 # rooted to the start of the string will produce less surprising
91 # result for user writing simple regex for sub-attribute.
91 # result for user writing simple regex for sub-attribute.
92 #
92 #
93 # For example using "color\..*" match produces an unsurprising
93 # For example using "color\..*" match produces an unsurprising
94 # result, while using search could suddenly match apparently
94 # result, while using search could suddenly match apparently
95 # unrelated configuration that happens to contains "color."
95 # unrelated configuration that happens to contains "color."
96 # anywhere. This is a tradeoff where we favor requiring ".*" on
96 # anywhere. This is a tradeoff where we favor requiring ".*" on
97 # some match to avoid the need to prefix most pattern with "^".
97 # some match to avoid the need to prefix most pattern with "^".
98 # The "^" seems more error prone.
98 # The "^" seems more error prone.
99 if item._re.match(key):
99 if item._re.match(key):
100 return item
100 return item
101
101
102 return None
102 return None
103
103
104
104
105 coreitems = {}
105 coreitems = {}
106
106
107
107
108 def _register(configtable, *args, **kwargs):
108 def _register(configtable, *args, **kwargs):
109 item = configitem(*args, **kwargs)
109 item = configitem(*args, **kwargs)
110 section = configtable.setdefault(item.section, itemregister())
110 section = configtable.setdefault(item.section, itemregister())
111 if item.name in section:
111 if item.name in section:
112 msg = b"duplicated config item registration for '%s.%s'"
112 msg = b"duplicated config item registration for '%s.%s'"
113 raise error.ProgrammingError(msg % (item.section, item.name))
113 raise error.ProgrammingError(msg % (item.section, item.name))
114 section[item.name] = item
114 section[item.name] = item
115
115
116
116
117 # special value for case where the default is derived from other values
117 # special value for case where the default is derived from other values
118 dynamicdefault = object()
118 dynamicdefault = object()
119
119
120 # Registering actual config items
120 # Registering actual config items
121
121
122
122
123 def getitemregister(configtable):
123 def getitemregister(configtable):
124 f = functools.partial(_register, configtable)
124 f = functools.partial(_register, configtable)
125 # export pseudo enum as configitem.*
125 # export pseudo enum as configitem.*
126 f.dynamicdefault = dynamicdefault
126 f.dynamicdefault = dynamicdefault
127 return f
127 return f
128
128
129
129
130 coreconfigitem = getitemregister(coreitems)
130 coreconfigitem = getitemregister(coreitems)
131
131
132
132
133 def _registerdiffopts(section, configprefix=b''):
133 def _registerdiffopts(section, configprefix=b''):
134 coreconfigitem(
134 coreconfigitem(
135 section,
135 section,
136 configprefix + b'nodates',
136 configprefix + b'nodates',
137 default=False,
137 default=False,
138 )
138 )
139 coreconfigitem(
139 coreconfigitem(
140 section,
140 section,
141 configprefix + b'showfunc',
141 configprefix + b'showfunc',
142 default=False,
142 default=False,
143 )
143 )
144 coreconfigitem(
144 coreconfigitem(
145 section,
145 section,
146 configprefix + b'unified',
146 configprefix + b'unified',
147 default=None,
147 default=None,
148 )
148 )
149 coreconfigitem(
149 coreconfigitem(
150 section,
150 section,
151 configprefix + b'git',
151 configprefix + b'git',
152 default=False,
152 default=False,
153 )
153 )
154 coreconfigitem(
154 coreconfigitem(
155 section,
155 section,
156 configprefix + b'ignorews',
156 configprefix + b'ignorews',
157 default=False,
157 default=False,
158 )
158 )
159 coreconfigitem(
159 coreconfigitem(
160 section,
160 section,
161 configprefix + b'ignorewsamount',
161 configprefix + b'ignorewsamount',
162 default=False,
162 default=False,
163 )
163 )
164 coreconfigitem(
164 coreconfigitem(
165 section,
165 section,
166 configprefix + b'ignoreblanklines',
166 configprefix + b'ignoreblanklines',
167 default=False,
167 default=False,
168 )
168 )
169 coreconfigitem(
169 coreconfigitem(
170 section,
170 section,
171 configprefix + b'ignorewseol',
171 configprefix + b'ignorewseol',
172 default=False,
172 default=False,
173 )
173 )
174 coreconfigitem(
174 coreconfigitem(
175 section,
175 section,
176 configprefix + b'nobinary',
176 configprefix + b'nobinary',
177 default=False,
177 default=False,
178 )
178 )
179 coreconfigitem(
179 coreconfigitem(
180 section,
180 section,
181 configprefix + b'noprefix',
181 configprefix + b'noprefix',
182 default=False,
182 default=False,
183 )
183 )
184 coreconfigitem(
184 coreconfigitem(
185 section,
185 section,
186 configprefix + b'word-diff',
186 configprefix + b'word-diff',
187 default=False,
187 default=False,
188 )
188 )
189
189
190
190
191 coreconfigitem(
191 coreconfigitem(
192 b'alias',
192 b'alias',
193 b'.*',
193 b'.*',
194 default=dynamicdefault,
194 default=dynamicdefault,
195 generic=True,
195 generic=True,
196 )
196 )
197 coreconfigitem(
197 coreconfigitem(
198 b'auth',
198 b'auth',
199 b'cookiefile',
199 b'cookiefile',
200 default=None,
200 default=None,
201 )
201 )
202 _registerdiffopts(section=b'annotate')
202 _registerdiffopts(section=b'annotate')
203 # bookmarks.pushing: internal hack for discovery
203 # bookmarks.pushing: internal hack for discovery
204 coreconfigitem(
204 coreconfigitem(
205 b'bookmarks',
205 b'bookmarks',
206 b'pushing',
206 b'pushing',
207 default=list,
207 default=list,
208 )
208 )
209 # bundle.mainreporoot: internal hack for bundlerepo
209 # bundle.mainreporoot: internal hack for bundlerepo
210 coreconfigitem(
210 coreconfigitem(
211 b'bundle',
211 b'bundle',
212 b'mainreporoot',
212 b'mainreporoot',
213 default=b'',
213 default=b'',
214 )
214 )
215 coreconfigitem(
215 coreconfigitem(
216 b'censor',
216 b'censor',
217 b'policy',
217 b'policy',
218 default=b'abort',
218 default=b'abort',
219 experimental=True,
219 experimental=True,
220 )
220 )
221 coreconfigitem(
221 coreconfigitem(
222 b'chgserver',
222 b'chgserver',
223 b'idletimeout',
223 b'idletimeout',
224 default=3600,
224 default=3600,
225 )
225 )
226 coreconfigitem(
226 coreconfigitem(
227 b'chgserver',
227 b'chgserver',
228 b'skiphash',
228 b'skiphash',
229 default=False,
229 default=False,
230 )
230 )
231 coreconfigitem(
231 coreconfigitem(
232 b'cmdserver',
232 b'cmdserver',
233 b'log',
233 b'log',
234 default=None,
234 default=None,
235 )
235 )
236 coreconfigitem(
236 coreconfigitem(
237 b'cmdserver',
237 b'cmdserver',
238 b'max-log-files',
238 b'max-log-files',
239 default=7,
239 default=7,
240 )
240 )
241 coreconfigitem(
241 coreconfigitem(
242 b'cmdserver',
242 b'cmdserver',
243 b'max-log-size',
243 b'max-log-size',
244 default=b'1 MB',
244 default=b'1 MB',
245 )
245 )
246 coreconfigitem(
246 coreconfigitem(
247 b'cmdserver',
247 b'cmdserver',
248 b'max-repo-cache',
248 b'max-repo-cache',
249 default=0,
249 default=0,
250 experimental=True,
250 experimental=True,
251 )
251 )
252 coreconfigitem(
252 coreconfigitem(
253 b'cmdserver',
253 b'cmdserver',
254 b'message-encodings',
254 b'message-encodings',
255 default=list,
255 default=list,
256 )
256 )
257 coreconfigitem(
257 coreconfigitem(
258 b'cmdserver',
258 b'cmdserver',
259 b'track-log',
259 b'track-log',
260 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
260 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
261 )
261 )
262 coreconfigitem(
262 coreconfigitem(
263 b'cmdserver',
263 b'cmdserver',
264 b'shutdown-on-interrupt',
264 b'shutdown-on-interrupt',
265 default=True,
265 default=True,
266 )
266 )
267 coreconfigitem(
267 coreconfigitem(
268 b'color',
268 b'color',
269 b'.*',
269 b'.*',
270 default=None,
270 default=None,
271 generic=True,
271 generic=True,
272 )
272 )
273 coreconfigitem(
273 coreconfigitem(
274 b'color',
274 b'color',
275 b'mode',
275 b'mode',
276 default=b'auto',
276 default=b'auto',
277 )
277 )
278 coreconfigitem(
278 coreconfigitem(
279 b'color',
279 b'color',
280 b'pagermode',
280 b'pagermode',
281 default=dynamicdefault,
281 default=dynamicdefault,
282 )
282 )
283 coreconfigitem(
283 coreconfigitem(
284 b'command-templates',
284 b'command-templates',
285 b'graphnode',
285 b'graphnode',
286 default=None,
286 default=None,
287 alias=[(b'ui', b'graphnodetemplate')],
287 alias=[(b'ui', b'graphnodetemplate')],
288 )
288 )
289 coreconfigitem(
289 coreconfigitem(
290 b'command-templates',
290 b'command-templates',
291 b'log',
291 b'log',
292 default=None,
292 default=None,
293 alias=[(b'ui', b'logtemplate')],
293 alias=[(b'ui', b'logtemplate')],
294 )
294 )
295 coreconfigitem(
295 coreconfigitem(
296 b'command-templates',
296 b'command-templates',
297 b'mergemarker',
297 b'mergemarker',
298 default=(
298 default=(
299 b'{node|short} '
299 b'{node|short} '
300 b'{ifeq(tags, "tip", "", '
300 b'{ifeq(tags, "tip", "", '
301 b'ifeq(tags, "", "", "{tags} "))}'
301 b'ifeq(tags, "", "", "{tags} "))}'
302 b'{if(bookmarks, "{bookmarks} ")}'
302 b'{if(bookmarks, "{bookmarks} ")}'
303 b'{ifeq(branch, "default", "", "{branch} ")}'
303 b'{ifeq(branch, "default", "", "{branch} ")}'
304 b'- {author|user}: {desc|firstline}'
304 b'- {author|user}: {desc|firstline}'
305 ),
305 ),
306 alias=[(b'ui', b'mergemarkertemplate')],
306 alias=[(b'ui', b'mergemarkertemplate')],
307 )
307 )
308 coreconfigitem(
308 coreconfigitem(
309 b'command-templates',
309 b'command-templates',
310 b'pre-merge-tool-output',
310 b'pre-merge-tool-output',
311 default=None,
311 default=None,
312 alias=[(b'ui', b'pre-merge-tool-output-template')],
312 alias=[(b'ui', b'pre-merge-tool-output-template')],
313 )
313 )
314 coreconfigitem(
314 coreconfigitem(
315 b'command-templates',
315 b'command-templates',
316 b'oneline-summary',
316 b'oneline-summary',
317 default=None,
317 default=None,
318 )
318 )
319 coreconfigitem(
319 coreconfigitem(
320 b'command-templates',
320 b'command-templates',
321 b'oneline-summary.*',
321 b'oneline-summary.*',
322 default=dynamicdefault,
322 default=dynamicdefault,
323 generic=True,
323 generic=True,
324 )
324 )
325 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
325 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
326 coreconfigitem(
326 coreconfigitem(
327 b'commands',
327 b'commands',
328 b'commit.post-status',
328 b'commit.post-status',
329 default=False,
329 default=False,
330 )
330 )
331 coreconfigitem(
331 coreconfigitem(
332 b'commands',
332 b'commands',
333 b'grep.all-files',
333 b'grep.all-files',
334 default=False,
334 default=False,
335 experimental=True,
335 experimental=True,
336 )
336 )
337 coreconfigitem(
337 coreconfigitem(
338 b'commands',
338 b'commands',
339 b'merge.require-rev',
339 b'merge.require-rev',
340 default=False,
340 default=False,
341 )
341 )
342 coreconfigitem(
342 coreconfigitem(
343 b'commands',
343 b'commands',
344 b'push.require-revs',
344 b'push.require-revs',
345 default=False,
345 default=False,
346 )
346 )
347 coreconfigitem(
347 coreconfigitem(
348 b'commands',
348 b'commands',
349 b'resolve.confirm',
349 b'resolve.confirm',
350 default=False,
350 default=False,
351 )
351 )
352 coreconfigitem(
352 coreconfigitem(
353 b'commands',
353 b'commands',
354 b'resolve.explicit-re-merge',
354 b'resolve.explicit-re-merge',
355 default=False,
355 default=False,
356 )
356 )
357 coreconfigitem(
357 coreconfigitem(
358 b'commands',
358 b'commands',
359 b'resolve.mark-check',
359 b'resolve.mark-check',
360 default=b'none',
360 default=b'none',
361 )
361 )
362 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
362 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
363 coreconfigitem(
363 coreconfigitem(
364 b'commands',
364 b'commands',
365 b'show.aliasprefix',
365 b'show.aliasprefix',
366 default=list,
366 default=list,
367 )
367 )
368 coreconfigitem(
368 coreconfigitem(
369 b'commands',
369 b'commands',
370 b'status.relative',
370 b'status.relative',
371 default=False,
371 default=False,
372 )
372 )
373 coreconfigitem(
373 coreconfigitem(
374 b'commands',
374 b'commands',
375 b'status.skipstates',
375 b'status.skipstates',
376 default=[],
376 default=[],
377 experimental=True,
377 experimental=True,
378 )
378 )
379 coreconfigitem(
379 coreconfigitem(
380 b'commands',
380 b'commands',
381 b'status.terse',
381 b'status.terse',
382 default=b'',
382 default=b'',
383 )
383 )
384 coreconfigitem(
384 coreconfigitem(
385 b'commands',
385 b'commands',
386 b'status.verbose',
386 b'status.verbose',
387 default=False,
387 default=False,
388 )
388 )
389 coreconfigitem(
389 coreconfigitem(
390 b'commands',
390 b'commands',
391 b'update.check',
391 b'update.check',
392 default=None,
392 default=None,
393 )
393 )
394 coreconfigitem(
394 coreconfigitem(
395 b'commands',
395 b'commands',
396 b'update.requiredest',
396 b'update.requiredest',
397 default=False,
397 default=False,
398 )
398 )
399 coreconfigitem(
399 coreconfigitem(
400 b'committemplate',
400 b'committemplate',
401 b'.*',
401 b'.*',
402 default=None,
402 default=None,
403 generic=True,
403 generic=True,
404 )
404 )
405 coreconfigitem(
405 coreconfigitem(
406 b'convert',
406 b'convert',
407 b'bzr.saverev',
407 b'bzr.saverev',
408 default=True,
408 default=True,
409 )
409 )
410 coreconfigitem(
410 coreconfigitem(
411 b'convert',
411 b'convert',
412 b'cvsps.cache',
412 b'cvsps.cache',
413 default=True,
413 default=True,
414 )
414 )
415 coreconfigitem(
415 coreconfigitem(
416 b'convert',
416 b'convert',
417 b'cvsps.fuzz',
417 b'cvsps.fuzz',
418 default=60,
418 default=60,
419 )
419 )
420 coreconfigitem(
420 coreconfigitem(
421 b'convert',
421 b'convert',
422 b'cvsps.logencoding',
422 b'cvsps.logencoding',
423 default=None,
423 default=None,
424 )
424 )
425 coreconfigitem(
425 coreconfigitem(
426 b'convert',
426 b'convert',
427 b'cvsps.mergefrom',
427 b'cvsps.mergefrom',
428 default=None,
428 default=None,
429 )
429 )
430 coreconfigitem(
430 coreconfigitem(
431 b'convert',
431 b'convert',
432 b'cvsps.mergeto',
432 b'cvsps.mergeto',
433 default=None,
433 default=None,
434 )
434 )
435 coreconfigitem(
435 coreconfigitem(
436 b'convert',
436 b'convert',
437 b'git.committeractions',
437 b'git.committeractions',
438 default=lambda: [b'messagedifferent'],
438 default=lambda: [b'messagedifferent'],
439 )
439 )
440 coreconfigitem(
440 coreconfigitem(
441 b'convert',
441 b'convert',
442 b'git.extrakeys',
442 b'git.extrakeys',
443 default=list,
443 default=list,
444 )
444 )
445 coreconfigitem(
445 coreconfigitem(
446 b'convert',
446 b'convert',
447 b'git.findcopiesharder',
447 b'git.findcopiesharder',
448 default=False,
448 default=False,
449 )
449 )
450 coreconfigitem(
450 coreconfigitem(
451 b'convert',
451 b'convert',
452 b'git.remoteprefix',
452 b'git.remoteprefix',
453 default=b'remote',
453 default=b'remote',
454 )
454 )
455 coreconfigitem(
455 coreconfigitem(
456 b'convert',
456 b'convert',
457 b'git.renamelimit',
457 b'git.renamelimit',
458 default=400,
458 default=400,
459 )
459 )
460 coreconfigitem(
460 coreconfigitem(
461 b'convert',
461 b'convert',
462 b'git.saverev',
462 b'git.saverev',
463 default=True,
463 default=True,
464 )
464 )
465 coreconfigitem(
465 coreconfigitem(
466 b'convert',
466 b'convert',
467 b'git.similarity',
467 b'git.similarity',
468 default=50,
468 default=50,
469 )
469 )
470 coreconfigitem(
470 coreconfigitem(
471 b'convert',
471 b'convert',
472 b'git.skipsubmodules',
472 b'git.skipsubmodules',
473 default=False,
473 default=False,
474 )
474 )
475 coreconfigitem(
475 coreconfigitem(
476 b'convert',
476 b'convert',
477 b'hg.clonebranches',
477 b'hg.clonebranches',
478 default=False,
478 default=False,
479 )
479 )
480 coreconfigitem(
480 coreconfigitem(
481 b'convert',
481 b'convert',
482 b'hg.ignoreerrors',
482 b'hg.ignoreerrors',
483 default=False,
483 default=False,
484 )
484 )
485 coreconfigitem(
485 coreconfigitem(
486 b'convert',
486 b'convert',
487 b'hg.preserve-hash',
487 b'hg.preserve-hash',
488 default=False,
488 default=False,
489 )
489 )
490 coreconfigitem(
490 coreconfigitem(
491 b'convert',
491 b'convert',
492 b'hg.revs',
492 b'hg.revs',
493 default=None,
493 default=None,
494 )
494 )
495 coreconfigitem(
495 coreconfigitem(
496 b'convert',
496 b'convert',
497 b'hg.saverev',
497 b'hg.saverev',
498 default=False,
498 default=False,
499 )
499 )
500 coreconfigitem(
500 coreconfigitem(
501 b'convert',
501 b'convert',
502 b'hg.sourcename',
502 b'hg.sourcename',
503 default=None,
503 default=None,
504 )
504 )
505 coreconfigitem(
505 coreconfigitem(
506 b'convert',
506 b'convert',
507 b'hg.startrev',
507 b'hg.startrev',
508 default=None,
508 default=None,
509 )
509 )
510 coreconfigitem(
510 coreconfigitem(
511 b'convert',
511 b'convert',
512 b'hg.tagsbranch',
512 b'hg.tagsbranch',
513 default=b'default',
513 default=b'default',
514 )
514 )
515 coreconfigitem(
515 coreconfigitem(
516 b'convert',
516 b'convert',
517 b'hg.usebranchnames',
517 b'hg.usebranchnames',
518 default=True,
518 default=True,
519 )
519 )
520 coreconfigitem(
520 coreconfigitem(
521 b'convert',
521 b'convert',
522 b'ignoreancestorcheck',
522 b'ignoreancestorcheck',
523 default=False,
523 default=False,
524 experimental=True,
524 experimental=True,
525 )
525 )
526 coreconfigitem(
526 coreconfigitem(
527 b'convert',
527 b'convert',
528 b'localtimezone',
528 b'localtimezone',
529 default=False,
529 default=False,
530 )
530 )
531 coreconfigitem(
531 coreconfigitem(
532 b'convert',
532 b'convert',
533 b'p4.encoding',
533 b'p4.encoding',
534 default=dynamicdefault,
534 default=dynamicdefault,
535 )
535 )
536 coreconfigitem(
536 coreconfigitem(
537 b'convert',
537 b'convert',
538 b'p4.startrev',
538 b'p4.startrev',
539 default=0,
539 default=0,
540 )
540 )
541 coreconfigitem(
541 coreconfigitem(
542 b'convert',
542 b'convert',
543 b'skiptags',
543 b'skiptags',
544 default=False,
544 default=False,
545 )
545 )
546 coreconfigitem(
546 coreconfigitem(
547 b'convert',
547 b'convert',
548 b'svn.debugsvnlog',
548 b'svn.debugsvnlog',
549 default=True,
549 default=True,
550 )
550 )
551 coreconfigitem(
551 coreconfigitem(
552 b'convert',
552 b'convert',
553 b'svn.trunk',
553 b'svn.trunk',
554 default=None,
554 default=None,
555 )
555 )
556 coreconfigitem(
556 coreconfigitem(
557 b'convert',
557 b'convert',
558 b'svn.tags',
558 b'svn.tags',
559 default=None,
559 default=None,
560 )
560 )
561 coreconfigitem(
561 coreconfigitem(
562 b'convert',
562 b'convert',
563 b'svn.branches',
563 b'svn.branches',
564 default=None,
564 default=None,
565 )
565 )
566 coreconfigitem(
566 coreconfigitem(
567 b'convert',
567 b'convert',
568 b'svn.startrev',
568 b'svn.startrev',
569 default=0,
569 default=0,
570 )
570 )
571 coreconfigitem(
571 coreconfigitem(
572 b'convert',
572 b'convert',
573 b'svn.dangerous-set-commit-dates',
573 b'svn.dangerous-set-commit-dates',
574 default=False,
574 default=False,
575 )
575 )
576 coreconfigitem(
576 coreconfigitem(
577 b'debug',
577 b'debug',
578 b'dirstate.delaywrite',
578 b'dirstate.delaywrite',
579 default=0,
579 default=0,
580 )
580 )
581 coreconfigitem(
581 coreconfigitem(
582 b'debug',
582 b'debug',
583 b'revlog.verifyposition.changelog',
583 b'revlog.verifyposition.changelog',
584 default=b'',
584 default=b'',
585 )
585 )
586 coreconfigitem(
586 coreconfigitem(
587 b'debug',
587 b'debug',
588 b'revlog.debug-delta',
588 b'revlog.debug-delta',
589 default=False,
589 default=False,
590 )
590 )
591 # display extra information about the bundling process
591 # display extra information about the bundling process
592 coreconfigitem(
592 coreconfigitem(
593 b'debug',
593 b'debug',
594 b'bundling-stats',
594 b'bundling-stats',
595 default=False,
595 default=False,
596 )
596 )
597 # display extra information about the unbundling process
597 # display extra information about the unbundling process
598 coreconfigitem(
598 coreconfigitem(
599 b'debug',
599 b'debug',
600 b'unbundling-stats',
600 b'unbundling-stats',
601 default=False,
601 default=False,
602 )
602 )
603 coreconfigitem(
603 coreconfigitem(
604 b'defaults',
604 b'defaults',
605 b'.*',
605 b'.*',
606 default=None,
606 default=None,
607 generic=True,
607 generic=True,
608 )
608 )
609 coreconfigitem(
609 coreconfigitem(
610 b'devel',
610 b'devel',
611 b'all-warnings',
611 b'all-warnings',
612 default=False,
612 default=False,
613 )
613 )
614 coreconfigitem(
614 coreconfigitem(
615 b'devel',
615 b'devel',
616 b'bundle2.debug',
616 b'bundle2.debug',
617 default=False,
617 default=False,
618 )
618 )
619 coreconfigitem(
619 coreconfigitem(
620 b'devel',
620 b'devel',
621 b'bundle.delta',
621 b'bundle.delta',
622 default=b'',
622 default=b'',
623 )
623 )
624 coreconfigitem(
624 coreconfigitem(
625 b'devel',
625 b'devel',
626 b'cache-vfs',
626 b'cache-vfs',
627 default=None,
627 default=None,
628 )
628 )
629 coreconfigitem(
629 coreconfigitem(
630 b'devel',
630 b'devel',
631 b'check-locks',
631 b'check-locks',
632 default=False,
632 default=False,
633 )
633 )
634 coreconfigitem(
634 coreconfigitem(
635 b'devel',
635 b'devel',
636 b'check-relroot',
636 b'check-relroot',
637 default=False,
637 default=False,
638 )
638 )
639 # Track copy information for all file, not just "added" one (very slow)
639 # Track copy information for all file, not just "added" one (very slow)
640 coreconfigitem(
640 coreconfigitem(
641 b'devel',
641 b'devel',
642 b'copy-tracing.trace-all-files',
642 b'copy-tracing.trace-all-files',
643 default=False,
643 default=False,
644 )
644 )
645 coreconfigitem(
645 coreconfigitem(
646 b'devel',
646 b'devel',
647 b'default-date',
647 b'default-date',
648 default=None,
648 default=None,
649 )
649 )
650 coreconfigitem(
650 coreconfigitem(
651 b'devel',
651 b'devel',
652 b'deprec-warn',
652 b'deprec-warn',
653 default=False,
653 default=False,
654 )
654 )
655 coreconfigitem(
655 coreconfigitem(
656 b'devel',
656 b'devel',
657 b'disableloaddefaultcerts',
657 b'disableloaddefaultcerts',
658 default=False,
658 default=False,
659 )
659 )
660 coreconfigitem(
660 coreconfigitem(
661 b'devel',
661 b'devel',
662 b'warn-empty-changegroup',
662 b'warn-empty-changegroup',
663 default=False,
663 default=False,
664 )
664 )
665 coreconfigitem(
665 coreconfigitem(
666 b'devel',
666 b'devel',
667 b'legacy.exchange',
667 b'legacy.exchange',
668 default=list,
668 default=list,
669 )
669 )
670 # When True, revlogs use a special reference version of the nodemap, that is not
670 # When True, revlogs use a special reference version of the nodemap, that is not
671 # performant but is "known" to behave properly.
671 # performant but is "known" to behave properly.
672 coreconfigitem(
672 coreconfigitem(
673 b'devel',
673 b'devel',
674 b'persistent-nodemap',
674 b'persistent-nodemap',
675 default=False,
675 default=False,
676 )
676 )
677 coreconfigitem(
677 coreconfigitem(
678 b'devel',
678 b'devel',
679 b'servercafile',
679 b'servercafile',
680 default=b'',
680 default=b'',
681 )
681 )
682 coreconfigitem(
682 coreconfigitem(
683 b'devel',
683 b'devel',
684 b'serverexactprotocol',
684 b'serverexactprotocol',
685 default=b'',
685 default=b'',
686 )
686 )
687 coreconfigitem(
687 coreconfigitem(
688 b'devel',
688 b'devel',
689 b'serverrequirecert',
689 b'serverrequirecert',
690 default=False,
690 default=False,
691 )
691 )
692 coreconfigitem(
692 coreconfigitem(
693 b'devel',
693 b'devel',
694 b'strip-obsmarkers',
694 b'strip-obsmarkers',
695 default=True,
695 default=True,
696 )
696 )
697 coreconfigitem(
697 coreconfigitem(
698 b'devel',
698 b'devel',
699 b'warn-config',
699 b'warn-config',
700 default=None,
700 default=None,
701 )
701 )
702 coreconfigitem(
702 coreconfigitem(
703 b'devel',
703 b'devel',
704 b'warn-config-default',
704 b'warn-config-default',
705 default=None,
705 default=None,
706 )
706 )
707 coreconfigitem(
707 coreconfigitem(
708 b'devel',
708 b'devel',
709 b'user.obsmarker',
709 b'user.obsmarker',
710 default=None,
710 default=None,
711 )
711 )
712 coreconfigitem(
712 coreconfigitem(
713 b'devel',
713 b'devel',
714 b'warn-config-unknown',
714 b'warn-config-unknown',
715 default=None,
715 default=None,
716 )
716 )
717 coreconfigitem(
717 coreconfigitem(
718 b'devel',
718 b'devel',
719 b'debug.copies',
719 b'debug.copies',
720 default=False,
720 default=False,
721 )
721 )
722 coreconfigitem(
722 coreconfigitem(
723 b'devel',
723 b'devel',
724 b'copy-tracing.multi-thread',
724 b'copy-tracing.multi-thread',
725 default=True,
725 default=True,
726 )
726 )
727 coreconfigitem(
727 coreconfigitem(
728 b'devel',
728 b'devel',
729 b'debug.extensions',
729 b'debug.extensions',
730 default=False,
730 default=False,
731 )
731 )
732 coreconfigitem(
732 coreconfigitem(
733 b'devel',
733 b'devel',
734 b'debug.repo-filters',
734 b'debug.repo-filters',
735 default=False,
735 default=False,
736 )
736 )
# Developer knob: trace every request issued to a peer (protocol debugging).
coreconfigitem(b'devel', b'debug.peer-request', default=False)
# If discovery.exchange-heads is False, the discovery will not start with
# remote head fetching and local head querying.
coreconfigitem(b'devel', b'discovery.exchange-heads', default=True)
# If discovery.grow-sample is False, the sample size used in set discovery
# will not be increased through the process.
coreconfigitem(b'devel', b'discovery.grow-sample', default=True)
# When discovery.grow-sample.dynamic is True (the default), the sample size
# is adapted to the shape of the undecided set: it is set to the max of
# <target-size>, len(roots(undecided)), len(heads(undecided)).
coreconfigitem(b'devel', b'discovery.grow-sample.dynamic', default=True)
# discovery.grow-sample.rate controls the rate at which the sample grows.
coreconfigitem(b'devel', b'discovery.grow-sample.rate', default=1.05)
# If discovery.randomize is False, random sampling during discovery is
# deterministic. It is meant for integration tests.
coreconfigitem(b'devel', b'discovery.randomize', default=True)
# Controls the initial size of the discovery sample.
coreconfigitem(b'devel', b'discovery.sample-size', default=200)
# Controls the initial size of the discovery for an initial change.
coreconfigitem(b'devel', b'discovery.sample-size.initial', default=100)
_registerdiffopts(section=b'diff')
coreconfigitem(b'diff', b'merge', default=False, experimental=True)
# Standard outgoing-mail settings (used by patchbomb & friends).
coreconfigitem(b'email', b'bcc', default=None)
coreconfigitem(b'email', b'cc', default=None)
coreconfigitem(b'email', b'charsets', default=list)
coreconfigitem(b'email', b'from', default=None)
coreconfigitem(b'email', b'method', default=b'smtp')
coreconfigitem(b'email', b'reply-to', default=None)
coreconfigitem(b'email', b'to', default=None)
coreconfigitem(b'experimental', b'archivemetatemplate', default=dynamicdefault)
coreconfigitem(b'experimental', b'auto-publish', default=b'publish')
coreconfigitem(b'experimental', b'bundle-phases', default=False)
coreconfigitem(b'experimental', b'bundle2-advertise', default=True)
coreconfigitem(b'experimental', b'bundle2-output-capture', default=False)
coreconfigitem(b'experimental', b'bundle2.pushback', default=False)
coreconfigitem(b'experimental', b'bundle2lazylocking', default=False)
# Per-engine bundle compression level (None means the engine default).
coreconfigitem(b'experimental', b'bundlecomplevel', default=None)
coreconfigitem(b'experimental', b'bundlecomplevel.bzip2', default=None)
coreconfigitem(b'experimental', b'bundlecomplevel.gzip', default=None)
coreconfigitem(b'experimental', b'bundlecomplevel.none', default=None)
coreconfigitem(b'experimental', b'bundlecomplevel.zstd', default=None)
# Per-engine bundle compression thread count (None means the engine default).
coreconfigitem(b'experimental', b'bundlecompthreads', default=None)
coreconfigitem(b'experimental', b'bundlecompthreads.bzip2', default=None)
coreconfigitem(b'experimental', b'bundlecompthreads.gzip', default=None)
coreconfigitem(b'experimental', b'bundlecompthreads.none', default=None)
coreconfigitem(b'experimental', b'bundlecompthreads.zstd', default=None)
coreconfigitem(b'experimental', b'changegroup3', default=False)
coreconfigitem(b'experimental', b'changegroup4', default=False)
coreconfigitem(b'experimental', b'cleanup-as-archived', default=False)
coreconfigitem(b'experimental', b'clientcompressionengines', default=list)
coreconfigitem(b'experimental', b'copytrace', default=b'on')
coreconfigitem(b'experimental', b'copytrace.movecandidateslimit', default=100)
coreconfigitem(b'experimental', b'copytrace.sourcecommitlimit', default=100)
coreconfigitem(b'experimental', b'copies.read-from', default=b"filelog-only")
coreconfigitem(b'experimental', b'copies.write-to', default=b'filelog-only')
coreconfigitem(b'experimental', b'crecordtest', default=None)
coreconfigitem(b'experimental', b'directaccess', default=False)
coreconfigitem(b'experimental', b'directaccess.revnums', default=False)
coreconfigitem(b'experimental', b'editortmpinhg', default=False)
coreconfigitem(b'experimental', b'evolution', default=list)
coreconfigitem(
    b'experimental',
    b'evolution.allowdivergence',
    default=False,
    alias=[(b'experimental', b'allowdivergence')],
)
coreconfigitem(b'experimental', b'evolution.allowunstable', default=None)
coreconfigitem(b'experimental', b'evolution.createmarkers', default=None)
coreconfigitem(
    b'experimental',
    b'evolution.effect-flags',
    default=True,
    alias=[(b'experimental', b'effect-flags')],
)
coreconfigitem(b'experimental', b'evolution.exchange', default=None)
coreconfigitem(b'experimental', b'evolution.bundle-obsmarker', default=False)
coreconfigitem(
    b'experimental',
    b'evolution.bundle-obsmarker:mandatory',
    default=True,
)
coreconfigitem(b'experimental', b'log.topo', default=False)
coreconfigitem(b'experimental', b'evolution.report-instabilities', default=True)
coreconfigitem(b'experimental', b'evolution.track-operation', default=True)
# Repo-level config to exclude a revset from visibility.
#
# The target use case is to use `share` to expose different subsets of the
# same repository, especially server side. See also `server.view`.
coreconfigitem(b'experimental', b'extra-filter-revs', default=None)
coreconfigitem(b'experimental', b'maxdeltachainspan', default=-1)
# Tracks files which were undeleted (a merge might delete them but we
# explicitly kept/undeleted them) and creates new filenodes for them.
coreconfigitem(b'experimental', b'merge-track-salvaged', default=False)
coreconfigitem(b'experimental', b'mmapindexthreshold', default=None)
coreconfigitem(b'experimental', b'narrow', default=False)
coreconfigitem(b'experimental', b'nonnormalparanoidcheck', default=False)
coreconfigitem(b'experimental', b'exportableenviron', default=list)
coreconfigitem(b'experimental', b'extendedheader.index', default=None)
coreconfigitem(b'experimental', b'extendedheader.similarity', default=False)
coreconfigitem(b'experimental', b'graphshorten', default=False)
coreconfigitem(b'experimental', b'graphstyle.parent', default=dynamicdefault)
coreconfigitem(b'experimental', b'graphstyle.missing', default=dynamicdefault)
coreconfigitem(
    b'experimental', b'graphstyle.grandparent', default=dynamicdefault
)
coreconfigitem(b'experimental', b'hook-track-tags', default=False)
coreconfigitem(b'experimental', b'httppostargs', default=False)
coreconfigitem(b'experimental', b'nointerrupt', default=False)
coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)

coreconfigitem(b'experimental', b'obsmarkers-exchange-debug', default=False)
coreconfigitem(b'experimental', b'remotenames', default=False)
coreconfigitem(b'experimental', b'removeemptydirs', default=True)
coreconfigitem(
    b'experimental', b'revert.interactive.select-to-keep', default=False
)
coreconfigitem(b'experimental', b'revisions.prefixhexnode', default=False)
# "out of experimental" todo list.
#
# * include management of a persistent nodemap in the main docket
# * enforce a "no-truncate" policy for mmap safety
#   - for censoring operation
#   - for stripping operation
#   - for rollback operation
# * proper streaming (race free) of the docket file
# * track garbage data to eventually allow rewriting -existing- sidedata.
# * Exchange-wise, we will also need to do something more efficient than
#   keeping references to the affected revlogs, especially memory-wise when
#   rewriting sidedata.
# * introduce a proper solution to reduce the number of filelog related files.
# * use caching for reading sidedata (similar to what we do for data).
# * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
# * Improvement to consider
#   - avoid compression header in chunk using the default compression?
#   - forbid "inline" compression mode entirely?
#   - split the data offset and flag field (the 2 bytes save are mostly
#     trouble)
#   - keep track of uncompressed -chunk- size (to preallocate memory better)
#   - keep track of chain base or size (probably not that useful anymore)
coreconfigitem(b'experimental', b'revlogv2', default=None)
coreconfigitem(b'experimental', b'revisions.disambiguatewithin', default=None)
coreconfigitem(b'experimental', b'rust.index', default=False)
coreconfigitem(
    b'experimental',
    b'server.filesdata.recommended-batch-size',
    default=50000,
)
coreconfigitem(
    b'experimental',
    b'server.manifestdata.recommended-batch-size',
    default=100000,
)
coreconfigitem(b'experimental', b'server.stream-narrow-clones', default=False)
coreconfigitem(b'experimental', b'single-head-per-branch', default=False)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:account-closed-heads',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:public-changes-only',
    default=False,
)
coreconfigitem(b'experimental', b'sparse-read', default=False)
coreconfigitem(
    b'experimental', b'sparse-read.density-threshold', default=0.50
)
coreconfigitem(b'experimental', b'sparse-read.min-gap-size', default=b'65K')
coreconfigitem(b'experimental', b'treemanifest', default=False)
coreconfigitem(b'experimental', b'update.atomic-file', default=False)
coreconfigitem(
    b'experimental',
    b'web.full-garbage-collection-rate',
    default=1,  # still forcing a full collection on each request
)
coreconfigitem(b'experimental', b'worker.wdir-get-thread-safe', default=False)
coreconfigitem(b'experimental', b'worker.repository-upgrade', default=False)
coreconfigitem(b'experimental', b'xdiff', default=False)
# Generic (pattern-matched) items for extension and extdata configuration.
coreconfigitem(b'extensions', b'[^:]*', default=None, generic=True)
coreconfigitem(b'extensions', b'[^:]*:required', default=False, generic=True)
coreconfigitem(b'extdata', b'.*', default=None, generic=True)
coreconfigitem(b'format', b'bookmarks-in-store', default=False)
coreconfigitem(b'format', b'chunkcachesize', default=None, experimental=True)
coreconfigitem(
    # Enable this dirstate format *when creating a new repository*.
    # Which format to use for existing repos is controlled by .hg/requires
    b'format',
    b'use-dirstate-v2',
    default=False,
    experimental=True,
    alias=[(b'format', b'exp-rc-dirstate-v2')],
)
coreconfigitem(
    b'format',
    b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format', b'use-dirstate-tracked-hint', default=False, experimental=True
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint.version',
    default=1,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet',
    default=False,
    experimental=True,
)
coreconfigitem(b'format', b'dotencode', default=True)
coreconfigitem(b'format', b'generaldelta', default=False, experimental=True)
coreconfigitem(
    b'format', b'manifestcachesize', default=None, experimental=True
)
coreconfigitem(
    b'format', b'maxchainlen', default=dynamicdefault, experimental=True
)
coreconfigitem(b'format', b'obsstore-version', default=None)
coreconfigitem(b'format', b'sparse-revlog', default=True)
coreconfigitem(
    b'format',
    b'revlog-compression',
    default=lambda: [b'zstd', b'zlib'],
    alias=[(b'experimental', b'format.compression')],
)
# Experimental TODOs:
#
# * Same as for revlogv2 (but for the reduction of the number of files)
# * Actually computing the rank of changesets
# * Improvement to investigate
#   - storing .hgtags fnode
#   - storing branch related identifier

coreconfigitem(
    b'format', b'exp-use-changelog-v2', default=None, experimental=True
)
coreconfigitem(b'format', b'usefncache', default=True)
coreconfigitem(b'format', b'usegeneraldelta', default=True)
coreconfigitem(b'format', b'usestore', default=True)
1400
1400
1401
1401
def _persistent_nodemap_default():
    """Compute the default value of `format.use-persistent-nodemap`.

    The feature stays disabled unless a fast implementation is available,
    i.e. unless the Rust `revlog` module can be imported.
    """
    # Imported lazily to avoid a module-load-time dependency on `policy`.
    from . import policy

    has_fast_implementation = policy.importrust('revlog') is not None
    return has_fast_implementation
1410
1410
1411
1411
coreconfigitem(
    b'format', b'use-persistent-nodemap', default=_persistent_nodemap_default
)
coreconfigitem(
    b'format',
    b'exp-use-copies-side-data-changeset',
    default=False,
    experimental=True,
)
coreconfigitem(b'format', b'use-share-safe', default=True)
coreconfigitem(
    b'format',
    b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet',
    default=False,
    experimental=True,
)

# Moving this on by default means we are confident about the scaling of
# phases. This is not guaranteed to be the case at the time this message is
# written.
coreconfigitem(
    b'format', b'use-internal-phase', default=False, experimental=True
)
# The interaction between the archived phase and obsolescence markers needs
# to be sorted out before wider usage of this is to be considered.
#
# At the time this message is written, behavior when archiving an obsolete
# changeset differs significantly from stripping. As part of stripping, we
# also remove the obsolescence markers associated with the stripped
# changesets, revealing the predecessor changesets when applicable. When
# archiving, we don't touch the obsolescence markers, keeping everything
# hidden. This can result in quite confusing situations for people combining
# exchanging drafts with the archived phase, as some markers needed by
# others may be skipped during exchange.
coreconfigitem(
    b'format', b'exp-archived-phase', default=False, experimental=True
)
coreconfigitem(b'shelve', b'store', default=b'internal', experimental=True)
coreconfigitem(b'fsmonitor', b'warn_when_unused', default=True)
coreconfigitem(b'fsmonitor', b'warn_update_file_count', default=50000)
coreconfigitem(b'fsmonitor', b'warn_update_file_count_rust', default=400000)
coreconfigitem(b'help', br'hidden-command\..*', default=False, generic=True)
coreconfigitem(b'help', br'hidden-topic\..*', default=False, generic=True)
coreconfigitem(b'hooks', b'[^:]*', default=dynamicdefault, generic=True)
coreconfigitem(b'hooks', b'.*:run-with-plain', default=True, generic=True)
coreconfigitem(b'hgweb-paths', b'.*', default=list, generic=True)
1517 coreconfigitem(
1517 coreconfigitem(
1518 b'hostfingerprints',
1518 b'hostfingerprints',
1519 b'.*',
1519 b'.*',
1520 default=list,
1520 default=list,
1521 generic=True,
1521 generic=True,
1522 )
1522 )
1523 coreconfigitem(
1523 coreconfigitem(
1524 b'hostsecurity',
1524 b'hostsecurity',
1525 b'ciphers',
1525 b'ciphers',
1526 default=None,
1526 default=None,
1527 )
1527 )
1528 coreconfigitem(
1528 coreconfigitem(
1529 b'hostsecurity',
1529 b'hostsecurity',
1530 b'minimumprotocol',
1530 b'minimumprotocol',
1531 default=dynamicdefault,
1531 default=dynamicdefault,
1532 )
1532 )
1533 coreconfigitem(
1533 coreconfigitem(
1534 b'hostsecurity',
1534 b'hostsecurity',
1535 b'.*:minimumprotocol$',
1535 b'.*:minimumprotocol$',
1536 default=dynamicdefault,
1536 default=dynamicdefault,
1537 generic=True,
1537 generic=True,
1538 )
1538 )
1539 coreconfigitem(
1539 coreconfigitem(
1540 b'hostsecurity',
1540 b'hostsecurity',
1541 b'.*:ciphers$',
1541 b'.*:ciphers$',
1542 default=dynamicdefault,
1542 default=dynamicdefault,
1543 generic=True,
1543 generic=True,
1544 )
1544 )
1545 coreconfigitem(
1545 coreconfigitem(
1546 b'hostsecurity',
1546 b'hostsecurity',
1547 b'.*:fingerprints$',
1547 b'.*:fingerprints$',
1548 default=list,
1548 default=list,
1549 generic=True,
1549 generic=True,
1550 )
1550 )
1551 coreconfigitem(
1551 coreconfigitem(
1552 b'hostsecurity',
1552 b'hostsecurity',
1553 b'.*:verifycertsfile$',
1553 b'.*:verifycertsfile$',
1554 default=None,
1554 default=None,
1555 generic=True,
1555 generic=True,
1556 )
1556 )
1557
1557
1558 coreconfigitem(
1558 coreconfigitem(
1559 b'http_proxy',
1559 b'http_proxy',
1560 b'always',
1560 b'always',
1561 default=False,
1561 default=False,
1562 )
1562 )
1563 coreconfigitem(
1563 coreconfigitem(
1564 b'http_proxy',
1564 b'http_proxy',
1565 b'host',
1565 b'host',
1566 default=None,
1566 default=None,
1567 )
1567 )
1568 coreconfigitem(
1568 coreconfigitem(
1569 b'http_proxy',
1569 b'http_proxy',
1570 b'no',
1570 b'no',
1571 default=list,
1571 default=list,
1572 )
1572 )
1573 coreconfigitem(
1573 coreconfigitem(
1574 b'http_proxy',
1574 b'http_proxy',
1575 b'passwd',
1575 b'passwd',
1576 default=None,
1576 default=None,
1577 )
1577 )
1578 coreconfigitem(
1578 coreconfigitem(
1579 b'http_proxy',
1579 b'http_proxy',
1580 b'user',
1580 b'user',
1581 default=None,
1581 default=None,
1582 )
1582 )
1583
1583
1584 coreconfigitem(
1584 coreconfigitem(
1585 b'http',
1585 b'http',
1586 b'timeout',
1586 b'timeout',
1587 default=None,
1587 default=None,
1588 )
1588 )
1589
1589
1590 coreconfigitem(
1590 coreconfigitem(
1591 b'logtoprocess',
1591 b'logtoprocess',
1592 b'commandexception',
1592 b'commandexception',
1593 default=None,
1593 default=None,
1594 )
1594 )
1595 coreconfigitem(
1595 coreconfigitem(
1596 b'logtoprocess',
1596 b'logtoprocess',
1597 b'commandfinish',
1597 b'commandfinish',
1598 default=None,
1598 default=None,
1599 )
1599 )
1600 coreconfigitem(
1600 coreconfigitem(
1601 b'logtoprocess',
1601 b'logtoprocess',
1602 b'command',
1602 b'command',
1603 default=None,
1603 default=None,
1604 )
1604 )
1605 coreconfigitem(
1605 coreconfigitem(
1606 b'logtoprocess',
1606 b'logtoprocess',
1607 b'develwarn',
1607 b'develwarn',
1608 default=None,
1608 default=None,
1609 )
1609 )
1610 coreconfigitem(
1610 coreconfigitem(
1611 b'logtoprocess',
1611 b'logtoprocess',
1612 b'uiblocked',
1612 b'uiblocked',
1613 default=None,
1613 default=None,
1614 )
1614 )
1615 coreconfigitem(
1615 coreconfigitem(
1616 b'merge',
1616 b'merge',
1617 b'checkunknown',
1617 b'checkunknown',
1618 default=b'abort',
1618 default=b'abort',
1619 )
1619 )
1620 coreconfigitem(
1620 coreconfigitem(
1621 b'merge',
1621 b'merge',
1622 b'checkignored',
1622 b'checkignored',
1623 default=b'abort',
1623 default=b'abort',
1624 )
1624 )
1625 coreconfigitem(
1625 coreconfigitem(
1626 b'experimental',
1626 b'experimental',
1627 b'merge.checkpathconflicts',
1627 b'merge.checkpathconflicts',
1628 default=False,
1628 default=False,
1629 )
1629 )
1630 coreconfigitem(
1630 coreconfigitem(
1631 b'merge',
1631 b'merge',
1632 b'followcopies',
1632 b'followcopies',
1633 default=True,
1633 default=True,
1634 )
1634 )
1635 coreconfigitem(
1635 coreconfigitem(
1636 b'merge',
1636 b'merge',
1637 b'on-failure',
1637 b'on-failure',
1638 default=b'continue',
1638 default=b'continue',
1639 )
1639 )
1640 coreconfigitem(
1640 coreconfigitem(
1641 b'merge',
1641 b'merge',
1642 b'preferancestor',
1642 b'preferancestor',
1643 default=lambda: [b'*'],
1643 default=lambda: [b'*'],
1644 experimental=True,
1644 experimental=True,
1645 )
1645 )
1646 coreconfigitem(
1646 coreconfigitem(
1647 b'merge',
1647 b'merge',
1648 b'strict-capability-check',
1648 b'strict-capability-check',
1649 default=False,
1649 default=False,
1650 )
1650 )
1651 coreconfigitem(
1651 coreconfigitem(
1652 b'merge',
1652 b'merge',
1653 b'disable-partial-tools',
1653 b'disable-partial-tools',
1654 default=False,
1654 default=False,
1655 experimental=True,
1655 experimental=True,
1656 )
1656 )
1657 coreconfigitem(
1657 coreconfigitem(
1658 b'partial-merge-tools',
1658 b'partial-merge-tools',
1659 b'.*',
1659 b'.*',
1660 default=None,
1660 default=None,
1661 generic=True,
1661 generic=True,
1662 experimental=True,
1662 experimental=True,
1663 )
1663 )
1664 coreconfigitem(
1664 coreconfigitem(
1665 b'partial-merge-tools',
1665 b'partial-merge-tools',
1666 br'.*\.patterns',
1666 br'.*\.patterns',
1667 default=dynamicdefault,
1667 default=dynamicdefault,
1668 generic=True,
1668 generic=True,
1669 priority=-1,
1669 priority=-1,
1670 experimental=True,
1670 experimental=True,
1671 )
1671 )
1672 coreconfigitem(
1672 coreconfigitem(
1673 b'partial-merge-tools',
1673 b'partial-merge-tools',
1674 br'.*\.executable$',
1674 br'.*\.executable$',
1675 default=dynamicdefault,
1675 default=dynamicdefault,
1676 generic=True,
1676 generic=True,
1677 priority=-1,
1677 priority=-1,
1678 experimental=True,
1678 experimental=True,
1679 )
1679 )
1680 coreconfigitem(
1680 coreconfigitem(
1681 b'partial-merge-tools',
1681 b'partial-merge-tools',
1682 br'.*\.order',
1682 br'.*\.order',
1683 default=0,
1683 default=0,
1684 generic=True,
1684 generic=True,
1685 priority=-1,
1685 priority=-1,
1686 experimental=True,
1686 experimental=True,
1687 )
1687 )
1688 coreconfigitem(
1688 coreconfigitem(
1689 b'partial-merge-tools',
1689 b'partial-merge-tools',
1690 br'.*\.args',
1690 br'.*\.args',
1691 default=b"$local $base $other",
1691 default=b"$local $base $other",
1692 generic=True,
1692 generic=True,
1693 priority=-1,
1693 priority=-1,
1694 experimental=True,
1694 experimental=True,
1695 )
1695 )
1696 coreconfigitem(
1696 coreconfigitem(
1697 b'partial-merge-tools',
1697 b'partial-merge-tools',
1698 br'.*\.disable',
1698 br'.*\.disable',
1699 default=False,
1699 default=False,
1700 generic=True,
1700 generic=True,
1701 priority=-1,
1701 priority=-1,
1702 experimental=True,
1702 experimental=True,
1703 )
1703 )
1704 coreconfigitem(
1704 coreconfigitem(
1705 b'merge-tools',
1705 b'merge-tools',
1706 b'.*',
1706 b'.*',
1707 default=None,
1707 default=None,
1708 generic=True,
1708 generic=True,
1709 )
1709 )
1710 coreconfigitem(
1710 coreconfigitem(
1711 b'merge-tools',
1711 b'merge-tools',
1712 br'.*\.args$',
1712 br'.*\.args$',
1713 default=b"$local $base $other",
1713 default=b"$local $base $other",
1714 generic=True,
1714 generic=True,
1715 priority=-1,
1715 priority=-1,
1716 )
1716 )
1717 coreconfigitem(
1717 coreconfigitem(
1718 b'merge-tools',
1718 b'merge-tools',
1719 br'.*\.binary$',
1719 br'.*\.binary$',
1720 default=False,
1720 default=False,
1721 generic=True,
1721 generic=True,
1722 priority=-1,
1722 priority=-1,
1723 )
1723 )
1724 coreconfigitem(
1724 coreconfigitem(
1725 b'merge-tools',
1725 b'merge-tools',
1726 br'.*\.check$',
1726 br'.*\.check$',
1727 default=list,
1727 default=list,
1728 generic=True,
1728 generic=True,
1729 priority=-1,
1729 priority=-1,
1730 )
1730 )
1731 coreconfigitem(
1731 coreconfigitem(
1732 b'merge-tools',
1732 b'merge-tools',
1733 br'.*\.checkchanged$',
1733 br'.*\.checkchanged$',
1734 default=False,
1734 default=False,
1735 generic=True,
1735 generic=True,
1736 priority=-1,
1736 priority=-1,
1737 )
1737 )
1738 coreconfigitem(
1738 coreconfigitem(
1739 b'merge-tools',
1739 b'merge-tools',
1740 br'.*\.executable$',
1740 br'.*\.executable$',
1741 default=dynamicdefault,
1741 default=dynamicdefault,
1742 generic=True,
1742 generic=True,
1743 priority=-1,
1743 priority=-1,
1744 )
1744 )
1745 coreconfigitem(
1745 coreconfigitem(
1746 b'merge-tools',
1746 b'merge-tools',
1747 br'.*\.fixeol$',
1747 br'.*\.fixeol$',
1748 default=False,
1748 default=False,
1749 generic=True,
1749 generic=True,
1750 priority=-1,
1750 priority=-1,
1751 )
1751 )
1752 coreconfigitem(
1752 coreconfigitem(
1753 b'merge-tools',
1753 b'merge-tools',
1754 br'.*\.gui$',
1754 br'.*\.gui$',
1755 default=False,
1755 default=False,
1756 generic=True,
1756 generic=True,
1757 priority=-1,
1757 priority=-1,
1758 )
1758 )
1759 coreconfigitem(
1759 coreconfigitem(
1760 b'merge-tools',
1760 b'merge-tools',
1761 br'.*\.mergemarkers$',
1761 br'.*\.mergemarkers$',
1762 default=b'basic',
1762 default=b'basic',
1763 generic=True,
1763 generic=True,
1764 priority=-1,
1764 priority=-1,
1765 )
1765 )
1766 coreconfigitem(
1766 coreconfigitem(
1767 b'merge-tools',
1767 b'merge-tools',
1768 br'.*\.mergemarkertemplate$',
1768 br'.*\.mergemarkertemplate$',
1769 default=dynamicdefault, # take from command-templates.mergemarker
1769 default=dynamicdefault, # take from command-templates.mergemarker
1770 generic=True,
1770 generic=True,
1771 priority=-1,
1771 priority=-1,
1772 )
1772 )
1773 coreconfigitem(
1773 coreconfigitem(
1774 b'merge-tools',
1774 b'merge-tools',
1775 br'.*\.priority$',
1775 br'.*\.priority$',
1776 default=0,
1776 default=0,
1777 generic=True,
1777 generic=True,
1778 priority=-1,
1778 priority=-1,
1779 )
1779 )
1780 coreconfigitem(
1780 coreconfigitem(
1781 b'merge-tools',
1781 b'merge-tools',
1782 br'.*\.premerge$',
1782 br'.*\.premerge$',
1783 default=dynamicdefault,
1783 default=dynamicdefault,
1784 generic=True,
1784 generic=True,
1785 priority=-1,
1785 priority=-1,
1786 )
1786 )
1787 coreconfigitem(
1787 coreconfigitem(
1788 b'merge-tools',
1788 b'merge-tools',
1789 br'.*\.regappend$',
1789 br'.*\.regappend$',
1790 default=b"",
1790 default=b"",
1791 generic=True,
1791 generic=True,
1792 priority=-1,
1792 priority=-1,
1793 )
1793 )
1794 coreconfigitem(
1794 coreconfigitem(
1795 b'merge-tools',
1795 b'merge-tools',
1796 br'.*\.symlink$',
1796 br'.*\.symlink$',
1797 default=False,
1797 default=False,
1798 generic=True,
1798 generic=True,
1799 priority=-1,
1799 priority=-1,
1800 )
1800 )
1801 coreconfigitem(
1801 coreconfigitem(
1802 b'pager',
1802 b'pager',
1803 b'attend-.*',
1803 b'attend-.*',
1804 default=dynamicdefault,
1804 default=dynamicdefault,
1805 generic=True,
1805 generic=True,
1806 )
1806 )
1807 coreconfigitem(
1807 coreconfigitem(
1808 b'pager',
1808 b'pager',
1809 b'ignore',
1809 b'ignore',
1810 default=list,
1810 default=list,
1811 )
1811 )
1812 coreconfigitem(
1812 coreconfigitem(
1813 b'pager',
1813 b'pager',
1814 b'pager',
1814 b'pager',
1815 default=dynamicdefault,
1815 default=dynamicdefault,
1816 )
1816 )
1817 coreconfigitem(
1817 coreconfigitem(
1818 b'patch',
1818 b'patch',
1819 b'eol',
1819 b'eol',
1820 default=b'strict',
1820 default=b'strict',
1821 )
1821 )
1822 coreconfigitem(
1822 coreconfigitem(
1823 b'patch',
1823 b'patch',
1824 b'fuzz',
1824 b'fuzz',
1825 default=2,
1825 default=2,
1826 )
1826 )
1827 coreconfigitem(
1827 coreconfigitem(
1828 b'paths',
1828 b'paths',
1829 b'default',
1829 b'default',
1830 default=None,
1830 default=None,
1831 )
1831 )
1832 coreconfigitem(
1832 coreconfigitem(
1833 b'paths',
1833 b'paths',
1834 b'default-push',
1834 b'default-push',
1835 default=None,
1835 default=None,
1836 )
1836 )
1837 coreconfigitem(
1837 coreconfigitem(
1838 b'paths',
1838 b'paths',
1839 b'.*',
1839 b'.*',
1840 default=None,
1840 default=None,
1841 generic=True,
1841 generic=True,
1842 )
1842 )
1843 coreconfigitem(
1843 coreconfigitem(
1844 b'paths',
1844 b'paths',
1845 b'.*:bookmarks.mode',
1845 b'.*:bookmarks.mode',
1846 default='default',
1846 default='default',
1847 generic=True,
1847 generic=True,
1848 )
1848 )
1849 coreconfigitem(
1849 coreconfigitem(
1850 b'paths',
1850 b'paths',
1851 b'.*:multi-urls',
1851 b'.*:multi-urls',
1852 default=False,
1852 default=False,
1853 generic=True,
1853 generic=True,
1854 )
1854 )
1855 coreconfigitem(
1855 coreconfigitem(
1856 b'paths',
1856 b'paths',
1857 b'.*:pushrev',
1857 b'.*:pushrev',
1858 default=None,
1858 default=None,
1859 generic=True,
1859 generic=True,
1860 )
1860 )
1861 coreconfigitem(
1861 coreconfigitem(
1862 b'paths',
1862 b'paths',
1863 b'.*:pushurl',
1863 b'.*:pushurl',
1864 default=None,
1864 default=None,
1865 generic=True,
1865 generic=True,
1866 )
1866 )
1867 coreconfigitem(
1867 coreconfigitem(
1868 b'phases',
1868 b'phases',
1869 b'checksubrepos',
1869 b'checksubrepos',
1870 default=b'follow',
1870 default=b'follow',
1871 )
1871 )
1872 coreconfigitem(
1872 coreconfigitem(
1873 b'phases',
1873 b'phases',
1874 b'new-commit',
1874 b'new-commit',
1875 default=b'draft',
1875 default=b'draft',
1876 )
1876 )
1877 coreconfigitem(
1877 coreconfigitem(
1878 b'phases',
1878 b'phases',
1879 b'publish',
1879 b'publish',
1880 default=True,
1880 default=True,
1881 )
1881 )
1882 coreconfigitem(
1882 coreconfigitem(
1883 b'profiling',
1883 b'profiling',
1884 b'enabled',
1884 b'enabled',
1885 default=False,
1885 default=False,
1886 )
1886 )
1887 coreconfigitem(
1887 coreconfigitem(
1888 b'profiling',
1888 b'profiling',
1889 b'format',
1889 b'format',
1890 default=b'text',
1890 default=b'text',
1891 )
1891 )
1892 coreconfigitem(
1892 coreconfigitem(
1893 b'profiling',
1893 b'profiling',
1894 b'freq',
1894 b'freq',
1895 default=1000,
1895 default=1000,
1896 )
1896 )
1897 coreconfigitem(
1897 coreconfigitem(
1898 b'profiling',
1898 b'profiling',
1899 b'limit',
1899 b'limit',
1900 default=30,
1900 default=30,
1901 )
1901 )
1902 coreconfigitem(
1902 coreconfigitem(
1903 b'profiling',
1903 b'profiling',
1904 b'nested',
1904 b'nested',
1905 default=0,
1905 default=0,
1906 )
1906 )
1907 coreconfigitem(
1907 coreconfigitem(
1908 b'profiling',
1908 b'profiling',
1909 b'output',
1909 b'output',
1910 default=None,
1910 default=None,
1911 )
1911 )
1912 coreconfigitem(
1912 coreconfigitem(
1913 b'profiling',
1913 b'profiling',
1914 b'showmax',
1914 b'showmax',
1915 default=0.999,
1915 default=0.999,
1916 )
1916 )
1917 coreconfigitem(
1917 coreconfigitem(
1918 b'profiling',
1918 b'profiling',
1919 b'showmin',
1919 b'showmin',
1920 default=dynamicdefault,
1920 default=dynamicdefault,
1921 )
1921 )
1922 coreconfigitem(
1922 coreconfigitem(
1923 b'profiling',
1923 b'profiling',
1924 b'showtime',
1924 b'showtime',
1925 default=True,
1925 default=True,
1926 )
1926 )
1927 coreconfigitem(
1927 coreconfigitem(
1928 b'profiling',
1928 b'profiling',
1929 b'sort',
1929 b'sort',
1930 default=b'inlinetime',
1930 default=b'inlinetime',
1931 )
1931 )
1932 coreconfigitem(
1932 coreconfigitem(
1933 b'profiling',
1933 b'profiling',
1934 b'statformat',
1934 b'statformat',
1935 default=b'hotpath',
1935 default=b'hotpath',
1936 )
1936 )
1937 coreconfigitem(
1937 coreconfigitem(
1938 b'profiling',
1938 b'profiling',
1939 b'time-track',
1939 b'time-track',
1940 default=dynamicdefault,
1940 default=dynamicdefault,
1941 )
1941 )
1942 coreconfigitem(
1942 coreconfigitem(
1943 b'profiling',
1943 b'profiling',
1944 b'type',
1944 b'type',
1945 default=b'stat',
1945 default=b'stat',
1946 )
1946 )
1947 coreconfigitem(
1947 coreconfigitem(
1948 b'progress',
1948 b'progress',
1949 b'assume-tty',
1949 b'assume-tty',
1950 default=False,
1950 default=False,
1951 )
1951 )
1952 coreconfigitem(
1952 coreconfigitem(
1953 b'progress',
1953 b'progress',
1954 b'changedelay',
1954 b'changedelay',
1955 default=1,
1955 default=1,
1956 )
1956 )
1957 coreconfigitem(
1957 coreconfigitem(
1958 b'progress',
1958 b'progress',
1959 b'clear-complete',
1959 b'clear-complete',
1960 default=True,
1960 default=True,
1961 )
1961 )
1962 coreconfigitem(
1962 coreconfigitem(
1963 b'progress',
1963 b'progress',
1964 b'debug',
1964 b'debug',
1965 default=False,
1965 default=False,
1966 )
1966 )
1967 coreconfigitem(
1967 coreconfigitem(
1968 b'progress',
1968 b'progress',
1969 b'delay',
1969 b'delay',
1970 default=3,
1970 default=3,
1971 )
1971 )
1972 coreconfigitem(
1972 coreconfigitem(
1973 b'progress',
1973 b'progress',
1974 b'disable',
1974 b'disable',
1975 default=False,
1975 default=False,
1976 )
1976 )
1977 coreconfigitem(
1977 coreconfigitem(
1978 b'progress',
1978 b'progress',
1979 b'estimateinterval',
1979 b'estimateinterval',
1980 default=60.0,
1980 default=60.0,
1981 )
1981 )
1982 coreconfigitem(
1982 coreconfigitem(
1983 b'progress',
1983 b'progress',
1984 b'format',
1984 b'format',
1985 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1985 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1986 )
1986 )
1987 coreconfigitem(
1987 coreconfigitem(
1988 b'progress',
1988 b'progress',
1989 b'refresh',
1989 b'refresh',
1990 default=0.1,
1990 default=0.1,
1991 )
1991 )
1992 coreconfigitem(
1992 coreconfigitem(
1993 b'progress',
1993 b'progress',
1994 b'width',
1994 b'width',
1995 default=dynamicdefault,
1995 default=dynamicdefault,
1996 )
1996 )
1997 coreconfigitem(
1997 coreconfigitem(
1998 b'pull',
1998 b'pull',
1999 b'confirm',
1999 b'confirm',
2000 default=False,
2000 default=False,
2001 )
2001 )
2002 coreconfigitem(
2002 coreconfigitem(
2003 b'push',
2003 b'push',
2004 b'pushvars.server',
2004 b'pushvars.server',
2005 default=False,
2005 default=False,
2006 )
2006 )
2007 coreconfigitem(
2007 coreconfigitem(
2008 b'rewrite',
2008 b'rewrite',
2009 b'backup-bundle',
2009 b'backup-bundle',
2010 default=True,
2010 default=True,
2011 alias=[(b'ui', b'history-editing-backup')],
2011 alias=[(b'ui', b'history-editing-backup')],
2012 )
2012 )
2013 coreconfigitem(
2013 coreconfigitem(
2014 b'rewrite',
2014 b'rewrite',
2015 b'update-timestamp',
2015 b'update-timestamp',
2016 default=False,
2016 default=False,
2017 )
2017 )
2018 coreconfigitem(
2018 coreconfigitem(
2019 b'rewrite',
2019 b'rewrite',
2020 b'empty-successor',
2020 b'empty-successor',
2021 default=b'skip',
2021 default=b'skip',
2022 experimental=True,
2022 experimental=True,
2023 )
2023 )
2024 # experimental as long as format.use-dirstate-v2 is.
2024 # experimental as long as format.use-dirstate-v2 is.
2025 coreconfigitem(
2025 coreconfigitem(
2026 b'storage',
2026 b'storage',
2027 b'dirstate-v2.slow-path',
2027 b'dirstate-v2.slow-path',
2028 default=b"abort",
2028 default=b"abort",
2029 experimental=True,
2029 experimental=True,
2030 )
2030 )
2031 coreconfigitem(
2031 coreconfigitem(
2032 b'storage',
2032 b'storage',
2033 b'new-repo-backend',
2033 b'new-repo-backend',
2034 default=b'revlogv1',
2034 default=b'revlogv1',
2035 experimental=True,
2035 experimental=True,
2036 )
2036 )
2037 coreconfigitem(
2037 coreconfigitem(
2038 b'storage',
2038 b'storage',
2039 b'revlog.optimize-delta-parent-choice',
2039 b'revlog.optimize-delta-parent-choice',
2040 default=True,
2040 default=True,
2041 alias=[(b'format', b'aggressivemergedeltas')],
2041 alias=[(b'format', b'aggressivemergedeltas')],
2042 )
2042 )
2043 coreconfigitem(
2043 coreconfigitem(
2044 b'storage',
2044 b'storage',
2045 b'revlog.delta-parent-search.candidate-group-chunk-size',
2046 default=0,
2047 )
2048 coreconfigitem(
2049 b'storage',
2045 b'revlog.issue6528.fix-incoming',
2050 b'revlog.issue6528.fix-incoming',
2046 default=True,
2051 default=True,
2047 )
2052 )
2048 # experimental as long as rust is experimental (or a C version is implemented)
2053 # experimental as long as rust is experimental (or a C version is implemented)
2049 coreconfigitem(
2054 coreconfigitem(
2050 b'storage',
2055 b'storage',
2051 b'revlog.persistent-nodemap.mmap',
2056 b'revlog.persistent-nodemap.mmap',
2052 default=True,
2057 default=True,
2053 )
2058 )
2054 # experimental as long as format.use-persistent-nodemap is.
2059 # experimental as long as format.use-persistent-nodemap is.
2055 coreconfigitem(
2060 coreconfigitem(
2056 b'storage',
2061 b'storage',
2057 b'revlog.persistent-nodemap.slow-path',
2062 b'revlog.persistent-nodemap.slow-path',
2058 default=b"abort",
2063 default=b"abort",
2059 )
2064 )
2060
2065
2061 coreconfigitem(
2066 coreconfigitem(
2062 b'storage',
2067 b'storage',
2063 b'revlog.reuse-external-delta',
2068 b'revlog.reuse-external-delta',
2064 default=True,
2069 default=True,
2065 )
2070 )
2066 coreconfigitem(
2071 coreconfigitem(
2067 b'storage',
2072 b'storage',
2068 b'revlog.reuse-external-delta-parent',
2073 b'revlog.reuse-external-delta-parent',
2069 default=None,
2074 default=None,
2070 )
2075 )
2071 coreconfigitem(
2076 coreconfigitem(
2072 b'storage',
2077 b'storage',
2073 b'revlog.zlib.level',
2078 b'revlog.zlib.level',
2074 default=None,
2079 default=None,
2075 )
2080 )
2076 coreconfigitem(
2081 coreconfigitem(
2077 b'storage',
2082 b'storage',
2078 b'revlog.zstd.level',
2083 b'revlog.zstd.level',
2079 default=None,
2084 default=None,
2080 )
2085 )
2081 coreconfigitem(
2086 coreconfigitem(
2082 b'server',
2087 b'server',
2083 b'bookmarks-pushkey-compat',
2088 b'bookmarks-pushkey-compat',
2084 default=True,
2089 default=True,
2085 )
2090 )
2086 coreconfigitem(
2091 coreconfigitem(
2087 b'server',
2092 b'server',
2088 b'bundle1',
2093 b'bundle1',
2089 default=True,
2094 default=True,
2090 )
2095 )
2091 coreconfigitem(
2096 coreconfigitem(
2092 b'server',
2097 b'server',
2093 b'bundle1gd',
2098 b'bundle1gd',
2094 default=None,
2099 default=None,
2095 )
2100 )
2096 coreconfigitem(
2101 coreconfigitem(
2097 b'server',
2102 b'server',
2098 b'bundle1.pull',
2103 b'bundle1.pull',
2099 default=None,
2104 default=None,
2100 )
2105 )
2101 coreconfigitem(
2106 coreconfigitem(
2102 b'server',
2107 b'server',
2103 b'bundle1gd.pull',
2108 b'bundle1gd.pull',
2104 default=None,
2109 default=None,
2105 )
2110 )
2106 coreconfigitem(
2111 coreconfigitem(
2107 b'server',
2112 b'server',
2108 b'bundle1.push',
2113 b'bundle1.push',
2109 default=None,
2114 default=None,
2110 )
2115 )
2111 coreconfigitem(
2116 coreconfigitem(
2112 b'server',
2117 b'server',
2113 b'bundle1gd.push',
2118 b'bundle1gd.push',
2114 default=None,
2119 default=None,
2115 )
2120 )
2116 coreconfigitem(
2121 coreconfigitem(
2117 b'server',
2122 b'server',
2118 b'bundle2.stream',
2123 b'bundle2.stream',
2119 default=True,
2124 default=True,
2120 alias=[(b'experimental', b'bundle2.stream')],
2125 alias=[(b'experimental', b'bundle2.stream')],
2121 )
2126 )
2122 coreconfigitem(
2127 coreconfigitem(
2123 b'server',
2128 b'server',
2124 b'compressionengines',
2129 b'compressionengines',
2125 default=list,
2130 default=list,
2126 )
2131 )
2127 coreconfigitem(
2132 coreconfigitem(
2128 b'server',
2133 b'server',
2129 b'concurrent-push-mode',
2134 b'concurrent-push-mode',
2130 default=b'check-related',
2135 default=b'check-related',
2131 )
2136 )
2132 coreconfigitem(
2137 coreconfigitem(
2133 b'server',
2138 b'server',
2134 b'disablefullbundle',
2139 b'disablefullbundle',
2135 default=False,
2140 default=False,
2136 )
2141 )
2137 coreconfigitem(
2142 coreconfigitem(
2138 b'server',
2143 b'server',
2139 b'maxhttpheaderlen',
2144 b'maxhttpheaderlen',
2140 default=1024,
2145 default=1024,
2141 )
2146 )
2142 coreconfigitem(
2147 coreconfigitem(
2143 b'server',
2148 b'server',
2144 b'pullbundle',
2149 b'pullbundle',
2145 default=True,
2150 default=True,
2146 )
2151 )
2147 coreconfigitem(
2152 coreconfigitem(
2148 b'server',
2153 b'server',
2149 b'preferuncompressed',
2154 b'preferuncompressed',
2150 default=False,
2155 default=False,
2151 )
2156 )
2152 coreconfigitem(
2157 coreconfigitem(
2153 b'server',
2158 b'server',
2154 b'streamunbundle',
2159 b'streamunbundle',
2155 default=False,
2160 default=False,
2156 )
2161 )
2157 coreconfigitem(
2162 coreconfigitem(
2158 b'server',
2163 b'server',
2159 b'uncompressed',
2164 b'uncompressed',
2160 default=True,
2165 default=True,
2161 )
2166 )
2162 coreconfigitem(
2167 coreconfigitem(
2163 b'server',
2168 b'server',
2164 b'uncompressedallowsecret',
2169 b'uncompressedallowsecret',
2165 default=False,
2170 default=False,
2166 )
2171 )
2167 coreconfigitem(
2172 coreconfigitem(
2168 b'server',
2173 b'server',
2169 b'view',
2174 b'view',
2170 default=b'served',
2175 default=b'served',
2171 )
2176 )
2172 coreconfigitem(
2177 coreconfigitem(
2173 b'server',
2178 b'server',
2174 b'validate',
2179 b'validate',
2175 default=False,
2180 default=False,
2176 )
2181 )
2177 coreconfigitem(
2182 coreconfigitem(
2178 b'server',
2183 b'server',
2179 b'zliblevel',
2184 b'zliblevel',
2180 default=-1,
2185 default=-1,
2181 )
2186 )
2182 coreconfigitem(
2187 coreconfigitem(
2183 b'server',
2188 b'server',
2184 b'zstdlevel',
2189 b'zstdlevel',
2185 default=3,
2190 default=3,
2186 )
2191 )
2187 coreconfigitem(
2192 coreconfigitem(
2188 b'share',
2193 b'share',
2189 b'pool',
2194 b'pool',
2190 default=None,
2195 default=None,
2191 )
2196 )
2192 coreconfigitem(
2197 coreconfigitem(
2193 b'share',
2198 b'share',
2194 b'poolnaming',
2199 b'poolnaming',
2195 default=b'identity',
2200 default=b'identity',
2196 )
2201 )
2197 coreconfigitem(
2202 coreconfigitem(
2198 b'share',
2203 b'share',
2199 b'safe-mismatch.source-not-safe',
2204 b'safe-mismatch.source-not-safe',
2200 default=b'abort',
2205 default=b'abort',
2201 )
2206 )
2202 coreconfigitem(
2207 coreconfigitem(
2203 b'share',
2208 b'share',
2204 b'safe-mismatch.source-safe',
2209 b'safe-mismatch.source-safe',
2205 default=b'abort',
2210 default=b'abort',
2206 )
2211 )
2207 coreconfigitem(
2212 coreconfigitem(
2208 b'share',
2213 b'share',
2209 b'safe-mismatch.source-not-safe.warn',
2214 b'safe-mismatch.source-not-safe.warn',
2210 default=True,
2215 default=True,
2211 )
2216 )
2212 coreconfigitem(
2217 coreconfigitem(
2213 b'share',
2218 b'share',
2214 b'safe-mismatch.source-safe.warn',
2219 b'safe-mismatch.source-safe.warn',
2215 default=True,
2220 default=True,
2216 )
2221 )
2217 coreconfigitem(
2222 coreconfigitem(
2218 b'share',
2223 b'share',
2219 b'safe-mismatch.source-not-safe:verbose-upgrade',
2224 b'safe-mismatch.source-not-safe:verbose-upgrade',
2220 default=True,
2225 default=True,
2221 )
2226 )
2222 coreconfigitem(
2227 coreconfigitem(
2223 b'share',
2228 b'share',
2224 b'safe-mismatch.source-safe:verbose-upgrade',
2229 b'safe-mismatch.source-safe:verbose-upgrade',
2225 default=True,
2230 default=True,
2226 )
2231 )
2227 coreconfigitem(
2232 coreconfigitem(
2228 b'shelve',
2233 b'shelve',
2229 b'maxbackups',
2234 b'maxbackups',
2230 default=10,
2235 default=10,
2231 )
2236 )
2232 coreconfigitem(
2237 coreconfigitem(
2233 b'smtp',
2238 b'smtp',
2234 b'host',
2239 b'host',
2235 default=None,
2240 default=None,
2236 )
2241 )
2237 coreconfigitem(
2242 coreconfigitem(
2238 b'smtp',
2243 b'smtp',
2239 b'local_hostname',
2244 b'local_hostname',
2240 default=None,
2245 default=None,
2241 )
2246 )
2242 coreconfigitem(
2247 coreconfigitem(
2243 b'smtp',
2248 b'smtp',
2244 b'password',
2249 b'password',
2245 default=None,
2250 default=None,
2246 )
2251 )
2247 coreconfigitem(
2252 coreconfigitem(
2248 b'smtp',
2253 b'smtp',
2249 b'port',
2254 b'port',
2250 default=dynamicdefault,
2255 default=dynamicdefault,
2251 )
2256 )
2252 coreconfigitem(
2257 coreconfigitem(
2253 b'smtp',
2258 b'smtp',
2254 b'tls',
2259 b'tls',
2255 default=b'none',
2260 default=b'none',
2256 )
2261 )
2257 coreconfigitem(
2262 coreconfigitem(
2258 b'smtp',
2263 b'smtp',
2259 b'username',
2264 b'username',
2260 default=None,
2265 default=None,
2261 )
2266 )
2262 coreconfigitem(
2267 coreconfigitem(
2263 b'sparse',
2268 b'sparse',
2264 b'missingwarning',
2269 b'missingwarning',
2265 default=True,
2270 default=True,
2266 experimental=True,
2271 experimental=True,
2267 )
2272 )
2268 coreconfigitem(
2273 coreconfigitem(
2269 b'subrepos',
2274 b'subrepos',
2270 b'allowed',
2275 b'allowed',
2271 default=dynamicdefault, # to make backporting simpler
2276 default=dynamicdefault, # to make backporting simpler
2272 )
2277 )
2273 coreconfigitem(
2278 coreconfigitem(
2274 b'subrepos',
2279 b'subrepos',
2275 b'hg:allowed',
2280 b'hg:allowed',
2276 default=dynamicdefault,
2281 default=dynamicdefault,
2277 )
2282 )
2278 coreconfigitem(
2283 coreconfigitem(
2279 b'subrepos',
2284 b'subrepos',
2280 b'git:allowed',
2285 b'git:allowed',
2281 default=dynamicdefault,
2286 default=dynamicdefault,
2282 )
2287 )
2283 coreconfigitem(
2288 coreconfigitem(
2284 b'subrepos',
2289 b'subrepos',
2285 b'svn:allowed',
2290 b'svn:allowed',
2286 default=dynamicdefault,
2291 default=dynamicdefault,
2287 )
2292 )
2288 coreconfigitem(
2293 coreconfigitem(
2289 b'templates',
2294 b'templates',
2290 b'.*',
2295 b'.*',
2291 default=None,
2296 default=None,
2292 generic=True,
2297 generic=True,
2293 )
2298 )
2294 coreconfigitem(
2299 coreconfigitem(
2295 b'templateconfig',
2300 b'templateconfig',
2296 b'.*',
2301 b'.*',
2297 default=dynamicdefault,
2302 default=dynamicdefault,
2298 generic=True,
2303 generic=True,
2299 )
2304 )
2300 coreconfigitem(
2305 coreconfigitem(
2301 b'trusted',
2306 b'trusted',
2302 b'groups',
2307 b'groups',
2303 default=list,
2308 default=list,
2304 )
2309 )
2305 coreconfigitem(
2310 coreconfigitem(
2306 b'trusted',
2311 b'trusted',
2307 b'users',
2312 b'users',
2308 default=list,
2313 default=list,
2309 )
2314 )
2310 coreconfigitem(
2315 coreconfigitem(
2311 b'ui',
2316 b'ui',
2312 b'_usedassubrepo',
2317 b'_usedassubrepo',
2313 default=False,
2318 default=False,
2314 )
2319 )
2315 coreconfigitem(
2320 coreconfigitem(
2316 b'ui',
2321 b'ui',
2317 b'allowemptycommit',
2322 b'allowemptycommit',
2318 default=False,
2323 default=False,
2319 )
2324 )
2320 coreconfigitem(
2325 coreconfigitem(
2321 b'ui',
2326 b'ui',
2322 b'archivemeta',
2327 b'archivemeta',
2323 default=True,
2328 default=True,
2324 )
2329 )
2325 coreconfigitem(
2330 coreconfigitem(
2326 b'ui',
2331 b'ui',
2327 b'askusername',
2332 b'askusername',
2328 default=False,
2333 default=False,
2329 )
2334 )
2330 coreconfigitem(
2335 coreconfigitem(
2331 b'ui',
2336 b'ui',
2332 b'available-memory',
2337 b'available-memory',
2333 default=None,
2338 default=None,
2334 )
2339 )
2335
2340
2336 coreconfigitem(
2341 coreconfigitem(
2337 b'ui',
2342 b'ui',
2338 b'clonebundlefallback',
2343 b'clonebundlefallback',
2339 default=False,
2344 default=False,
2340 )
2345 )
2341 coreconfigitem(
2346 coreconfigitem(
2342 b'ui',
2347 b'ui',
2343 b'clonebundleprefers',
2348 b'clonebundleprefers',
2344 default=list,
2349 default=list,
2345 )
2350 )
2346 coreconfigitem(
2351 coreconfigitem(
2347 b'ui',
2352 b'ui',
2348 b'clonebundles',
2353 b'clonebundles',
2349 default=True,
2354 default=True,
2350 )
2355 )
2351 coreconfigitem(
2356 coreconfigitem(
2352 b'ui',
2357 b'ui',
2353 b'color',
2358 b'color',
2354 default=b'auto',
2359 default=b'auto',
2355 )
2360 )
2356 coreconfigitem(
2361 coreconfigitem(
2357 b'ui',
2362 b'ui',
2358 b'commitsubrepos',
2363 b'commitsubrepos',
2359 default=False,
2364 default=False,
2360 )
2365 )
2361 coreconfigitem(
2366 coreconfigitem(
2362 b'ui',
2367 b'ui',
2363 b'debug',
2368 b'debug',
2364 default=False,
2369 default=False,
2365 )
2370 )
2366 coreconfigitem(
2371 coreconfigitem(
2367 b'ui',
2372 b'ui',
2368 b'debugger',
2373 b'debugger',
2369 default=None,
2374 default=None,
2370 )
2375 )
2371 coreconfigitem(
2376 coreconfigitem(
2372 b'ui',
2377 b'ui',
2373 b'editor',
2378 b'editor',
2374 default=dynamicdefault,
2379 default=dynamicdefault,
2375 )
2380 )
2376 coreconfigitem(
2381 coreconfigitem(
2377 b'ui',
2382 b'ui',
2378 b'detailed-exit-code',
2383 b'detailed-exit-code',
2379 default=False,
2384 default=False,
2380 experimental=True,
2385 experimental=True,
2381 )
2386 )
2382 coreconfigitem(
2387 coreconfigitem(
2383 b'ui',
2388 b'ui',
2384 b'fallbackencoding',
2389 b'fallbackencoding',
2385 default=None,
2390 default=None,
2386 )
2391 )
2387 coreconfigitem(
2392 coreconfigitem(
2388 b'ui',
2393 b'ui',
2389 b'forcecwd',
2394 b'forcecwd',
2390 default=None,
2395 default=None,
2391 )
2396 )
2392 coreconfigitem(
2397 coreconfigitem(
2393 b'ui',
2398 b'ui',
2394 b'forcemerge',
2399 b'forcemerge',
2395 default=None,
2400 default=None,
2396 )
2401 )
2397 coreconfigitem(
2402 coreconfigitem(
2398 b'ui',
2403 b'ui',
2399 b'formatdebug',
2404 b'formatdebug',
2400 default=False,
2405 default=False,
2401 )
2406 )
2402 coreconfigitem(
2407 coreconfigitem(
2403 b'ui',
2408 b'ui',
2404 b'formatjson',
2409 b'formatjson',
2405 default=False,
2410 default=False,
2406 )
2411 )
2407 coreconfigitem(
2412 coreconfigitem(
2408 b'ui',
2413 b'ui',
2409 b'formatted',
2414 b'formatted',
2410 default=None,
2415 default=None,
2411 )
2416 )
2412 coreconfigitem(
2417 coreconfigitem(
2413 b'ui',
2418 b'ui',
2414 b'interactive',
2419 b'interactive',
2415 default=None,
2420 default=None,
2416 )
2421 )
2417 coreconfigitem(
2422 coreconfigitem(
2418 b'ui',
2423 b'ui',
2419 b'interface',
2424 b'interface',
2420 default=None,
2425 default=None,
2421 )
2426 )
2422 coreconfigitem(
2427 coreconfigitem(
2423 b'ui',
2428 b'ui',
2424 b'interface.chunkselector',
2429 b'interface.chunkselector',
2425 default=None,
2430 default=None,
2426 )
2431 )
2427 coreconfigitem(
2432 coreconfigitem(
2428 b'ui',
2433 b'ui',
2429 b'large-file-limit',
2434 b'large-file-limit',
2430 default=10 * (2 ** 20),
2435 default=10 * (2 ** 20),
2431 )
2436 )
2432 coreconfigitem(
2437 coreconfigitem(
2433 b'ui',
2438 b'ui',
2434 b'logblockedtimes',
2439 b'logblockedtimes',
2435 default=False,
2440 default=False,
2436 )
2441 )
2437 coreconfigitem(
2442 coreconfigitem(
2438 b'ui',
2443 b'ui',
2439 b'merge',
2444 b'merge',
2440 default=None,
2445 default=None,
2441 )
2446 )
2442 coreconfigitem(
2447 coreconfigitem(
2443 b'ui',
2448 b'ui',
2444 b'mergemarkers',
2449 b'mergemarkers',
2445 default=b'basic',
2450 default=b'basic',
2446 )
2451 )
2447 coreconfigitem(
2452 coreconfigitem(
2448 b'ui',
2453 b'ui',
2449 b'message-output',
2454 b'message-output',
2450 default=b'stdio',
2455 default=b'stdio',
2451 )
2456 )
2452 coreconfigitem(
2457 coreconfigitem(
2453 b'ui',
2458 b'ui',
2454 b'nontty',
2459 b'nontty',
2455 default=False,
2460 default=False,
2456 )
2461 )
2457 coreconfigitem(
2462 coreconfigitem(
2458 b'ui',
2463 b'ui',
2459 b'origbackuppath',
2464 b'origbackuppath',
2460 default=None,
2465 default=None,
2461 )
2466 )
2462 coreconfigitem(
2467 coreconfigitem(
2463 b'ui',
2468 b'ui',
2464 b'paginate',
2469 b'paginate',
2465 default=True,
2470 default=True,
2466 )
2471 )
2467 coreconfigitem(
2472 coreconfigitem(
2468 b'ui',
2473 b'ui',
2469 b'patch',
2474 b'patch',
2470 default=None,
2475 default=None,
2471 )
2476 )
2472 coreconfigitem(
2477 coreconfigitem(
2473 b'ui',
2478 b'ui',
2474 b'portablefilenames',
2479 b'portablefilenames',
2475 default=b'warn',
2480 default=b'warn',
2476 )
2481 )
2477 coreconfigitem(
2482 coreconfigitem(
2478 b'ui',
2483 b'ui',
2479 b'promptecho',
2484 b'promptecho',
2480 default=False,
2485 default=False,
2481 )
2486 )
2482 coreconfigitem(
2487 coreconfigitem(
2483 b'ui',
2488 b'ui',
2484 b'quiet',
2489 b'quiet',
2485 default=False,
2490 default=False,
2486 )
2491 )
2487 coreconfigitem(
2492 coreconfigitem(
2488 b'ui',
2493 b'ui',
2489 b'quietbookmarkmove',
2494 b'quietbookmarkmove',
2490 default=False,
2495 default=False,
2491 )
2496 )
2492 coreconfigitem(
2497 coreconfigitem(
2493 b'ui',
2498 b'ui',
2494 b'relative-paths',
2499 b'relative-paths',
2495 default=b'legacy',
2500 default=b'legacy',
2496 )
2501 )
2497 coreconfigitem(
2502 coreconfigitem(
2498 b'ui',
2503 b'ui',
2499 b'remotecmd',
2504 b'remotecmd',
2500 default=b'hg',
2505 default=b'hg',
2501 )
2506 )
2502 coreconfigitem(
2507 coreconfigitem(
2503 b'ui',
2508 b'ui',
2504 b'report_untrusted',
2509 b'report_untrusted',
2505 default=True,
2510 default=True,
2506 )
2511 )
2507 coreconfigitem(
2512 coreconfigitem(
2508 b'ui',
2513 b'ui',
2509 b'rollback',
2514 b'rollback',
2510 default=True,
2515 default=True,
2511 )
2516 )
2512 coreconfigitem(
2517 coreconfigitem(
2513 b'ui',
2518 b'ui',
2514 b'signal-safe-lock',
2519 b'signal-safe-lock',
2515 default=True,
2520 default=True,
2516 )
2521 )
2517 coreconfigitem(
2522 coreconfigitem(
2518 b'ui',
2523 b'ui',
2519 b'slash',
2524 b'slash',
2520 default=False,
2525 default=False,
2521 )
2526 )
2522 coreconfigitem(
2527 coreconfigitem(
2523 b'ui',
2528 b'ui',
2524 b'ssh',
2529 b'ssh',
2525 default=b'ssh',
2530 default=b'ssh',
2526 )
2531 )
2527 coreconfigitem(
2532 coreconfigitem(
2528 b'ui',
2533 b'ui',
2529 b'ssherrorhint',
2534 b'ssherrorhint',
2530 default=None,
2535 default=None,
2531 )
2536 )
2532 coreconfigitem(
2537 coreconfigitem(
2533 b'ui',
2538 b'ui',
2534 b'statuscopies',
2539 b'statuscopies',
2535 default=False,
2540 default=False,
2536 )
2541 )
2537 coreconfigitem(
2542 coreconfigitem(
2538 b'ui',
2543 b'ui',
2539 b'strict',
2544 b'strict',
2540 default=False,
2545 default=False,
2541 )
2546 )
2542 coreconfigitem(
2547 coreconfigitem(
2543 b'ui',
2548 b'ui',
2544 b'style',
2549 b'style',
2545 default=b'',
2550 default=b'',
2546 )
2551 )
2547 coreconfigitem(
2552 coreconfigitem(
2548 b'ui',
2553 b'ui',
2549 b'supportcontact',
2554 b'supportcontact',
2550 default=None,
2555 default=None,
2551 )
2556 )
2552 coreconfigitem(
2557 coreconfigitem(
2553 b'ui',
2558 b'ui',
2554 b'textwidth',
2559 b'textwidth',
2555 default=78,
2560 default=78,
2556 )
2561 )
2557 coreconfigitem(
2562 coreconfigitem(
2558 b'ui',
2563 b'ui',
2559 b'timeout',
2564 b'timeout',
2560 default=b'600',
2565 default=b'600',
2561 )
2566 )
2562 coreconfigitem(
2567 coreconfigitem(
2563 b'ui',
2568 b'ui',
2564 b'timeout.warn',
2569 b'timeout.warn',
2565 default=0,
2570 default=0,
2566 )
2571 )
2567 coreconfigitem(
2572 coreconfigitem(
2568 b'ui',
2573 b'ui',
2569 b'timestamp-output',
2574 b'timestamp-output',
2570 default=False,
2575 default=False,
2571 )
2576 )
2572 coreconfigitem(
2577 coreconfigitem(
2573 b'ui',
2578 b'ui',
2574 b'traceback',
2579 b'traceback',
2575 default=False,
2580 default=False,
2576 )
2581 )
2577 coreconfigitem(
2582 coreconfigitem(
2578 b'ui',
2583 b'ui',
2579 b'tweakdefaults',
2584 b'tweakdefaults',
2580 default=False,
2585 default=False,
2581 )
2586 )
2582 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2587 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2583 coreconfigitem(
2588 coreconfigitem(
2584 b'ui',
2589 b'ui',
2585 b'verbose',
2590 b'verbose',
2586 default=False,
2591 default=False,
2587 )
2592 )
2588 coreconfigitem(
2593 coreconfigitem(
2589 b'verify',
2594 b'verify',
2590 b'skipflags',
2595 b'skipflags',
2591 default=0,
2596 default=0,
2592 )
2597 )
2593 coreconfigitem(
2598 coreconfigitem(
2594 b'web',
2599 b'web',
2595 b'allowbz2',
2600 b'allowbz2',
2596 default=False,
2601 default=False,
2597 )
2602 )
2598 coreconfigitem(
2603 coreconfigitem(
2599 b'web',
2604 b'web',
2600 b'allowgz',
2605 b'allowgz',
2601 default=False,
2606 default=False,
2602 )
2607 )
2603 coreconfigitem(
2608 coreconfigitem(
2604 b'web',
2609 b'web',
2605 b'allow-pull',
2610 b'allow-pull',
2606 alias=[(b'web', b'allowpull')],
2611 alias=[(b'web', b'allowpull')],
2607 default=True,
2612 default=True,
2608 )
2613 )
2609 coreconfigitem(
2614 coreconfigitem(
2610 b'web',
2615 b'web',
2611 b'allow-push',
2616 b'allow-push',
2612 alias=[(b'web', b'allow_push')],
2617 alias=[(b'web', b'allow_push')],
2613 default=list,
2618 default=list,
2614 )
2619 )
2615 coreconfigitem(
2620 coreconfigitem(
2616 b'web',
2621 b'web',
2617 b'allowzip',
2622 b'allowzip',
2618 default=False,
2623 default=False,
2619 )
2624 )
2620 coreconfigitem(
2625 coreconfigitem(
2621 b'web',
2626 b'web',
2622 b'archivesubrepos',
2627 b'archivesubrepos',
2623 default=False,
2628 default=False,
2624 )
2629 )
2625 coreconfigitem(
2630 coreconfigitem(
2626 b'web',
2631 b'web',
2627 b'cache',
2632 b'cache',
2628 default=True,
2633 default=True,
2629 )
2634 )
2630 coreconfigitem(
2635 coreconfigitem(
2631 b'web',
2636 b'web',
2632 b'comparisoncontext',
2637 b'comparisoncontext',
2633 default=5,
2638 default=5,
2634 )
2639 )
2635 coreconfigitem(
2640 coreconfigitem(
2636 b'web',
2641 b'web',
2637 b'contact',
2642 b'contact',
2638 default=None,
2643 default=None,
2639 )
2644 )
2640 coreconfigitem(
2645 coreconfigitem(
2641 b'web',
2646 b'web',
2642 b'deny_push',
2647 b'deny_push',
2643 default=list,
2648 default=list,
2644 )
2649 )
2645 coreconfigitem(
2650 coreconfigitem(
2646 b'web',
2651 b'web',
2647 b'guessmime',
2652 b'guessmime',
2648 default=False,
2653 default=False,
2649 )
2654 )
2650 coreconfigitem(
2655 coreconfigitem(
2651 b'web',
2656 b'web',
2652 b'hidden',
2657 b'hidden',
2653 default=False,
2658 default=False,
2654 )
2659 )
2655 coreconfigitem(
2660 coreconfigitem(
2656 b'web',
2661 b'web',
2657 b'labels',
2662 b'labels',
2658 default=list,
2663 default=list,
2659 )
2664 )
2660 coreconfigitem(
2665 coreconfigitem(
2661 b'web',
2666 b'web',
2662 b'logoimg',
2667 b'logoimg',
2663 default=b'hglogo.png',
2668 default=b'hglogo.png',
2664 )
2669 )
2665 coreconfigitem(
2670 coreconfigitem(
2666 b'web',
2671 b'web',
2667 b'logourl',
2672 b'logourl',
2668 default=b'https://mercurial-scm.org/',
2673 default=b'https://mercurial-scm.org/',
2669 )
2674 )
2670 coreconfigitem(
2675 coreconfigitem(
2671 b'web',
2676 b'web',
2672 b'accesslog',
2677 b'accesslog',
2673 default=b'-',
2678 default=b'-',
2674 )
2679 )
2675 coreconfigitem(
2680 coreconfigitem(
2676 b'web',
2681 b'web',
2677 b'address',
2682 b'address',
2678 default=b'',
2683 default=b'',
2679 )
2684 )
2680 coreconfigitem(
2685 coreconfigitem(
2681 b'web',
2686 b'web',
2682 b'allow-archive',
2687 b'allow-archive',
2683 alias=[(b'web', b'allow_archive')],
2688 alias=[(b'web', b'allow_archive')],
2684 default=list,
2689 default=list,
2685 )
2690 )
2686 coreconfigitem(
2691 coreconfigitem(
2687 b'web',
2692 b'web',
2688 b'allow_read',
2693 b'allow_read',
2689 default=list,
2694 default=list,
2690 )
2695 )
2691 coreconfigitem(
2696 coreconfigitem(
2692 b'web',
2697 b'web',
2693 b'baseurl',
2698 b'baseurl',
2694 default=None,
2699 default=None,
2695 )
2700 )
2696 coreconfigitem(
2701 coreconfigitem(
2697 b'web',
2702 b'web',
2698 b'cacerts',
2703 b'cacerts',
2699 default=None,
2704 default=None,
2700 )
2705 )
2701 coreconfigitem(
2706 coreconfigitem(
2702 b'web',
2707 b'web',
2703 b'certificate',
2708 b'certificate',
2704 default=None,
2709 default=None,
2705 )
2710 )
2706 coreconfigitem(
2711 coreconfigitem(
2707 b'web',
2712 b'web',
2708 b'collapse',
2713 b'collapse',
2709 default=False,
2714 default=False,
2710 )
2715 )
2711 coreconfigitem(
2716 coreconfigitem(
2712 b'web',
2717 b'web',
2713 b'csp',
2718 b'csp',
2714 default=None,
2719 default=None,
2715 )
2720 )
2716 coreconfigitem(
2721 coreconfigitem(
2717 b'web',
2722 b'web',
2718 b'deny_read',
2723 b'deny_read',
2719 default=list,
2724 default=list,
2720 )
2725 )
2721 coreconfigitem(
2726 coreconfigitem(
2722 b'web',
2727 b'web',
2723 b'descend',
2728 b'descend',
2724 default=True,
2729 default=True,
2725 )
2730 )
2726 coreconfigitem(
2731 coreconfigitem(
2727 b'web',
2732 b'web',
2728 b'description',
2733 b'description',
2729 default=b"",
2734 default=b"",
2730 )
2735 )
2731 coreconfigitem(
2736 coreconfigitem(
2732 b'web',
2737 b'web',
2733 b'encoding',
2738 b'encoding',
2734 default=lambda: encoding.encoding,
2739 default=lambda: encoding.encoding,
2735 )
2740 )
2736 coreconfigitem(
2741 coreconfigitem(
2737 b'web',
2742 b'web',
2738 b'errorlog',
2743 b'errorlog',
2739 default=b'-',
2744 default=b'-',
2740 )
2745 )
2741 coreconfigitem(
2746 coreconfigitem(
2742 b'web',
2747 b'web',
2743 b'ipv6',
2748 b'ipv6',
2744 default=False,
2749 default=False,
2745 )
2750 )
2746 coreconfigitem(
2751 coreconfigitem(
2747 b'web',
2752 b'web',
2748 b'maxchanges',
2753 b'maxchanges',
2749 default=10,
2754 default=10,
2750 )
2755 )
2751 coreconfigitem(
2756 coreconfigitem(
2752 b'web',
2757 b'web',
2753 b'maxfiles',
2758 b'maxfiles',
2754 default=10,
2759 default=10,
2755 )
2760 )
2756 coreconfigitem(
2761 coreconfigitem(
2757 b'web',
2762 b'web',
2758 b'maxshortchanges',
2763 b'maxshortchanges',
2759 default=60,
2764 default=60,
2760 )
2765 )
2761 coreconfigitem(
2766 coreconfigitem(
2762 b'web',
2767 b'web',
2763 b'motd',
2768 b'motd',
2764 default=b'',
2769 default=b'',
2765 )
2770 )
2766 coreconfigitem(
2771 coreconfigitem(
2767 b'web',
2772 b'web',
2768 b'name',
2773 b'name',
2769 default=dynamicdefault,
2774 default=dynamicdefault,
2770 )
2775 )
2771 coreconfigitem(
2776 coreconfigitem(
2772 b'web',
2777 b'web',
2773 b'port',
2778 b'port',
2774 default=8000,
2779 default=8000,
2775 )
2780 )
2776 coreconfigitem(
2781 coreconfigitem(
2777 b'web',
2782 b'web',
2778 b'prefix',
2783 b'prefix',
2779 default=b'',
2784 default=b'',
2780 )
2785 )
2781 coreconfigitem(
2786 coreconfigitem(
2782 b'web',
2787 b'web',
2783 b'push_ssl',
2788 b'push_ssl',
2784 default=True,
2789 default=True,
2785 )
2790 )
2786 coreconfigitem(
2791 coreconfigitem(
2787 b'web',
2792 b'web',
2788 b'refreshinterval',
2793 b'refreshinterval',
2789 default=20,
2794 default=20,
2790 )
2795 )
2791 coreconfigitem(
2796 coreconfigitem(
2792 b'web',
2797 b'web',
2793 b'server-header',
2798 b'server-header',
2794 default=None,
2799 default=None,
2795 )
2800 )
2796 coreconfigitem(
2801 coreconfigitem(
2797 b'web',
2802 b'web',
2798 b'static',
2803 b'static',
2799 default=None,
2804 default=None,
2800 )
2805 )
2801 coreconfigitem(
2806 coreconfigitem(
2802 b'web',
2807 b'web',
2803 b'staticurl',
2808 b'staticurl',
2804 default=None,
2809 default=None,
2805 )
2810 )
2806 coreconfigitem(
2811 coreconfigitem(
2807 b'web',
2812 b'web',
2808 b'stripes',
2813 b'stripes',
2809 default=1,
2814 default=1,
2810 )
2815 )
2811 coreconfigitem(
2816 coreconfigitem(
2812 b'web',
2817 b'web',
2813 b'style',
2818 b'style',
2814 default=b'paper',
2819 default=b'paper',
2815 )
2820 )
2816 coreconfigitem(
2821 coreconfigitem(
2817 b'web',
2822 b'web',
2818 b'templates',
2823 b'templates',
2819 default=None,
2824 default=None,
2820 )
2825 )
2821 coreconfigitem(
2826 coreconfigitem(
2822 b'web',
2827 b'web',
2823 b'view',
2828 b'view',
2824 default=b'served',
2829 default=b'served',
2825 experimental=True,
2830 experimental=True,
2826 )
2831 )
2827 coreconfigitem(
2832 coreconfigitem(
2828 b'worker',
2833 b'worker',
2829 b'backgroundclose',
2834 b'backgroundclose',
2830 default=dynamicdefault,
2835 default=dynamicdefault,
2831 )
2836 )
2832 # Windows defaults to a limit of 512 open files. A buffer of 128
2837 # Windows defaults to a limit of 512 open files. A buffer of 128
2833 # should give us enough headway.
2838 # should give us enough headway.
2834 coreconfigitem(
2839 coreconfigitem(
2835 b'worker',
2840 b'worker',
2836 b'backgroundclosemaxqueue',
2841 b'backgroundclosemaxqueue',
2837 default=384,
2842 default=384,
2838 )
2843 )
2839 coreconfigitem(
2844 coreconfigitem(
2840 b'worker',
2845 b'worker',
2841 b'backgroundcloseminfilecount',
2846 b'backgroundcloseminfilecount',
2842 default=2048,
2847 default=2048,
2843 )
2848 )
2844 coreconfigitem(
2849 coreconfigitem(
2845 b'worker',
2850 b'worker',
2846 b'backgroundclosethreadcount',
2851 b'backgroundclosethreadcount',
2847 default=4,
2852 default=4,
2848 )
2853 )
2849 coreconfigitem(
2854 coreconfigitem(
2850 b'worker',
2855 b'worker',
2851 b'enabled',
2856 b'enabled',
2852 default=True,
2857 default=True,
2853 )
2858 )
2854 coreconfigitem(
2859 coreconfigitem(
2855 b'worker',
2860 b'worker',
2856 b'numcpus',
2861 b'numcpus',
2857 default=None,
2862 default=None,
2858 )
2863 )
2859
2864
2860 # Rebase related configuration moved to core because other extension are doing
2865 # Rebase related configuration moved to core because other extension are doing
2861 # strange things. For example, shelve import the extensions to reuse some bit
2866 # strange things. For example, shelve import the extensions to reuse some bit
2862 # without formally loading it.
2867 # without formally loading it.
2863 coreconfigitem(
2868 coreconfigitem(
2864 b'commands',
2869 b'commands',
2865 b'rebase.requiredest',
2870 b'rebase.requiredest',
2866 default=False,
2871 default=False,
2867 )
2872 )
2868 coreconfigitem(
2873 coreconfigitem(
2869 b'experimental',
2874 b'experimental',
2870 b'rebaseskipobsolete',
2875 b'rebaseskipobsolete',
2871 default=True,
2876 default=True,
2872 )
2877 )
2873 coreconfigitem(
2878 coreconfigitem(
2874 b'rebase',
2879 b'rebase',
2875 b'singletransaction',
2880 b'singletransaction',
2876 default=False,
2881 default=False,
2877 )
2882 )
2878 coreconfigitem(
2883 coreconfigitem(
2879 b'rebase',
2884 b'rebase',
2880 b'experimental.inmemory',
2885 b'experimental.inmemory',
2881 default=False,
2886 default=False,
2882 )
2887 )
2883
2888
2884 # This setting controls creation of a rebase_source extra field
2889 # This setting controls creation of a rebase_source extra field
2885 # during rebase. When False, no such field is created. This is
2890 # during rebase. When False, no such field is created. This is
2886 # useful eg for incrementally converting changesets and then
2891 # useful eg for incrementally converting changesets and then
2887 # rebasing them onto an existing repo.
2892 # rebasing them onto an existing repo.
2888 # WARNING: this is an advanced setting reserved for people who know
2893 # WARNING: this is an advanced setting reserved for people who know
2889 # exactly what they are doing. Misuse of this setting can easily
2894 # exactly what they are doing. Misuse of this setting can easily
2890 # result in obsmarker cycles and a vivid headache.
2895 # result in obsmarker cycles and a vivid headache.
2891 coreconfigitem(
2896 coreconfigitem(
2892 b'rebase',
2897 b'rebase',
2893 b'store-source',
2898 b'store-source',
2894 default=True,
2899 default=True,
2895 experimental=True,
2900 experimental=True,
2896 )
2901 )
@@ -1,3332 +1,3347 b''
1 The Mercurial system uses a set of configuration files to control
1 The Mercurial system uses a set of configuration files to control
2 aspects of its behavior.
2 aspects of its behavior.
3
3
4 Troubleshooting
4 Troubleshooting
5 ===============
5 ===============
6
6
7 If you're having problems with your configuration,
7 If you're having problems with your configuration,
8 :hg:`config --source` can help you understand what is introducing
8 :hg:`config --source` can help you understand what is introducing
9 a setting into your environment.
9 a setting into your environment.
10
10
11 See :hg:`help config.syntax` and :hg:`help config.files`
11 See :hg:`help config.syntax` and :hg:`help config.files`
12 for information about how and where to override things.
12 for information about how and where to override things.
13
13
14 Structure
14 Structure
15 =========
15 =========
16
16
17 The configuration files use a simple ini-file format. A configuration
17 The configuration files use a simple ini-file format. A configuration
18 file consists of sections, led by a ``[section]`` header and followed
18 file consists of sections, led by a ``[section]`` header and followed
19 by ``name = value`` entries::
19 by ``name = value`` entries::
20
20
21 [ui]
21 [ui]
22 username = Firstname Lastname <firstname.lastname@example.net>
22 username = Firstname Lastname <firstname.lastname@example.net>
23 verbose = True
23 verbose = True
24
24
25 The above entries will be referred to as ``ui.username`` and
25 The above entries will be referred to as ``ui.username`` and
26 ``ui.verbose``, respectively. See :hg:`help config.syntax`.
26 ``ui.verbose``, respectively. See :hg:`help config.syntax`.
27
27
28 Files
28 Files
29 =====
29 =====
30
30
31 Mercurial reads configuration data from several files, if they exist.
31 Mercurial reads configuration data from several files, if they exist.
32 These files do not exist by default and you will have to create the
32 These files do not exist by default and you will have to create the
33 appropriate configuration files yourself:
33 appropriate configuration files yourself:
34
34
35 Local configuration is put into the per-repository ``<repo>/.hg/hgrc`` file.
35 Local configuration is put into the per-repository ``<repo>/.hg/hgrc`` file.
36
36
37 Global configuration like the username setting is typically put into:
37 Global configuration like the username setting is typically put into:
38
38
39 .. container:: windows
39 .. container:: windows
40
40
41 - ``%USERPROFILE%\mercurial.ini`` (on Windows)
41 - ``%USERPROFILE%\mercurial.ini`` (on Windows)
42
42
43 .. container:: unix.plan9
43 .. container:: unix.plan9
44
44
45 - ``$HOME/.hgrc`` (on Unix, Plan9)
45 - ``$HOME/.hgrc`` (on Unix, Plan9)
46
46
47 The names of these files depend on the system on which Mercurial is
47 The names of these files depend on the system on which Mercurial is
48 installed. ``*.rc`` files from a single directory are read in
48 installed. ``*.rc`` files from a single directory are read in
49 alphabetical order, later ones overriding earlier ones. Where multiple
49 alphabetical order, later ones overriding earlier ones. Where multiple
50 paths are given below, settings from earlier paths override later
50 paths are given below, settings from earlier paths override later
51 ones.
51 ones.
52
52
53 .. container:: verbose.unix
53 .. container:: verbose.unix
54
54
55 On Unix, the following files are consulted:
55 On Unix, the following files are consulted:
56
56
57 - ``<repo>/.hg/hgrc-not-shared`` (per-repository)
57 - ``<repo>/.hg/hgrc-not-shared`` (per-repository)
58 - ``<repo>/.hg/hgrc`` (per-repository)
58 - ``<repo>/.hg/hgrc`` (per-repository)
59 - ``$HOME/.hgrc`` (per-user)
59 - ``$HOME/.hgrc`` (per-user)
60 - ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
60 - ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
61 - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
61 - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
62 - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
62 - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
63 - ``/etc/mercurial/hgrc`` (per-system)
63 - ``/etc/mercurial/hgrc`` (per-system)
64 - ``/etc/mercurial/hgrc.d/*.rc`` (per-system)
64 - ``/etc/mercurial/hgrc.d/*.rc`` (per-system)
65 - ``<internal>/*.rc`` (defaults)
65 - ``<internal>/*.rc`` (defaults)
66
66
67 .. container:: verbose.windows
67 .. container:: verbose.windows
68
68
69 On Windows, the following files are consulted:
69 On Windows, the following files are consulted:
70
70
71 - ``<repo>/.hg/hgrc-not-shared`` (per-repository)
71 - ``<repo>/.hg/hgrc-not-shared`` (per-repository)
72 - ``<repo>/.hg/hgrc`` (per-repository)
72 - ``<repo>/.hg/hgrc`` (per-repository)
73 - ``%USERPROFILE%\.hgrc`` (per-user)
73 - ``%USERPROFILE%\.hgrc`` (per-user)
74 - ``%USERPROFILE%\Mercurial.ini`` (per-user)
74 - ``%USERPROFILE%\Mercurial.ini`` (per-user)
75 - ``%HOME%\.hgrc`` (per-user)
75 - ``%HOME%\.hgrc`` (per-user)
76 - ``%HOME%\Mercurial.ini`` (per-user)
76 - ``%HOME%\Mercurial.ini`` (per-user)
77 - ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` (per-system)
77 - ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` (per-system)
78 - ``<install-dir>\hgrc.d\*.rc`` (per-installation)
78 - ``<install-dir>\hgrc.d\*.rc`` (per-installation)
79 - ``<install-dir>\Mercurial.ini`` (per-installation)
79 - ``<install-dir>\Mercurial.ini`` (per-installation)
80 - ``%PROGRAMDATA%\Mercurial\hgrc`` (per-system)
80 - ``%PROGRAMDATA%\Mercurial\hgrc`` (per-system)
81 - ``%PROGRAMDATA%\Mercurial\Mercurial.ini`` (per-system)
81 - ``%PROGRAMDATA%\Mercurial\Mercurial.ini`` (per-system)
82 - ``%PROGRAMDATA%\Mercurial\hgrc.d\*.rc`` (per-system)
82 - ``%PROGRAMDATA%\Mercurial\hgrc.d\*.rc`` (per-system)
83 - ``<internal>/*.rc`` (defaults)
83 - ``<internal>/*.rc`` (defaults)
84
84
85 .. note::
85 .. note::
86
86
87 The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
87 The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
88 is used when running 32-bit Python on 64-bit Windows.
88 is used when running 32-bit Python on 64-bit Windows.
89
89
90 .. container:: verbose.plan9
90 .. container:: verbose.plan9
91
91
92 On Plan9, the following files are consulted:
92 On Plan9, the following files are consulted:
93
93
94 - ``<repo>/.hg/hgrc-not-shared`` (per-repository)
94 - ``<repo>/.hg/hgrc-not-shared`` (per-repository)
95 - ``<repo>/.hg/hgrc`` (per-repository)
95 - ``<repo>/.hg/hgrc`` (per-repository)
96 - ``$home/lib/hgrc`` (per-user)
96 - ``$home/lib/hgrc`` (per-user)
97 - ``<install-root>/lib/mercurial/hgrc`` (per-installation)
97 - ``<install-root>/lib/mercurial/hgrc`` (per-installation)
98 - ``<install-root>/lib/mercurial/hgrc.d/*.rc`` (per-installation)
98 - ``<install-root>/lib/mercurial/hgrc.d/*.rc`` (per-installation)
99 - ``/lib/mercurial/hgrc`` (per-system)
99 - ``/lib/mercurial/hgrc`` (per-system)
100 - ``/lib/mercurial/hgrc.d/*.rc`` (per-system)
100 - ``/lib/mercurial/hgrc.d/*.rc`` (per-system)
101 - ``<internal>/*.rc`` (defaults)
101 - ``<internal>/*.rc`` (defaults)
102
102
103 Per-repository configuration options only apply in a
103 Per-repository configuration options only apply in a
104 particular repository. This file is not version-controlled, and
104 particular repository. This file is not version-controlled, and
105 will not get transferred during a "clone" operation. Options in
105 will not get transferred during a "clone" operation. Options in
106 this file override options in all other configuration files.
106 this file override options in all other configuration files.
107
107
108 .. container:: unix.plan9
108 .. container:: unix.plan9
109
109
110 On Plan 9 and Unix, most of this file will be ignored if it doesn't
110 On Plan 9 and Unix, most of this file will be ignored if it doesn't
111 belong to a trusted user or to a trusted group. See
111 belong to a trusted user or to a trusted group. See
112 :hg:`help config.trusted` for more details.
112 :hg:`help config.trusted` for more details.
113
113
114 Per-user configuration file(s) are for the user running Mercurial. Options
114 Per-user configuration file(s) are for the user running Mercurial. Options
115 in these files apply to all Mercurial commands executed by this user in any
115 in these files apply to all Mercurial commands executed by this user in any
116 directory. Options in these files override per-system and per-installation
116 directory. Options in these files override per-system and per-installation
117 options.
117 options.
118
118
119 Per-installation configuration files are searched for in the
119 Per-installation configuration files are searched for in the
120 directory where Mercurial is installed. ``<install-root>`` is the
120 directory where Mercurial is installed. ``<install-root>`` is the
121 parent directory of the **hg** executable (or symlink) being run.
121 parent directory of the **hg** executable (or symlink) being run.
122
122
123 .. container:: unix.plan9
123 .. container:: unix.plan9
124
124
125 For example, if installed in ``/shared/tools/bin/hg``, Mercurial
125 For example, if installed in ``/shared/tools/bin/hg``, Mercurial
126 will look in ``/shared/tools/etc/mercurial/hgrc``. Options in these
126 will look in ``/shared/tools/etc/mercurial/hgrc``. Options in these
127 files apply to all Mercurial commands executed by any user in any
127 files apply to all Mercurial commands executed by any user in any
128 directory.
128 directory.
129
129
130 Per-installation configuration files are for the system on
130 Per-installation configuration files are for the system on
131 which Mercurial is running. Options in these files apply to all
131 which Mercurial is running. Options in these files apply to all
132 Mercurial commands executed by any user in any directory. Registry
132 Mercurial commands executed by any user in any directory. Registry
133 keys contain PATH-like strings, every part of which must reference
133 keys contain PATH-like strings, every part of which must reference
134 a ``Mercurial.ini`` file or be a directory where ``*.rc`` files will
134 a ``Mercurial.ini`` file or be a directory where ``*.rc`` files will
135 be read. Mercurial checks each of these locations in the specified
135 be read. Mercurial checks each of these locations in the specified
136 order until one or more configuration files are detected.
136 order until one or more configuration files are detected.
137
137
138 Per-system configuration files are for the system on which Mercurial
138 Per-system configuration files are for the system on which Mercurial
139 is running. Options in these files apply to all Mercurial commands
139 is running. Options in these files apply to all Mercurial commands
140 executed by any user in any directory. Options in these files
140 executed by any user in any directory. Options in these files
141 override per-installation options.
141 override per-installation options.
142
142
143 Mercurial comes with some default configuration. The default configuration
143 Mercurial comes with some default configuration. The default configuration
144 files are installed with Mercurial and will be overwritten on upgrades. Default
144 files are installed with Mercurial and will be overwritten on upgrades. Default
145 configuration files should never be edited by users or administrators but can
145 configuration files should never be edited by users or administrators but can
146 be overridden in other configuration files. So far the directory only contains
146 be overridden in other configuration files. So far the directory only contains
147 merge tool configuration but packagers can also put other default configuration
147 merge tool configuration but packagers can also put other default configuration
148 there.
148 there.
149
149
150 On versions 5.7 and later, if share-safe functionality is enabled,
150 On versions 5.7 and later, if share-safe functionality is enabled,
151 shares will read config file of share source too.
151 shares will read config file of share source too.
152 `<share-source/.hg/hgrc>` is read before reading `<repo/.hg/hgrc>`.
152 `<share-source/.hg/hgrc>` is read before reading `<repo/.hg/hgrc>`.
153
153
154 For configs which should not be shared, `<repo/.hg/hgrc-not-shared>`
154 For configs which should not be shared, `<repo/.hg/hgrc-not-shared>`
155 should be used.
155 should be used.
156
156
157 Syntax
157 Syntax
158 ======
158 ======
159
159
160 A configuration file consists of sections, led by a ``[section]`` header
160 A configuration file consists of sections, led by a ``[section]`` header
161 and followed by ``name = value`` entries (sometimes called
161 and followed by ``name = value`` entries (sometimes called
162 ``configuration keys``)::
162 ``configuration keys``)::
163
163
164 [spam]
164 [spam]
165 eggs=ham
165 eggs=ham
166 green=
166 green=
167 eggs
167 eggs
168
168
169 Each line contains one entry. If the lines that follow are indented,
169 Each line contains one entry. If the lines that follow are indented,
170 they are treated as continuations of that entry. Leading whitespace is
170 they are treated as continuations of that entry. Leading whitespace is
171 removed from values. Empty lines are skipped. Lines beginning with
171 removed from values. Empty lines are skipped. Lines beginning with
172 ``#`` or ``;`` are ignored and may be used to provide comments.
172 ``#`` or ``;`` are ignored and may be used to provide comments.
173
173
174 Configuration keys can be set multiple times, in which case Mercurial
174 Configuration keys can be set multiple times, in which case Mercurial
175 will use the value that was configured last. As an example::
175 will use the value that was configured last. As an example::
176
176
177 [spam]
177 [spam]
178 eggs=large
178 eggs=large
179 ham=serrano
179 ham=serrano
180 eggs=small
180 eggs=small
181
181
182 This would set the configuration key named ``eggs`` to ``small``.
182 This would set the configuration key named ``eggs`` to ``small``.
183
183
184 It is also possible to define a section multiple times. A section can
184 It is also possible to define a section multiple times. A section can
185 be redefined on the same and/or on different configuration files. For
185 be redefined on the same and/or on different configuration files. For
186 example::
186 example::
187
187
188 [foo]
188 [foo]
189 eggs=large
189 eggs=large
190 ham=serrano
190 ham=serrano
191 eggs=small
191 eggs=small
192
192
193 [bar]
193 [bar]
194 eggs=ham
194 eggs=ham
195 green=
195 green=
196 eggs
196 eggs
197
197
198 [foo]
198 [foo]
199 ham=prosciutto
199 ham=prosciutto
200 eggs=medium
200 eggs=medium
201 bread=toasted
201 bread=toasted
202
202
203 This would set the ``eggs``, ``ham``, and ``bread`` configuration keys
203 This would set the ``eggs``, ``ham``, and ``bread`` configuration keys
204 of the ``foo`` section to ``medium``, ``prosciutto``, and ``toasted``,
204 of the ``foo`` section to ``medium``, ``prosciutto``, and ``toasted``,
205 respectively. As you can see the only thing that matters is the last
205 respectively. As you can see the only thing that matters is the last
206 value that was set for each of the configuration keys.
206 value that was set for each of the configuration keys.
207
207
208 If a configuration key is set multiple times in different
208 If a configuration key is set multiple times in different
209 configuration files the final value will depend on the order in which
209 configuration files the final value will depend on the order in which
210 the different configuration files are read, with settings from earlier
210 the different configuration files are read, with settings from earlier
211 paths overriding later ones as described in the ``Files`` section
211 paths overriding later ones as described in the ``Files`` section
212 above.
212 above.
213
213
214 A line of the form ``%include file`` will include ``file`` into the
214 A line of the form ``%include file`` will include ``file`` into the
215 current configuration file. The inclusion is recursive, which means
215 current configuration file. The inclusion is recursive, which means
216 that included files can include other files. Filenames are relative to
216 that included files can include other files. Filenames are relative to
217 the configuration file in which the ``%include`` directive is found.
217 the configuration file in which the ``%include`` directive is found.
218 Environment variables and ``~user`` constructs are expanded in
218 Environment variables and ``~user`` constructs are expanded in
219 ``file``. This lets you do something like::
219 ``file``. This lets you do something like::
220
220
221 %include ~/.hgrc.d/$HOST.rc
221 %include ~/.hgrc.d/$HOST.rc
222
222
223 to include a different configuration file on each computer you use.
223 to include a different configuration file on each computer you use.
224
224
225 A line with ``%unset name`` will remove ``name`` from the current
225 A line with ``%unset name`` will remove ``name`` from the current
226 section, if it has been set previously.
226 section, if it has been set previously.
227
227
228 The values are either free-form text strings, lists of text strings,
228 The values are either free-form text strings, lists of text strings,
229 or Boolean values. Boolean values can be set to true using any of "1",
229 or Boolean values. Boolean values can be set to true using any of "1",
230 "yes", "true", or "on" and to false using "0", "no", "false", or "off"
230 "yes", "true", or "on" and to false using "0", "no", "false", or "off"
231 (all case insensitive).
231 (all case insensitive).
232
232
233 List values are separated by whitespace or comma, except when values are
233 List values are separated by whitespace or comma, except when values are
234 placed in double quotation marks::
234 placed in double quotation marks::
235
235
236 allow_read = "John Doe, PhD", brian, betty
236 allow_read = "John Doe, PhD", brian, betty
237
237
238 Quotation marks can be escaped by prefixing them with a backslash. Only
238 Quotation marks can be escaped by prefixing them with a backslash. Only
239 quotation marks at the beginning of a word are counted as a quotation
239 quotation marks at the beginning of a word are counted as a quotation
240 (e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
240 (e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
241
241
242 Sections
242 Sections
243 ========
243 ========
244
244
245 This section describes the different sections that may appear in a
245 This section describes the different sections that may appear in a
246 Mercurial configuration file, the purpose of each section, its possible
246 Mercurial configuration file, the purpose of each section, its possible
247 keys, and their possible values.
247 keys, and their possible values.
248
248
249 ``alias``
249 ``alias``
250 ---------
250 ---------
251
251
252 Defines command aliases.
252 Defines command aliases.
253
253
254 Aliases allow you to define your own commands in terms of other
254 Aliases allow you to define your own commands in terms of other
255 commands (or aliases), optionally including arguments. Positional
255 commands (or aliases), optionally including arguments. Positional
256 arguments in the form of ``$1``, ``$2``, etc. in the alias definition
256 arguments in the form of ``$1``, ``$2``, etc. in the alias definition
257 are expanded by Mercurial before execution. Positional arguments not
257 are expanded by Mercurial before execution. Positional arguments not
258 already used by ``$N`` in the definition are put at the end of the
258 already used by ``$N`` in the definition are put at the end of the
259 command to be executed.
259 command to be executed.
260
260
261 Alias definitions consist of lines of the form::
261 Alias definitions consist of lines of the form::
262
262
263 <alias> = <command> [<argument>]...
263 <alias> = <command> [<argument>]...
264
264
265 For example, this definition::
265 For example, this definition::
266
266
267 latest = log --limit 5
267 latest = log --limit 5
268
268
269 creates a new command ``latest`` that shows only the five most recent
269 creates a new command ``latest`` that shows only the five most recent
270 changesets. You can define subsequent aliases using earlier ones::
270 changesets. You can define subsequent aliases using earlier ones::
271
271
272 stable5 = latest -b stable
272 stable5 = latest -b stable
273
273
274 .. note::
274 .. note::
275
275
276 It is possible to create aliases with the same names as
276 It is possible to create aliases with the same names as
277 existing commands, which will then override the original
277 existing commands, which will then override the original
278 definitions. This is almost always a bad idea!
278 definitions. This is almost always a bad idea!
279
279
280 An alias can start with an exclamation point (``!``) to make it a
280 An alias can start with an exclamation point (``!``) to make it a
281 shell alias. A shell alias is executed with the shell and will let you
281 shell alias. A shell alias is executed with the shell and will let you
282 run arbitrary commands. As an example, ::
282 run arbitrary commands. As an example, ::
283
283
284 echo = !echo $@
284 echo = !echo $@
285
285
286 will let you do ``hg echo foo`` to have ``foo`` printed in your
286 will let you do ``hg echo foo`` to have ``foo`` printed in your
287 terminal. A better example might be::
287 terminal. A better example might be::
288
288
289 purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
289 purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
290
290
291 which will make ``hg purge`` delete all unknown files in the
291 which will make ``hg purge`` delete all unknown files in the
292 repository in the same manner as the purge extension.
292 repository in the same manner as the purge extension.
293
293
294 Positional arguments like ``$1``, ``$2``, etc. in the alias definition
294 Positional arguments like ``$1``, ``$2``, etc. in the alias definition
295 expand to the command arguments. Unmatched arguments are
295 expand to the command arguments. Unmatched arguments are
296 removed. ``$0`` expands to the alias name and ``$@`` expands to all
296 removed. ``$0`` expands to the alias name and ``$@`` expands to all
297 arguments separated by a space. ``"$@"`` (with quotes) expands to all
297 arguments separated by a space. ``"$@"`` (with quotes) expands to all
298 arguments quoted individually and separated by a space. These expansions
298 arguments quoted individually and separated by a space. These expansions
299 happen before the command is passed to the shell.
299 happen before the command is passed to the shell.
300
300
301 Shell aliases are executed in an environment where ``$HG`` expands to
301 Shell aliases are executed in an environment where ``$HG`` expands to
302 the path of the Mercurial that was used to execute the alias. This is
302 the path of the Mercurial that was used to execute the alias. This is
303 useful when you want to call further Mercurial commands in a shell
303 useful when you want to call further Mercurial commands in a shell
304 alias, as was done above for the purge alias. In addition,
304 alias, as was done above for the purge alias. In addition,
305 ``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg
305 ``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg
306 echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.
306 echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.
307
307
308 .. note::
308 .. note::
309
309
310 Some global configuration options such as ``-R`` are
310 Some global configuration options such as ``-R`` are
311 processed before shell aliases and will thus not be passed to
311 processed before shell aliases and will thus not be passed to
312 aliases.
312 aliases.
313
313
314
314
315 ``annotate``
315 ``annotate``
316 ------------
316 ------------
317
317
318 Settings used when displaying file annotations. All values are
318 Settings used when displaying file annotations. All values are
319 Booleans and default to False. See :hg:`help config.diff` for
319 Booleans and default to False. See :hg:`help config.diff` for
320 related options for the diff command.
320 related options for the diff command.
321
321
322 ``ignorews``
322 ``ignorews``
323 Ignore white space when comparing lines.
323 Ignore white space when comparing lines.
324
324
325 ``ignorewseol``
325 ``ignorewseol``
326 Ignore white space at the end of a line when comparing lines.
326 Ignore white space at the end of a line when comparing lines.
327
327
328 ``ignorewsamount``
328 ``ignorewsamount``
329 Ignore changes in the amount of white space.
329 Ignore changes in the amount of white space.
330
330
331 ``ignoreblanklines``
331 ``ignoreblanklines``
332 Ignore changes whose lines are all blank.
332 Ignore changes whose lines are all blank.
333
333
334
334
335 ``auth``
335 ``auth``
336 --------
336 --------
337
337
338 Authentication credentials and other authentication-like configuration
338 Authentication credentials and other authentication-like configuration
339 for HTTP connections. This section allows you to store usernames and
339 for HTTP connections. This section allows you to store usernames and
340 passwords for use when logging *into* HTTP servers. See
340 passwords for use when logging *into* HTTP servers. See
341 :hg:`help config.web` if you want to configure *who* can login to
341 :hg:`help config.web` if you want to configure *who* can login to
342 your HTTP server.
342 your HTTP server.
343
343
344 The following options apply to all hosts.
344 The following options apply to all hosts.
345
345
346 ``cookiefile``
346 ``cookiefile``
347 Path to a file containing HTTP cookie lines. Cookies matching a
347 Path to a file containing HTTP cookie lines. Cookies matching a
348 host will be sent automatically.
348 host will be sent automatically.
349
349
350 The file format uses the Mozilla cookies.txt format, which defines cookies
350 The file format uses the Mozilla cookies.txt format, which defines cookies
351 on their own lines. Each line contains 7 fields delimited by the tab
351 on their own lines. Each line contains 7 fields delimited by the tab
352 character (domain, is_domain_cookie, path, is_secure, expires, name,
352 character (domain, is_domain_cookie, path, is_secure, expires, name,
353 value). For more info, do an Internet search for "Netscape cookies.txt
353 value). For more info, do an Internet search for "Netscape cookies.txt
354 format."
354 format."
355
355
356 Note: the cookies parser does not handle port numbers on domains. You
356 Note: the cookies parser does not handle port numbers on domains. You
357 will need to remove ports from the domain for the cookie to be recognized.
357 will need to remove ports from the domain for the cookie to be recognized.
358 This could result in a cookie being disclosed to an unwanted server.
358 This could result in a cookie being disclosed to an unwanted server.
359
359
360 The cookies file is read-only.
360 The cookies file is read-only.
361
361
362 Other options in this section are grouped by name and have the following
362 Other options in this section are grouped by name and have the following
363 format::
363 format::
364
364
365 <name>.<argument> = <value>
365 <name>.<argument> = <value>
366
366
367 where ``<name>`` is used to group arguments into authentication
367 where ``<name>`` is used to group arguments into authentication
368 entries. Example::
368 entries. Example::
369
369
370 foo.prefix = hg.intevation.de/mercurial
370 foo.prefix = hg.intevation.de/mercurial
371 foo.username = foo
371 foo.username = foo
372 foo.password = bar
372 foo.password = bar
373 foo.schemes = http https
373 foo.schemes = http https
374
374
375 bar.prefix = secure.example.org
375 bar.prefix = secure.example.org
376 bar.key = path/to/file.key
376 bar.key = path/to/file.key
377 bar.cert = path/to/file.cert
377 bar.cert = path/to/file.cert
378 bar.schemes = https
378 bar.schemes = https
379
379
380 Supported arguments:
380 Supported arguments:
381
381
382 ``prefix``
382 ``prefix``
383 Either ``*`` or a URI prefix with or without the scheme part.
383 Either ``*`` or a URI prefix with or without the scheme part.
384 The authentication entry with the longest matching prefix is used
384 The authentication entry with the longest matching prefix is used
385 (where ``*`` matches everything and counts as a match of length
385 (where ``*`` matches everything and counts as a match of length
386 1). If the prefix doesn't include a scheme, the match is performed
386 1). If the prefix doesn't include a scheme, the match is performed
387 against the URI with its scheme stripped as well, and the schemes
387 against the URI with its scheme stripped as well, and the schemes
388 argument, q.v., is then subsequently consulted.
388 argument, q.v., is then subsequently consulted.
389
389
390 ``username``
390 ``username``
391 Optional. Username to authenticate with. If not given, and the
391 Optional. Username to authenticate with. If not given, and the
392 remote site requires basic or digest authentication, the user will
392 remote site requires basic or digest authentication, the user will
393 be prompted for it. Environment variables are expanded in the
393 be prompted for it. Environment variables are expanded in the
394 username letting you do ``foo.username = $USER``. If the URI
394 username letting you do ``foo.username = $USER``. If the URI
395 includes a username, only ``[auth]`` entries with a matching
395 includes a username, only ``[auth]`` entries with a matching
396 username or without a username will be considered.
396 username or without a username will be considered.
397
397
398 ``password``
398 ``password``
399 Optional. Password to authenticate with. If not given, and the
399 Optional. Password to authenticate with. If not given, and the
400 remote site requires basic or digest authentication, the user
400 remote site requires basic or digest authentication, the user
401 will be prompted for it.
401 will be prompted for it.
402
402
403 ``key``
403 ``key``
404 Optional. PEM encoded client certificate key file. Environment
404 Optional. PEM encoded client certificate key file. Environment
405 variables are expanded in the filename.
405 variables are expanded in the filename.
406
406
407 ``cert``
407 ``cert``
408 Optional. PEM encoded client certificate chain file. Environment
408 Optional. PEM encoded client certificate chain file. Environment
409 variables are expanded in the filename.
409 variables are expanded in the filename.
410
410
411 ``schemes``
411 ``schemes``
412 Optional. Space separated list of URI schemes to use this
412 Optional. Space separated list of URI schemes to use this
413 authentication entry with. Only used if the prefix doesn't include
413 authentication entry with. Only used if the prefix doesn't include
414 a scheme. Supported schemes are http and https. They will match
414 a scheme. Supported schemes are http and https. They will match
415 static-http and static-https respectively, as well.
415 static-http and static-https respectively, as well.
416 (default: https)
416 (default: https)
417
417
418 If no suitable authentication entry is found, the user is prompted
418 If no suitable authentication entry is found, the user is prompted
419 for credentials as usual if required by the remote.
419 for credentials as usual if required by the remote.
420
420
421 ``cmdserver``
421 ``cmdserver``
422 -------------
422 -------------
423
423
424 Controls command server settings. (ADVANCED)
424 Controls command server settings. (ADVANCED)
425
425
426 ``message-encodings``
426 ``message-encodings``
427 List of encodings for the ``m`` (message) channel. The first encoding
427 List of encodings for the ``m`` (message) channel. The first encoding
428 supported by the server will be selected and advertised in the hello
428 supported by the server will be selected and advertised in the hello
429 message. This is useful only when ``ui.message-output`` is set to
429 message. This is useful only when ``ui.message-output`` is set to
430 ``channel``. Supported encodings are ``cbor``.
430 ``channel``. Supported encodings are ``cbor``.
431
431
432 ``shutdown-on-interrupt``
432 ``shutdown-on-interrupt``
433 If set to false, the server's main loop will continue running after
433 If set to false, the server's main loop will continue running after
434 SIGINT received. ``runcommand`` requests can still be interrupted by
434 SIGINT received. ``runcommand`` requests can still be interrupted by
435 SIGINT. Close the write end of the pipe to shut down the server
435 SIGINT. Close the write end of the pipe to shut down the server
436 process gracefully.
436 process gracefully.
437 (default: True)
437 (default: True)
438
438
439 ``color``
439 ``color``
440 ---------
440 ---------
441
441
442 Configure the Mercurial color mode. For details about how to define your custom
442 Configure the Mercurial color mode. For details about how to define your custom
443 effect and style see :hg:`help color`.
443 effect and style see :hg:`help color`.
444
444
445 ``mode``
445 ``mode``
446 String: control the method used to output color. One of ``auto``, ``ansi``,
446 String: control the method used to output color. One of ``auto``, ``ansi``,
447 ``win32``, ``terminfo`` or ``debug``. In auto mode, Mercurial will
447 ``win32``, ``terminfo`` or ``debug``. In auto mode, Mercurial will
448 use ANSI mode by default (or win32 mode prior to Windows 10) if it detects a
448 use ANSI mode by default (or win32 mode prior to Windows 10) if it detects a
449 terminal. Any invalid value will disable color.
449 terminal. Any invalid value will disable color.
450
450
451 ``pagermode``
451 ``pagermode``
452 String: optional override of ``color.mode`` used with pager.
452 String: optional override of ``color.mode`` used with pager.
453
453
454 On some systems, terminfo mode may cause problems when using
454 On some systems, terminfo mode may cause problems when using
455 color with ``less -R`` as a pager program. less with the -R option
455 color with ``less -R`` as a pager program. less with the -R option
456 will only display ECMA-48 color codes, and terminfo mode may sometimes
456 will only display ECMA-48 color codes, and terminfo mode may sometimes
457 emit codes that less doesn't understand. You can work around this by
457 emit codes that less doesn't understand. You can work around this by
458 either using ansi mode (or auto mode), or by using less -r (which will
458 either using ansi mode (or auto mode), or by using less -r (which will
459 pass through all terminal control codes, not just color control
459 pass through all terminal control codes, not just color control
460 codes).
460 codes).
461
461
462 On some systems (such as MSYS in Windows), the terminal may support
462 On some systems (such as MSYS in Windows), the terminal may support
463 a different color mode than the pager program.
463 a different color mode than the pager program.
464
464
465 ``commands``
465 ``commands``
466 ------------
466 ------------
467
467
468 ``commit.post-status``
468 ``commit.post-status``
469 Show status of files in the working directory after successful commit.
469 Show status of files in the working directory after successful commit.
470 (default: False)
470 (default: False)
471
471
472 ``merge.require-rev``
472 ``merge.require-rev``
473 Require that the revision to merge the current commit with be specified on
473 Require that the revision to merge the current commit with be specified on
474 the command line. If this is enabled and a revision is not specified, the
474 the command line. If this is enabled and a revision is not specified, the
475 command aborts.
475 command aborts.
476 (default: False)
476 (default: False)
477
477
478 ``push.require-revs``
478 ``push.require-revs``
479 Require revisions to push be specified using one or more mechanisms such as
479 Require revisions to push be specified using one or more mechanisms such as
480 specifying them positionally on the command line, using ``-r``, ``-b``,
480 specifying them positionally on the command line, using ``-r``, ``-b``,
481 and/or ``-B`` on the command line, or using ``paths.<path>:pushrev`` in the
481 and/or ``-B`` on the command line, or using ``paths.<path>:pushrev`` in the
482 configuration. If this is enabled and revisions are not specified, the
482 configuration. If this is enabled and revisions are not specified, the
483 command aborts.
483 command aborts.
484 (default: False)
484 (default: False)
485
485
486 ``resolve.confirm``
486 ``resolve.confirm``
487 Confirm before performing action if no filename is passed.
487 Confirm before performing action if no filename is passed.
488 (default: False)
488 (default: False)
489
489
490 ``resolve.explicit-re-merge``
490 ``resolve.explicit-re-merge``
491 Require uses of ``hg resolve`` to specify which action it should perform,
491 Require uses of ``hg resolve`` to specify which action it should perform,
492 instead of re-merging files by default.
492 instead of re-merging files by default.
493 (default: False)
493 (default: False)
494
494
495 ``resolve.mark-check``
495 ``resolve.mark-check``
496 Determines what level of checking :hg:`resolve --mark` will perform before
496 Determines what level of checking :hg:`resolve --mark` will perform before
497 marking files as resolved. Valid values are ``none``, ``warn``, and
497 marking files as resolved. Valid values are ``none``, ``warn``, and
498 ``abort``. ``warn`` will output a warning listing the file(s) that still
498 ``abort``. ``warn`` will output a warning listing the file(s) that still
499 have conflict markers in them, but will still mark everything resolved.
499 have conflict markers in them, but will still mark everything resolved.
500 ``abort`` will output the same warning but will not mark things as resolved.
500 ``abort`` will output the same warning but will not mark things as resolved.
501 If --all is passed and this is set to ``abort``, only a warning will be
501 If --all is passed and this is set to ``abort``, only a warning will be
502 shown (an error will not be raised).
502 shown (an error will not be raised).
503 (default: ``none``)
503 (default: ``none``)
504
504
505 ``status.relative``
505 ``status.relative``
506 Make paths in :hg:`status` output relative to the current directory.
506 Make paths in :hg:`status` output relative to the current directory.
507 (default: False)
507 (default: False)
508
508
509 ``status.terse``
509 ``status.terse``
510 Default value for the --terse flag, which condenses status output.
510 Default value for the --terse flag, which condenses status output.
511 (default: empty)
511 (default: empty)
512
512
513 ``update.check``
513 ``update.check``
514 Determines what level of checking :hg:`update` will perform before moving
514 Determines what level of checking :hg:`update` will perform before moving
515 to a destination revision. Valid values are ``abort``, ``none``,
515 to a destination revision. Valid values are ``abort``, ``none``,
516 ``linear``, and ``noconflict``.
516 ``linear``, and ``noconflict``.
517
517
518 - ``abort`` always fails if the working directory has uncommitted changes.
518 - ``abort`` always fails if the working directory has uncommitted changes.
519
519
520 - ``none`` performs no checking, and may result in a merge with uncommitted changes.
520 - ``none`` performs no checking, and may result in a merge with uncommitted changes.
521
521
522 - ``linear`` allows any update as long as it follows a straight line in the
522 - ``linear`` allows any update as long as it follows a straight line in the
523 revision history, and may trigger a merge with uncommitted changes.
523 revision history, and may trigger a merge with uncommitted changes.
524
524
525 - ``noconflict`` will allow any update which would not trigger a merge with
525 - ``noconflict`` will allow any update which would not trigger a merge with
526 uncommitted changes, if any are present.
526 uncommitted changes, if any are present.
527
527
528 (default: ``linear``)
528 (default: ``linear``)
529
529
530 ``update.requiredest``
530 ``update.requiredest``
531 Require that the user pass a destination when running :hg:`update`.
531 Require that the user pass a destination when running :hg:`update`.
532 For example, :hg:`update .::` will be allowed, but a plain :hg:`update`
532 For example, :hg:`update .::` will be allowed, but a plain :hg:`update`
533 will be disallowed.
533 will be disallowed.
534 (default: False)
534 (default: False)
535
535
536 ``committemplate``
536 ``committemplate``
537 ------------------
537 ------------------
538
538
539 ``changeset``
539 ``changeset``
540 String: configuration in this section is used as the template to
540 String: configuration in this section is used as the template to
541 customize the text shown in the editor when committing.
541 customize the text shown in the editor when committing.
542
542
543 In addition to pre-defined template keywords, commit log specific one
543 In addition to pre-defined template keywords, commit log specific one
544 below can be used for customization:
544 below can be used for customization:
545
545
546 ``extramsg``
546 ``extramsg``
547 String: Extra message (typically 'Leave message empty to abort
547 String: Extra message (typically 'Leave message empty to abort
548 commit.'). This may be changed by some commands or extensions.
548 commit.'). This may be changed by some commands or extensions.
549
549
550 For example, the template configuration below shows as same text as
550 For example, the template configuration below shows as same text as
551 one shown by default::
551 one shown by default::
552
552
553 [committemplate]
553 [committemplate]
554 changeset = {desc}\n\n
554 changeset = {desc}\n\n
555 HG: Enter commit message. Lines beginning with 'HG:' are removed.
555 HG: Enter commit message. Lines beginning with 'HG:' are removed.
556 HG: {extramsg}
556 HG: {extramsg}
557 HG: --
557 HG: --
558 HG: user: {author}\n{ifeq(p2rev, "-1", "",
558 HG: user: {author}\n{ifeq(p2rev, "-1", "",
559 "HG: branch merge\n")
559 "HG: branch merge\n")
560 }HG: branch '{branch}'\n{if(activebookmark,
560 }HG: branch '{branch}'\n{if(activebookmark,
561 "HG: bookmark '{activebookmark}'\n") }{subrepos %
561 "HG: bookmark '{activebookmark}'\n") }{subrepos %
562 "HG: subrepo {subrepo}\n" }{file_adds %
562 "HG: subrepo {subrepo}\n" }{file_adds %
563 "HG: added {file}\n" }{file_mods %
563 "HG: added {file}\n" }{file_mods %
564 "HG: changed {file}\n" }{file_dels %
564 "HG: changed {file}\n" }{file_dels %
565 "HG: removed {file}\n" }{if(files, "",
565 "HG: removed {file}\n" }{if(files, "",
566 "HG: no files changed\n")}
566 "HG: no files changed\n")}
567
567
568 ``diff()``
568 ``diff()``
569 String: show the diff (see :hg:`help templates` for detail)
569 String: show the diff (see :hg:`help templates` for detail)
570
570
571 Sometimes it is helpful to show the diff of the changeset in the editor without
571 Sometimes it is helpful to show the diff of the changeset in the editor without
572 having to prefix 'HG: ' to each line so that highlighting works correctly. For
572 having to prefix 'HG: ' to each line so that highlighting works correctly. For
573 this, Mercurial provides a special string which will ignore everything below
573 this, Mercurial provides a special string which will ignore everything below
574 it::
574 it::
575
575
576 HG: ------------------------ >8 ------------------------
576 HG: ------------------------ >8 ------------------------
577
577
578 For example, the template configuration below will show the diff below the
578 For example, the template configuration below will show the diff below the
579 extra message::
579 extra message::
580
580
581 [committemplate]
581 [committemplate]
582 changeset = {desc}\n\n
582 changeset = {desc}\n\n
583 HG: Enter commit message. Lines beginning with 'HG:' are removed.
583 HG: Enter commit message. Lines beginning with 'HG:' are removed.
584 HG: {extramsg}
584 HG: {extramsg}
585 HG: ------------------------ >8 ------------------------
585 HG: ------------------------ >8 ------------------------
586 HG: Do not touch the line above.
586 HG: Do not touch the line above.
587 HG: Everything below will be removed.
587 HG: Everything below will be removed.
588 {diff()}
588 {diff()}
589
589
590 .. note::
590 .. note::
591
591
592 For some problematic encodings (see :hg:`help win32mbcs` for
592 For some problematic encodings (see :hg:`help win32mbcs` for
593 detail), this customization should be configured carefully, to
593 detail), this customization should be configured carefully, to
594 avoid showing broken characters.
594 avoid showing broken characters.
595
595
596 For example, if a multibyte character ending with backslash (0x5c) is
596 For example, if a multibyte character ending with backslash (0x5c) is
597 followed by the ASCII character 'n' in the customized template,
597 followed by the ASCII character 'n' in the customized template,
598 the sequence of backslash and 'n' is treated as line-feed unexpectedly
598 the sequence of backslash and 'n' is treated as line-feed unexpectedly
599 (and the multibyte character is broken, too).
599 (and the multibyte character is broken, too).
600
600
601 Customized template is used for commands below (``--edit`` may be
601 Customized template is used for commands below (``--edit`` may be
602 required):
602 required):
603
603
604 - :hg:`backout`
604 - :hg:`backout`
605 - :hg:`commit`
605 - :hg:`commit`
606 - :hg:`fetch` (for merge commit only)
606 - :hg:`fetch` (for merge commit only)
607 - :hg:`graft`
607 - :hg:`graft`
608 - :hg:`histedit`
608 - :hg:`histedit`
609 - :hg:`import`
609 - :hg:`import`
610 - :hg:`qfold`, :hg:`qnew` and :hg:`qrefresh`
610 - :hg:`qfold`, :hg:`qnew` and :hg:`qrefresh`
611 - :hg:`rebase`
611 - :hg:`rebase`
612 - :hg:`shelve`
612 - :hg:`shelve`
613 - :hg:`sign`
613 - :hg:`sign`
614 - :hg:`tag`
614 - :hg:`tag`
615 - :hg:`transplant`
615 - :hg:`transplant`
616
616
617 Configuring items below instead of ``changeset`` allows showing
617 Configuring items below instead of ``changeset`` allows showing
618 customized message only for specific actions, or showing different
618 customized message only for specific actions, or showing different
619 messages for each action.
619 messages for each action.
620
620
621 - ``changeset.backout`` for :hg:`backout`
621 - ``changeset.backout`` for :hg:`backout`
622 - ``changeset.commit.amend.merge`` for :hg:`commit --amend` on merges
622 - ``changeset.commit.amend.merge`` for :hg:`commit --amend` on merges
623 - ``changeset.commit.amend.normal`` for :hg:`commit --amend` on other
623 - ``changeset.commit.amend.normal`` for :hg:`commit --amend` on other
624 - ``changeset.commit.normal.merge`` for :hg:`commit` on merges
624 - ``changeset.commit.normal.merge`` for :hg:`commit` on merges
625 - ``changeset.commit.normal.normal`` for :hg:`commit` on other
625 - ``changeset.commit.normal.normal`` for :hg:`commit` on other
626 - ``changeset.fetch`` for :hg:`fetch` (implying merge commit)
626 - ``changeset.fetch`` for :hg:`fetch` (implying merge commit)
627 - ``changeset.gpg.sign`` for :hg:`sign`
627 - ``changeset.gpg.sign`` for :hg:`sign`
628 - ``changeset.graft`` for :hg:`graft`
628 - ``changeset.graft`` for :hg:`graft`
629 - ``changeset.histedit.edit`` for ``edit`` of :hg:`histedit`
629 - ``changeset.histedit.edit`` for ``edit`` of :hg:`histedit`
630 - ``changeset.histedit.fold`` for ``fold`` of :hg:`histedit`
630 - ``changeset.histedit.fold`` for ``fold`` of :hg:`histedit`
631 - ``changeset.histedit.mess`` for ``mess`` of :hg:`histedit`
631 - ``changeset.histedit.mess`` for ``mess`` of :hg:`histedit`
632 - ``changeset.histedit.pick`` for ``pick`` of :hg:`histedit`
632 - ``changeset.histedit.pick`` for ``pick`` of :hg:`histedit`
633 - ``changeset.import.bypass`` for :hg:`import --bypass`
633 - ``changeset.import.bypass`` for :hg:`import --bypass`
634 - ``changeset.import.normal.merge`` for :hg:`import` on merges
634 - ``changeset.import.normal.merge`` for :hg:`import` on merges
635 - ``changeset.import.normal.normal`` for :hg:`import` on other
635 - ``changeset.import.normal.normal`` for :hg:`import` on other
636 - ``changeset.mq.qnew`` for :hg:`qnew`
636 - ``changeset.mq.qnew`` for :hg:`qnew`
637 - ``changeset.mq.qfold`` for :hg:`qfold`
637 - ``changeset.mq.qfold`` for :hg:`qfold`
638 - ``changeset.mq.qrefresh`` for :hg:`qrefresh`
638 - ``changeset.mq.qrefresh`` for :hg:`qrefresh`
639 - ``changeset.rebase.collapse`` for :hg:`rebase --collapse`
639 - ``changeset.rebase.collapse`` for :hg:`rebase --collapse`
640 - ``changeset.rebase.merge`` for :hg:`rebase` on merges
640 - ``changeset.rebase.merge`` for :hg:`rebase` on merges
641 - ``changeset.rebase.normal`` for :hg:`rebase` on other
641 - ``changeset.rebase.normal`` for :hg:`rebase` on other
642 - ``changeset.shelve.shelve`` for :hg:`shelve`
642 - ``changeset.shelve.shelve`` for :hg:`shelve`
643 - ``changeset.tag.add`` for :hg:`tag` without ``--remove``
643 - ``changeset.tag.add`` for :hg:`tag` without ``--remove``
644 - ``changeset.tag.remove`` for :hg:`tag --remove`
644 - ``changeset.tag.remove`` for :hg:`tag --remove`
645 - ``changeset.transplant.merge`` for :hg:`transplant` on merges
645 - ``changeset.transplant.merge`` for :hg:`transplant` on merges
646 - ``changeset.transplant.normal`` for :hg:`transplant` on other
646 - ``changeset.transplant.normal`` for :hg:`transplant` on other
647
647
648 These dot-separated lists of names are treated as hierarchical ones.
648 These dot-separated lists of names are treated as hierarchical ones.
649 For example, ``changeset.tag.remove`` customizes the commit message
649 For example, ``changeset.tag.remove`` customizes the commit message
650 only for :hg:`tag --remove`, but ``changeset.tag`` customizes the
650 only for :hg:`tag --remove`, but ``changeset.tag`` customizes the
651 commit message for :hg:`tag` regardless of ``--remove`` option.
651 commit message for :hg:`tag` regardless of ``--remove`` option.
652
652
653 When the external editor is invoked for a commit, the corresponding
653 When the external editor is invoked for a commit, the corresponding
654 dot-separated list of names without the ``changeset.`` prefix
654 dot-separated list of names without the ``changeset.`` prefix
655 (e.g. ``commit.normal.normal``) is in the ``HGEDITFORM`` environment
655 (e.g. ``commit.normal.normal``) is in the ``HGEDITFORM`` environment
656 variable.
656 variable.
657
657
658 In this section, items other than ``changeset`` can be referred from
658 In this section, items other than ``changeset`` can be referred from
659 others. For example, the configuration to list committed files up
659 others. For example, the configuration to list committed files up
660 below can be referred as ``{listupfiles}``::
660 below can be referred as ``{listupfiles}``::
661
661
662 [committemplate]
662 [committemplate]
663 listupfiles = {file_adds %
663 listupfiles = {file_adds %
664 "HG: added {file}\n" }{file_mods %
664 "HG: added {file}\n" }{file_mods %
665 "HG: changed {file}\n" }{file_dels %
665 "HG: changed {file}\n" }{file_dels %
666 "HG: removed {file}\n" }{if(files, "",
666 "HG: removed {file}\n" }{if(files, "",
667 "HG: no files changed\n")}
667 "HG: no files changed\n")}
668
668
669 ``decode/encode``
669 ``decode/encode``
670 -----------------
670 -----------------
671
671
672 Filters for transforming files on checkout/checkin. This would
672 Filters for transforming files on checkout/checkin. This would
673 typically be used for newline processing or other
673 typically be used for newline processing or other
674 localization/canonicalization of files.
674 localization/canonicalization of files.
675
675
676 Filters consist of a filter pattern followed by a filter command.
676 Filters consist of a filter pattern followed by a filter command.
677 Filter patterns are globs by default, rooted at the repository root.
677 Filter patterns are globs by default, rooted at the repository root.
678 For example, to match any file ending in ``.txt`` in the root
678 For example, to match any file ending in ``.txt`` in the root
679 directory only, use the pattern ``*.txt``. To match any file ending
679 directory only, use the pattern ``*.txt``. To match any file ending
680 in ``.c`` anywhere in the repository, use the pattern ``**.c``.
680 in ``.c`` anywhere in the repository, use the pattern ``**.c``.
681 For each file only the first matching filter applies.
681 For each file only the first matching filter applies.
682
682
683 The filter command can start with a specifier, either ``pipe:`` or
683 The filter command can start with a specifier, either ``pipe:`` or
684 ``tempfile:``. If no specifier is given, ``pipe:`` is used by default.
684 ``tempfile:``. If no specifier is given, ``pipe:`` is used by default.
685
685
686 A ``pipe:`` command must accept data on stdin and return the transformed
686 A ``pipe:`` command must accept data on stdin and return the transformed
687 data on stdout.
687 data on stdout.
688
688
689 Pipe example::
689 Pipe example::
690
690
691 [encode]
691 [encode]
692 # uncompress gzip files on checkin to improve delta compression
692 # uncompress gzip files on checkin to improve delta compression
693 # note: not necessarily a good idea, just an example
693 # note: not necessarily a good idea, just an example
694 *.gz = pipe: gunzip
694 *.gz = pipe: gunzip
695
695
696 [decode]
696 [decode]
697 # recompress gzip files when writing them to the working dir (we
697 # recompress gzip files when writing them to the working dir (we
698 # can safely omit "pipe:", because it's the default)
698 # can safely omit "pipe:", because it's the default)
699 *.gz = gzip
699 *.gz = gzip
700
700
701 A ``tempfile:`` command is a template. The string ``INFILE`` is replaced
701 A ``tempfile:`` command is a template. The string ``INFILE`` is replaced
702 with the name of a temporary file that contains the data to be
702 with the name of a temporary file that contains the data to be
703 filtered by the command. The string ``OUTFILE`` is replaced with the name
703 filtered by the command. The string ``OUTFILE`` is replaced with the name
704 of an empty temporary file, where the filtered data must be written by
704 of an empty temporary file, where the filtered data must be written by
705 the command.
705 the command.
706
706
707 .. container:: windows
707 .. container:: windows
708
708
709 .. note::
709 .. note::
710
710
711 The tempfile mechanism is recommended for Windows systems,
711 The tempfile mechanism is recommended for Windows systems,
712 where the standard shell I/O redirection operators often have
712 where the standard shell I/O redirection operators often have
713 strange effects and may corrupt the contents of your files.
713 strange effects and may corrupt the contents of your files.
714
714
715 This filter mechanism is used internally by the ``eol`` extension to
715 This filter mechanism is used internally by the ``eol`` extension to
716 translate line ending characters between Windows (CRLF) and Unix (LF)
716 translate line ending characters between Windows (CRLF) and Unix (LF)
717 format. We suggest you use the ``eol`` extension for convenience.
717 format. We suggest you use the ``eol`` extension for convenience.
718
718
719
719
720 ``defaults``
720 ``defaults``
721 ------------
721 ------------
722
722
723 (defaults are deprecated. Don't use them. Use aliases instead.)
723 (defaults are deprecated. Don't use them. Use aliases instead.)
724
724
725 Use the ``[defaults]`` section to define command defaults, i.e. the
725 Use the ``[defaults]`` section to define command defaults, i.e. the
726 default options/arguments to pass to the specified commands.
726 default options/arguments to pass to the specified commands.
727
727
728 The following example makes :hg:`log` run in verbose mode, and
728 The following example makes :hg:`log` run in verbose mode, and
729 :hg:`status` show only the modified files, by default::
729 :hg:`status` show only the modified files, by default::
730
730
731 [defaults]
731 [defaults]
732 log = -v
732 log = -v
733 status = -m
733 status = -m
734
734
735 The actual commands, instead of their aliases, must be used when
735 The actual commands, instead of their aliases, must be used when
736 defining command defaults. The command defaults will also be applied
736 defining command defaults. The command defaults will also be applied
737 to the aliases of the commands defined.
737 to the aliases of the commands defined.
738
738
739
739
740 ``diff``
740 ``diff``
741 --------
741 --------
742
742
743 Settings used when displaying diffs. Everything except for ``unified``
743 Settings used when displaying diffs. Everything except for ``unified``
744 is a Boolean and defaults to False. See :hg:`help config.annotate`
744 is a Boolean and defaults to False. See :hg:`help config.annotate`
745 for related options for the annotate command.
745 for related options for the annotate command.
746
746
747 ``git``
747 ``git``
748 Use git extended diff format.
748 Use git extended diff format.
749
749
750 ``nobinary``
750 ``nobinary``
751 Omit git binary patches.
751 Omit git binary patches.
752
752
753 ``nodates``
753 ``nodates``
754 Don't include dates in diff headers.
754 Don't include dates in diff headers.
755
755
756 ``noprefix``
756 ``noprefix``
757 Omit 'a/' and 'b/' prefixes from filenames. Ignored in plain mode.
757 Omit 'a/' and 'b/' prefixes from filenames. Ignored in plain mode.
758
758
759 ``showfunc``
759 ``showfunc``
760 Show which function each change is in.
760 Show which function each change is in.
761
761
762 ``ignorews``
762 ``ignorews``
763 Ignore white space when comparing lines.
763 Ignore white space when comparing lines.
764
764
765 ``ignorewsamount``
765 ``ignorewsamount``
766 Ignore changes in the amount of white space.
766 Ignore changes in the amount of white space.
767
767
768 ``ignoreblanklines``
768 ``ignoreblanklines``
769 Ignore changes whose lines are all blank.
769 Ignore changes whose lines are all blank.
770
770
771 ``unified``
771 ``unified``
772 Number of lines of context to show.
772 Number of lines of context to show.
773
773
774 ``word-diff``
774 ``word-diff``
775 Highlight changed words.
775 Highlight changed words.
776
776
777 ``email``
777 ``email``
778 ---------
778 ---------
779
779
780 Settings for extensions that send email messages.
780 Settings for extensions that send email messages.
781
781
782 ``from``
782 ``from``
783 Optional. Email address to use in "From" header and SMTP envelope
783 Optional. Email address to use in "From" header and SMTP envelope
784 of outgoing messages.
784 of outgoing messages.
785
785
786 ``to``
786 ``to``
787 Optional. Comma-separated list of recipients' email addresses.
787 Optional. Comma-separated list of recipients' email addresses.
788
788
789 ``cc``
789 ``cc``
790 Optional. Comma-separated list of carbon copy recipients'
790 Optional. Comma-separated list of carbon copy recipients'
791 email addresses.
791 email addresses.
792
792
793 ``bcc``
793 ``bcc``
794 Optional. Comma-separated list of blind carbon copy recipients'
794 Optional. Comma-separated list of blind carbon copy recipients'
795 email addresses.
795 email addresses.
796
796
797 ``method``
797 ``method``
798 Optional. Method to use to send email messages. If value is ``smtp``
798 Optional. Method to use to send email messages. If value is ``smtp``
799 (default), use SMTP (see the ``[smtp]`` section for configuration).
799 (default), use SMTP (see the ``[smtp]`` section for configuration).
800 Otherwise, use as name of program to run that acts like sendmail
800 Otherwise, use as name of program to run that acts like sendmail
801 (takes ``-f`` option for sender, list of recipients on command line,
801 (takes ``-f`` option for sender, list of recipients on command line,
802 message on stdin). Normally, setting this to ``sendmail`` or
802 message on stdin). Normally, setting this to ``sendmail`` or
803 ``/usr/sbin/sendmail`` is enough to use sendmail to send messages.
803 ``/usr/sbin/sendmail`` is enough to use sendmail to send messages.
804
804
805 ``charsets``
805 ``charsets``
806 Optional. Comma-separated list of character sets considered
806 Optional. Comma-separated list of character sets considered
807 convenient for recipients. Addresses, headers, and parts not
807 convenient for recipients. Addresses, headers, and parts not
808 containing patches of outgoing messages will be encoded in the
808 containing patches of outgoing messages will be encoded in the
809 first character set to which conversion from local encoding
809 first character set to which conversion from local encoding
810 (``$HGENCODING``, ``ui.fallbackencoding``) succeeds. If correct
810 (``$HGENCODING``, ``ui.fallbackencoding``) succeeds. If correct
811 conversion fails, the text in question is sent as is.
811 conversion fails, the text in question is sent as is.
812 (default: '')
812 (default: '')
813
813
814 Order of outgoing email character sets:
814 Order of outgoing email character sets:
815
815
816 1. ``us-ascii``: always first, regardless of settings
816 1. ``us-ascii``: always first, regardless of settings
817 2. ``email.charsets``: in order given by user
817 2. ``email.charsets``: in order given by user
818 3. ``ui.fallbackencoding``: if not in email.charsets
818 3. ``ui.fallbackencoding``: if not in email.charsets
819 4. ``$HGENCODING``: if not in email.charsets
819 4. ``$HGENCODING``: if not in email.charsets
820 5. ``utf-8``: always last, regardless of settings
820 5. ``utf-8``: always last, regardless of settings
821
821
822 Email example::
822 Email example::
823
823
824 [email]
824 [email]
825 from = Joseph User <joe.user@example.com>
825 from = Joseph User <joe.user@example.com>
826 method = /usr/sbin/sendmail
826 method = /usr/sbin/sendmail
827 # charsets for western Europeans
827 # charsets for western Europeans
828 # us-ascii, utf-8 omitted, as they are tried first and last
828 # us-ascii, utf-8 omitted, as they are tried first and last
829 charsets = iso-8859-1, iso-8859-15, windows-1252
829 charsets = iso-8859-1, iso-8859-15, windows-1252
830
830
831
831
832 ``extensions``
832 ``extensions``
833 --------------
833 --------------
834
834
835 Mercurial has an extension mechanism for adding new features. To
835 Mercurial has an extension mechanism for adding new features. To
836 enable an extension, create an entry for it in this section.
836 enable an extension, create an entry for it in this section.
837
837
838 If you know that the extension is already in Python's search path,
838 If you know that the extension is already in Python's search path,
839 you can give the name of the module, followed by ``=``, with nothing
839 you can give the name of the module, followed by ``=``, with nothing
840 after the ``=``.
840 after the ``=``.
841
841
842 Otherwise, give a name that you choose, followed by ``=``, followed by
842 Otherwise, give a name that you choose, followed by ``=``, followed by
843 the path to the ``.py`` file (including the file name extension) that
843 the path to the ``.py`` file (including the file name extension) that
844 defines the extension.
844 defines the extension.
845
845
846 To explicitly disable an extension that is enabled in an hgrc of
846 To explicitly disable an extension that is enabled in an hgrc of
847 broader scope, prepend its path with ``!``, as in ``foo = !/ext/path``
847 broader scope, prepend its path with ``!``, as in ``foo = !/ext/path``
848 or ``foo = !`` when path is not supplied.
848 or ``foo = !`` when path is not supplied.
849
849
850 Example for ``~/.hgrc``::
850 Example for ``~/.hgrc``::
851
851
852 [extensions]
852 [extensions]
853 # (the churn extension will get loaded from Mercurial's path)
853 # (the churn extension will get loaded from Mercurial's path)
854 churn =
854 churn =
855 # (this extension will get loaded from the file specified)
855 # (this extension will get loaded from the file specified)
856 myfeature = ~/.hgext/myfeature.py
856 myfeature = ~/.hgext/myfeature.py
857
857
858 If an extension fails to load, a warning will be issued, and Mercurial will
858 If an extension fails to load, a warning will be issued, and Mercurial will
859 proceed. To enforce that an extension must be loaded, one can set the `required`
859 proceed. To enforce that an extension must be loaded, one can set the `required`
860 suboption in the config::
860 suboption in the config::
861
861
862 [extensions]
862 [extensions]
863 myfeature = ~/.hgext/myfeature.py
863 myfeature = ~/.hgext/myfeature.py
864 myfeature:required = yes
864 myfeature:required = yes
865
865
866 To debug extension loading issue, one can add `--traceback` to their mercurial
866 To debug extension loading issue, one can add `--traceback` to their mercurial
867 invocation.
867 invocation.
868
868
869 A default setting can be set using the special `*` extension key::
869 A default setting can be set using the special `*` extension key::
870
870
871 [extensions]
871 [extensions]
872 *:required = yes
872 *:required = yes
873 myfeature = ~/.hgext/myfeature.py
873 myfeature = ~/.hgext/myfeature.py
874 rebase=
874 rebase=
875
875
876
876
877 ``format``
877 ``format``
878 ----------
878 ----------
879
879
880 Configuration that controls the repository format. Newer format options are more
880 Configuration that controls the repository format. Newer format options are more
881 powerful, but incompatible with some older versions of Mercurial. Format options
881 powerful, but incompatible with some older versions of Mercurial. Format options
882 are considered at repository initialization only. You need to make a new clone
882 are considered at repository initialization only. You need to make a new clone
883 for config changes to be taken into account.
883 for config changes to be taken into account.
884
884
885 For more details about repository format and version compatibility, see
885 For more details about repository format and version compatibility, see
886 https://www.mercurial-scm.org/wiki/MissingRequirement
886 https://www.mercurial-scm.org/wiki/MissingRequirement
887
887
888 ``usegeneraldelta``
888 ``usegeneraldelta``
889 Enable or disable the "generaldelta" repository format which improves
889 Enable or disable the "generaldelta" repository format which improves
890 repository compression by allowing "revlog" to store deltas against
890 repository compression by allowing "revlog" to store deltas against
891 arbitrary revisions instead of the previously stored one. This provides
891 arbitrary revisions instead of the previously stored one. This provides
892 significant improvement for repositories with branches.
892 significant improvement for repositories with branches.
893
893
894 Repositories with this on-disk format require Mercurial version 1.9.
894 Repositories with this on-disk format require Mercurial version 1.9.
895
895
896 Enabled by default.
896 Enabled by default.
897
897
898 ``dotencode``
898 ``dotencode``
899 Enable or disable the "dotencode" repository format which enhances
899 Enable or disable the "dotencode" repository format which enhances
900 the "fncache" repository format (which has to be enabled to use
900 the "fncache" repository format (which has to be enabled to use
901 dotencode) to avoid issues with filenames starting with "._" on
901 dotencode) to avoid issues with filenames starting with "._" on
902 Mac OS X and spaces on Windows.
902 Mac OS X and spaces on Windows.
903
903
904 Repositories with this on-disk format require Mercurial version 1.7.
904 Repositories with this on-disk format require Mercurial version 1.7.
905
905
906 Enabled by default.
906 Enabled by default.
907
907
908 ``usefncache``
908 ``usefncache``
909 Enable or disable the "fncache" repository format which enhances
909 Enable or disable the "fncache" repository format which enhances
910 the "store" repository format (which has to be enabled to use
910 the "store" repository format (which has to be enabled to use
911 fncache) to allow longer filenames and avoids using Windows
911 fncache) to allow longer filenames and avoids using Windows
912 reserved names, e.g. "nul".
912 reserved names, e.g. "nul".
913
913
914 Repositories with this on-disk format require Mercurial version 1.1.
914 Repositories with this on-disk format require Mercurial version 1.1.
915
915
916 Enabled by default.
916 Enabled by default.
917
917
918 ``use-dirstate-v2``
918 ``use-dirstate-v2``
919 Enable or disable the experimental "dirstate-v2" feature. The dirstate
919 Enable or disable the experimental "dirstate-v2" feature. The dirstate
920 functionality is shared by all commands interacting with the working copy.
920 functionality is shared by all commands interacting with the working copy.
921 The new version is more robust, faster and stores more information.
921 The new version is more robust, faster and stores more information.
922
922
923 The performance-improving version of this feature is currently only
923 The performance-improving version of this feature is currently only
924 implemented in Rust (see :hg:`help rust`), so people not using a version of
924 implemented in Rust (see :hg:`help rust`), so people not using a version of
925 Mercurial compiled with the Rust parts might actually suffer some slowdown.
925 Mercurial compiled with the Rust parts might actually suffer some slowdown.
926 For this reason, such versions will by default refuse to access repositories
926 For this reason, such versions will by default refuse to access repositories
927 with "dirstate-v2" enabled.
927 with "dirstate-v2" enabled.
928
928
929 This behavior can be adjusted via configuration: check
929 This behavior can be adjusted via configuration: check
930 :hg:`help config.storage.dirstate-v2.slow-path` for details.
930 :hg:`help config.storage.dirstate-v2.slow-path` for details.
931
931
932 Repositories with this on-disk format require Mercurial 6.0 or above.
932 Repositories with this on-disk format require Mercurial 6.0 or above.
933
933
934 By default this format variant is disabled if the fast implementation is not
934 By default this format variant is disabled if the fast implementation is not
935 available, and enabled by default if the fast implementation is available.
935 available, and enabled by default if the fast implementation is available.
936
936
937 To accommodate installations of Mercurial without the fast implementation,
937 To accommodate installations of Mercurial without the fast implementation,
938 you can downgrade your repository. To do so run the following command:
938 you can downgrade your repository. To do so run the following command:
939
939
940 $ hg debugupgraderepo \
940 $ hg debugupgraderepo \
941 --run \
941 --run \
942 --config format.use-dirstate-v2=False \
942 --config format.use-dirstate-v2=False \
943 --config storage.dirstate-v2.slow-path=allow
943 --config storage.dirstate-v2.slow-path=allow
944
944
945 For a more comprehensive guide, see :hg:`help internals.dirstate-v2`.
945 For a more comprehensive guide, see :hg:`help internals.dirstate-v2`.
946
946
947 ``use-dirstate-v2.automatic-upgrade-of-mismatching-repositories``
947 ``use-dirstate-v2.automatic-upgrade-of-mismatching-repositories``
948 When enabled, an automatic upgrade will be triggered when a repository format
948 When enabled, an automatic upgrade will be triggered when a repository format
949 does not match its `use-dirstate-v2` config.
949 does not match its `use-dirstate-v2` config.
950
950
951 This is an advanced behavior that most users will not need. We recommend you
951 This is an advanced behavior that most users will not need. We recommend you
952 don't use this unless you are a seasoned administrator of a Mercurial install
952 don't use this unless you are a seasoned administrator of a Mercurial install
953 base.
953 base.
954
954
955 Automatic upgrade means that any process accessing the repository will
955 Automatic upgrade means that any process accessing the repository will
956 upgrade the repository format to use `dirstate-v2`. This only triggers if a
956 upgrade the repository format to use `dirstate-v2`. This only triggers if a
957 change is needed. This also applies to operations that would have been
957 change is needed. This also applies to operations that would have been
958 read-only (like hg status).
958 read-only (like hg status).
959
959
960 If the repository cannot be locked, the automatic-upgrade operation will be
960 If the repository cannot be locked, the automatic-upgrade operation will be
961 skipped. The next operation will attempt it again.
961 skipped. The next operation will attempt it again.
962
962
963 This configuration will apply for moves in any direction, either adding the
963 This configuration will apply for moves in any direction, either adding the
964 `dirstate-v2` format if `format.use-dirstate-v2=yes` or removing the
964 `dirstate-v2` format if `format.use-dirstate-v2=yes` or removing the
965 `dirstate-v2` requirement if `format.use-dirstate-v2=no`. So we recommend
965 `dirstate-v2` requirement if `format.use-dirstate-v2=no`. So we recommend
966 setting both this value and `format.use-dirstate-v2` at the same time.
966 setting both this value and `format.use-dirstate-v2` at the same time.
967
967
968 ``use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet``
968 ``use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet``
969 Hide message when performing such automatic upgrade.
969 Hide message when performing such automatic upgrade.
970
970
971 ``use-dirstate-tracked-hint``
971 ``use-dirstate-tracked-hint``
972 Enable or disable the writing of "tracked key" file alongside the dirstate.
972 Enable or disable the writing of "tracked key" file alongside the dirstate.
973 (default to disabled)
973 (default to disabled)
974
974
975 That "tracked-hint" can help external automations to detect changes to the
975 That "tracked-hint" can help external automations to detect changes to the
976 set of tracked files. (i.e the result of `hg files` or `hg status -macd`)
976 set of tracked files. (i.e the result of `hg files` or `hg status -macd`)
977
977
978 The tracked-hint is written in a new `.hg/dirstate-tracked-hint`. That file
978 The tracked-hint is written in a new `.hg/dirstate-tracked-hint`. That file
979 contains two lines:
979 contains two lines:
980 - the first line is the file version (currently: 1),
980 - the first line is the file version (currently: 1),
981 - the second line contains the "tracked-hint".
981 - the second line contains the "tracked-hint".
982 That file is written right after the dirstate is written.
982 That file is written right after the dirstate is written.
983
983
984 The tracked-hint changes whenever the set of files tracked in the dirstate
984 The tracked-hint changes whenever the set of files tracked in the dirstate
985 changes. The general idea is:
985 changes. The general idea is:
986 - if the hint is identical, the set of tracked files SHOULD be identical,
986 - if the hint is identical, the set of tracked files SHOULD be identical,
987 - if the hint is different, the set of tracked files MIGHT be different.
987 - if the hint is different, the set of tracked files MIGHT be different.
988
988
989 The "hint is identical" case uses `SHOULD` as the dirstate and the hint file
989 The "hint is identical" case uses `SHOULD` as the dirstate and the hint file
990 are two distinct files and therefore cannot be read or written to in an
990 are two distinct files and therefore cannot be read or written to in an
991 atomic way. If the key is identical, nothing guarantees that the dirstate is
991 atomic way. If the key is identical, nothing guarantees that the dirstate is
992 not updated right after the hint file. This is considered a negligible
992 not updated right after the hint file. This is considered a negligible
993 limitation for the intended use case. It is actually possible to prevent this
993 limitation for the intended use case. It is actually possible to prevent this
994 race by taking the repository lock during read operations.
994 race by taking the repository lock during read operations.
995
995
996 There are two "ways" to use this feature:
996 There are two "ways" to use this feature:
997
997
998 1) monitoring changes to the `.hg/dirstate-tracked-hint`, if the file
998 1) monitoring changes to the `.hg/dirstate-tracked-hint`, if the file
999 changes, the tracked set might have changed.
999 changes, the tracked set might have changed.
1000
1000
1001 2) storing the value and comparing it to a later value.
1001 2) storing the value and comparing it to a later value.
1002
1002
1003
1003
1004 ``use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories``
1004 ``use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories``
1005 When enabled, an automatic upgrade will be triggered when a repository format
1005 When enabled, an automatic upgrade will be triggered when a repository format
1006 does not match its `use-dirstate-tracked-hint` config.
1006 does not match its `use-dirstate-tracked-hint` config.
1007
1007
1008 This is an advanced behavior that most users will not need. We recommend you
1008 This is an advanced behavior that most users will not need. We recommend you
1009 don't use this unless you are a seasoned administrator of a Mercurial install
1009 don't use this unless you are a seasoned administrator of a Mercurial install
1010 base.
1010 base.
1011
1011
1012 Automatic upgrade means that any process accessing the repository will
1012 Automatic upgrade means that any process accessing the repository will
1013 upgrade the repository format to use `dirstate-tracked-hint`. This only
1013 upgrade the repository format to use `dirstate-tracked-hint`. This only
1014 triggers if a change is needed. This also applies to operations that would
1014 triggers if a change is needed. This also applies to operations that would
1015 have been read-only (like hg status).
1015 have been read-only (like hg status).
1016
1016
1017 If the repository cannot be locked, the automatic-upgrade operation will be
1017 If the repository cannot be locked, the automatic-upgrade operation will be
1018 skipped. The next operation will attempt it again.
1018 skipped. The next operation will attempt it again.
1019
1019
1020 This configuration will apply for moves in any direction, either adding the
1020 This configuration will apply for moves in any direction, either adding the
1021 `dirstate-tracked-hint` format if `format.use-dirstate-tracked-hint=yes` or
1021 `dirstate-tracked-hint` format if `format.use-dirstate-tracked-hint=yes` or
1022 removing the `dirstate-tracked-hint` requirement if
1022 removing the `dirstate-tracked-hint` requirement if
1023 `format.use-dirstate-tracked-hint=no`. So we recommend setting both this
1023 `format.use-dirstate-tracked-hint=no`. So we recommend setting both this
1024 value and `format.use-dirstate-tracked-hint` at the same time.
1024 value and `format.use-dirstate-tracked-hint` at the same time.
1025
1025
1026
1026
1027 ``use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet``
1027 ``use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet``
1028 Hide message when performing such automatic upgrade.
1028 Hide message when performing such automatic upgrade.
1029
1029
1030
1030
1031 ``use-persistent-nodemap``
1031 ``use-persistent-nodemap``
1032 Enable or disable the "persistent-nodemap" feature which improves
1032 Enable or disable the "persistent-nodemap" feature which improves
1033 performance if the Rust extensions are available.
1033 performance if the Rust extensions are available.
1034
1034
1035 The "persistent-nodemap" persists the "node -> rev" on disk removing the
1035 The "persistent-nodemap" persists the "node -> rev" on disk removing the
1036 need to dynamically build that mapping for each Mercurial invocation. This
1036 need to dynamically build that mapping for each Mercurial invocation. This
1037 significantly reduces the startup cost of various local and server-side
1037 significantly reduces the startup cost of various local and server-side
1038 operations for larger repositories.
1038 operations for larger repositories.
1039
1039
1040 The performance-improving version of this feature is currently only
1040 The performance-improving version of this feature is currently only
1041 implemented in Rust (see :hg:`help rust`), so people not using a version of
1041 implemented in Rust (see :hg:`help rust`), so people not using a version of
1042 Mercurial compiled with the Rust parts might actually suffer some slowdown.
1042 Mercurial compiled with the Rust parts might actually suffer some slowdown.
1043 For this reason, such versions will by default refuse to access repositories
1043 For this reason, such versions will by default refuse to access repositories
1044 with "persistent-nodemap".
1044 with "persistent-nodemap".
1045
1045
1046 This behavior can be adjusted via configuration: check
1046 This behavior can be adjusted via configuration: check
1047 :hg:`help config.storage.revlog.persistent-nodemap.slow-path` for details.
1047 :hg:`help config.storage.revlog.persistent-nodemap.slow-path` for details.
1048
1048
1049 Repositories with this on-disk format require Mercurial 5.4 or above.
1049 Repositories with this on-disk format require Mercurial 5.4 or above.
1050
1050
1051 By default this format variant is disabled if the fast implementation is not
1051 By default this format variant is disabled if the fast implementation is not
1052 available, and enabled by default if the fast implementation is available.
1052 available, and enabled by default if the fast implementation is available.
1053
1053
1054 To accommodate installations of Mercurial without the fast implementation,
1054 To accommodate installations of Mercurial without the fast implementation,
1055 you can downgrade your repository. To do so run the following command:
1055 you can downgrade your repository. To do so run the following command:
1056
1056
1057 $ hg debugupgraderepo \
1057 $ hg debugupgraderepo \
1058 --run \
1058 --run \
1059 --config format.use-persistent-nodemap=False \
1059 --config format.use-persistent-nodemap=False \
1060 --config storage.revlog.persistent-nodemap.slow-path=allow
1060 --config storage.revlog.persistent-nodemap.slow-path=allow
1061
1061
1062 ``use-share-safe``
1062 ``use-share-safe``
1063 Enforce "safe" behaviors for all "shares" that access this repository.
1063 Enforce "safe" behaviors for all "shares" that access this repository.
1064
1064
1065 With this feature, "shares" using this repository as a source will:
1065 With this feature, "shares" using this repository as a source will:
1066
1066
1067 * read the source repository's configuration (`<source>/.hg/hgrc`).
1067 * read the source repository's configuration (`<source>/.hg/hgrc`).
1068 * read and use the source repository's "requirements"
1068 * read and use the source repository's "requirements"
1069 (except the working copy specific one).
1069 (except the working copy specific one).
1070
1070
1071 Without this feature, "shares" using this repository as a source will:
1071 Without this feature, "shares" using this repository as a source will:
1072
1072
1073 * keep tracking the repository "requirements" in the share only, ignoring
1073 * keep tracking the repository "requirements" in the share only, ignoring
1074 the source "requirements", possibly diverging from them.
1074 the source "requirements", possibly diverging from them.
1075 * ignore source repository config. This can create problems, like silently
1075 * ignore source repository config. This can create problems, like silently
1076 ignoring important hooks.
1076 ignoring important hooks.
1077
1077
1078 Beware that existing shares will not be upgraded/downgraded, and by
1078 Beware that existing shares will not be upgraded/downgraded, and by
1079 default, Mercurial will refuse to interact with them until the mismatch
1079 default, Mercurial will refuse to interact with them until the mismatch
1080 is resolved. See :hg:`help config.share.safe-mismatch.source-safe` and
1080 is resolved. See :hg:`help config.share.safe-mismatch.source-safe` and
1081 :hg:`help config.share.safe-mismatch.source-not-safe` for details.
1081 :hg:`help config.share.safe-mismatch.source-not-safe` for details.
1082
1082
1083 Introduced in Mercurial 5.7.
1083 Introduced in Mercurial 5.7.
1084
1084
1085 Enabled by default in Mercurial 6.1.
1085 Enabled by default in Mercurial 6.1.
1086
1086
1087 ``use-share-safe.automatic-upgrade-of-mismatching-repositories``
1087 ``use-share-safe.automatic-upgrade-of-mismatching-repositories``
1088 When enabled, an automatic upgrade will be triggered when a repository format
1088 When enabled, an automatic upgrade will be triggered when a repository format
1089 does not match its `use-share-safe` config.
1089 does not match its `use-share-safe` config.
1090
1090
1091 This is an advanced behavior that most users will not need. We recommend you
1091 This is an advanced behavior that most users will not need. We recommend you
1092 don't use this unless you are a seasoned administrator of a Mercurial install
1092 don't use this unless you are a seasoned administrator of a Mercurial install
1093 base.
1093 base.
1094
1094
1095 Automatic upgrade means that any process accessing the repository will
1095 Automatic upgrade means that any process accessing the repository will
1096 upgrade the repository format to use `share-safe`. This only triggers if a
1096 upgrade the repository format to use `share-safe`. This only triggers if a
1097 change is needed. This also applies to operations that would have been
1097 change is needed. This also applies to operations that would have been
1098 read-only (like hg status).
1098 read-only (like hg status).
1099
1099
1100 If the repository cannot be locked, the automatic-upgrade operation will be
1100 If the repository cannot be locked, the automatic-upgrade operation will be
1101 skipped. The next operation will attempt it again.
1101 skipped. The next operation will attempt it again.
1102
1102
1103 This configuration will apply for moves in any direction, either adding the
1103 This configuration will apply for moves in any direction, either adding the
1104 `share-safe` format if `format.use-share-safe=yes` or removing the
1104 `share-safe` format if `format.use-share-safe=yes` or removing the
1105 `share-safe` requirement if `format.use-share-safe=no`. So we recommend
1105 `share-safe` requirement if `format.use-share-safe=no`. So we recommend
1106 setting both this value and `format.use-share-safe` at the same time.
1106 setting both this value and `format.use-share-safe` at the same time.
1107
1107
1108 ``use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet``
1108 ``use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet``
1109 Hide message when performing such automatic upgrade.
1109 Hide message when performing such automatic upgrade.
1110
1110
1111 ``usestore``
1111 ``usestore``
1112 Enable or disable the "store" repository format which improves
1112 Enable or disable the "store" repository format which improves
1113 compatibility with systems that fold case or otherwise mangle
1113 compatibility with systems that fold case or otherwise mangle
1114 filenames. Disabling this option will allow you to store longer filenames
1114 filenames. Disabling this option will allow you to store longer filenames
1115 in some situations at the expense of compatibility.
1115 in some situations at the expense of compatibility.
1116
1116
1117 Repositories with this on-disk format require Mercurial version 0.9.4.
1117 Repositories with this on-disk format require Mercurial version 0.9.4.
1118
1118
1119 Enabled by default.
1119 Enabled by default.
1120
1120
1121 ``sparse-revlog``
1121 ``sparse-revlog``
1122 Enable or disable the ``sparse-revlog`` delta strategy. This format improves
1122 Enable or disable the ``sparse-revlog`` delta strategy. This format improves
1123 delta re-use inside revlog. For very branchy repositories, it results in a
1123 delta re-use inside revlog. For very branchy repositories, it results in a
1124 smaller store. For repositories with many revisions, it also helps
1124 smaller store. For repositories with many revisions, it also helps
1125 performance (by using shortened delta chains.)
1125 performance (by using shortened delta chains.)
1126
1126
1127 Repositories with this on-disk format require Mercurial version 4.7
1127 Repositories with this on-disk format require Mercurial version 4.7
1128
1128
1129 Enabled by default.
1129 Enabled by default.
1130
1130
1131 ``revlog-compression``
1131 ``revlog-compression``
1132 Compression algorithm used by revlog. Supported values are `zlib` and
1132 Compression algorithm used by revlog. Supported values are `zlib` and
1133 `zstd`. The `zlib` engine is the historical default of Mercurial. `zstd` is
1133 `zstd`. The `zlib` engine is the historical default of Mercurial. `zstd` is
1134 a newer format that is usually a net win over `zlib`, operating faster at
1134 a newer format that is usually a net win over `zlib`, operating faster at
1135 better compression rates. Use `zstd` to reduce CPU usage. Multiple values
1135 better compression rates. Use `zstd` to reduce CPU usage. Multiple values
1136 can be specified, the first available one will be used.
1136 can be specified, the first available one will be used.
1137
1137
1138 On some systems, the Mercurial installation may lack `zstd` support.
1138 On some systems, the Mercurial installation may lack `zstd` support.
1139
1139
1140 Default is `zstd` if available, `zlib` otherwise.
1140 Default is `zstd` if available, `zlib` otherwise.
1141
1141
1142 ``bookmarks-in-store``
1142 ``bookmarks-in-store``
1143 Store bookmarks in .hg/store/. This means that bookmarks are shared when
1143 Store bookmarks in .hg/store/. This means that bookmarks are shared when
1144 using `hg share` regardless of the `-B` option.
1144 using `hg share` regardless of the `-B` option.
1145
1145
1146 Repositories with this on-disk format require Mercurial version 5.1.
1146 Repositories with this on-disk format require Mercurial version 5.1.
1147
1147
1148 Disabled by default.
1148 Disabled by default.
1149
1149
1150
1150
1151 ``graph``
1151 ``graph``
1152 ---------
1152 ---------
1153
1153
1154 Web graph view configuration. This section lets you change graph
1154 Web graph view configuration. This section lets you change graph
1155 elements display properties by branches, for instance to make the
1155 elements display properties by branches, for instance to make the
1156 ``default`` branch stand out.
1156 ``default`` branch stand out.
1157
1157
1158 Each line has the following format::
1158 Each line has the following format::
1159
1159
1160 <branch>.<argument> = <value>
1160 <branch>.<argument> = <value>
1161
1161
1162 where ``<branch>`` is the name of the branch being
1162 where ``<branch>`` is the name of the branch being
1163 customized. Example::
1163 customized. Example::
1164
1164
1165 [graph]
1165 [graph]
1166 # 2px width
1166 # 2px width
1167 default.width = 2
1167 default.width = 2
1168 # red color
1168 # red color
1169 default.color = FF0000
1169 default.color = FF0000
1170
1170
1171 Supported arguments:
1171 Supported arguments:
1172
1172
1173 ``width``
1173 ``width``
1174 Set branch edges width in pixels.
1174 Set branch edges width in pixels.
1175
1175
1176 ``color``
1176 ``color``
1177 Set branch edges color in hexadecimal RGB notation.
1177 Set branch edges color in hexadecimal RGB notation.
1178
1178
1179 ``hooks``
1179 ``hooks``
1180 ---------
1180 ---------
1181
1181
1182 Commands or Python functions that get automatically executed by
1182 Commands or Python functions that get automatically executed by
1183 various actions such as starting or finishing a commit. Multiple
1183 various actions such as starting or finishing a commit. Multiple
1184 hooks can be run for the same action by appending a suffix to the
1184 hooks can be run for the same action by appending a suffix to the
1185 action. Overriding a site-wide hook can be done by changing its
1185 action. Overriding a site-wide hook can be done by changing its
1186 value or setting it to an empty string. Hooks can be prioritized
1186 value or setting it to an empty string. Hooks can be prioritized
1187 by adding a prefix of ``priority.`` to the hook name on a new line
1187 by adding a prefix of ``priority.`` to the hook name on a new line
1188 and setting the priority. The default priority is 0.
1188 and setting the priority. The default priority is 0.
1189
1189
1190 Example ``.hg/hgrc``::
1190 Example ``.hg/hgrc``::
1191
1191
1192 [hooks]
1192 [hooks]
1193 # update working directory after adding changesets
1193 # update working directory after adding changesets
1194 changegroup.update = hg update
1194 changegroup.update = hg update
1195 # do not use the site-wide hook
1195 # do not use the site-wide hook
1196 incoming =
1196 incoming =
1197 incoming.email = /my/email/hook
1197 incoming.email = /my/email/hook
1198 incoming.autobuild = /my/build/hook
1198 incoming.autobuild = /my/build/hook
1199 # force autobuild hook to run before other incoming hooks
1199 # force autobuild hook to run before other incoming hooks
1200 priority.incoming.autobuild = 1
1200 priority.incoming.autobuild = 1
1201 ### control HGPLAIN setting when running autobuild hook
1201 ### control HGPLAIN setting when running autobuild hook
1202 # HGPLAIN always set (default from Mercurial 5.7)
1202 # HGPLAIN always set (default from Mercurial 5.7)
1203 incoming.autobuild:run-with-plain = yes
1203 incoming.autobuild:run-with-plain = yes
1204 # HGPLAIN never set
1204 # HGPLAIN never set
1205 incoming.autobuild:run-with-plain = no
1205 incoming.autobuild:run-with-plain = no
1206 # HGPLAIN inherited from environment (default before Mercurial 5.7)
1206 # HGPLAIN inherited from environment (default before Mercurial 5.7)
1207 incoming.autobuild:run-with-plain = auto
1207 incoming.autobuild:run-with-plain = auto
1208
1208
1209 Most hooks are run with environment variables set that give useful
1209 Most hooks are run with environment variables set that give useful
1210 additional information. For each hook below, the environment variables
1210 additional information. For each hook below, the environment variables
1211 it is passed are listed with names in the form ``$HG_foo``. The
1211 it is passed are listed with names in the form ``$HG_foo``. The
1212 ``$HG_HOOKTYPE`` and ``$HG_HOOKNAME`` variables are set for all hooks.
1212 ``$HG_HOOKTYPE`` and ``$HG_HOOKNAME`` variables are set for all hooks.
1213 They contain the type of hook which triggered the run and the full name
1213 They contain the type of hook which triggered the run and the full name
1214 of the hook in the config, respectively. In the example above, this will
1214 of the hook in the config, respectively. In the example above, this will
1215 be ``$HG_HOOKTYPE=incoming`` and ``$HG_HOOKNAME=incoming.email``.
1215 be ``$HG_HOOKTYPE=incoming`` and ``$HG_HOOKNAME=incoming.email``.
1216
1216
1217 .. container:: windows
1217 .. container:: windows
1218
1218
1219 Some basic Unix syntax can be enabled for portability, including ``$VAR``
1219 Some basic Unix syntax can be enabled for portability, including ``$VAR``
1220 and ``${VAR}`` style variables. A ``~`` followed by ``\`` or ``/`` will
1220 and ``${VAR}`` style variables. A ``~`` followed by ``\`` or ``/`` will
1221 be expanded to ``%USERPROFILE%`` to simulate a subset of tilde expansion
1221 be expanded to ``%USERPROFILE%`` to simulate a subset of tilde expansion
1222 on Unix. To use a literal ``$`` or ``~``, it must be escaped with a back
1222 on Unix. To use a literal ``$`` or ``~``, it must be escaped with a back
1223 slash or inside of a strong quote. Strong quotes will be replaced by
1223 slash or inside of a strong quote. Strong quotes will be replaced by
1224 double quotes after processing.
1224 double quotes after processing.
1225
1225
1226 This feature is enabled by adding a prefix of ``tonative.`` to the hook
1226 This feature is enabled by adding a prefix of ``tonative.`` to the hook
1227 name on a new line, and setting it to ``True``. For example::
1227 name on a new line, and setting it to ``True``. For example::
1228
1228
1229 [hooks]
1229 [hooks]
1230 incoming.autobuild = /my/build/hook
1230 incoming.autobuild = /my/build/hook
1231 # enable translation to cmd.exe syntax for autobuild hook
1231 # enable translation to cmd.exe syntax for autobuild hook
1232 tonative.incoming.autobuild = True
1232 tonative.incoming.autobuild = True
1233
1233
1234 ``changegroup``
1234 ``changegroup``
1235 Run after a changegroup has been added via push, pull or unbundle. The ID of
1235 Run after a changegroup has been added via push, pull or unbundle. The ID of
1236 the first new changeset is in ``$HG_NODE`` and last is in ``$HG_NODE_LAST``.
1236 the first new changeset is in ``$HG_NODE`` and last is in ``$HG_NODE_LAST``.
1237 The URL from which changes came is in ``$HG_URL``.
1237 The URL from which changes came is in ``$HG_URL``.
1238
1238
1239 ``commit``
1239 ``commit``
1240 Run after a changeset has been created in the local repository. The ID
1240 Run after a changeset has been created in the local repository. The ID
1241 of the newly created changeset is in ``$HG_NODE``. Parent changeset
1241 of the newly created changeset is in ``$HG_NODE``. Parent changeset
1242 IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1242 IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1243
1243
1244 ``incoming``
1244 ``incoming``
1245 Run after a changeset has been pulled, pushed, or unbundled into
1245 Run after a changeset has been pulled, pushed, or unbundled into
1246 the local repository. The ID of the newly arrived changeset is in
1246 the local repository. The ID of the newly arrived changeset is in
1247 ``$HG_NODE``. The URL that was source of the changes is in ``$HG_URL``.
1247 ``$HG_NODE``. The URL that was source of the changes is in ``$HG_URL``.
1248
1248
1249 ``outgoing``
1249 ``outgoing``
1250 Run after sending changes from the local repository to another. The ID of
1250 Run after sending changes from the local repository to another. The ID of
1251 first changeset sent is in ``$HG_NODE``. The source of operation is in
1251 first changeset sent is in ``$HG_NODE``. The source of operation is in
1252 ``$HG_SOURCE``. Also see :hg:`help config.hooks.preoutgoing`.
1252 ``$HG_SOURCE``. Also see :hg:`help config.hooks.preoutgoing`.
1253
1253
1254 ``post-<command>``
1254 ``post-<command>``
1255 Run after successful invocations of the associated command. The
1255 Run after successful invocations of the associated command. The
1256 contents of the command line are passed as ``$HG_ARGS`` and the result
1256 contents of the command line are passed as ``$HG_ARGS`` and the result
1257 code in ``$HG_RESULT``. Parsed command line arguments are passed as
1257 code in ``$HG_RESULT``. Parsed command line arguments are passed as
1258 ``$HG_PATS`` and ``$HG_OPTS``. These contain string representations of
1258 ``$HG_PATS`` and ``$HG_OPTS``. These contain string representations of
1259 the python data internally passed to <command>. ``$HG_OPTS`` is a
1259 the python data internally passed to <command>. ``$HG_OPTS`` is a
1260 dictionary of options (with unspecified options set to their defaults).
1260 dictionary of options (with unspecified options set to their defaults).
1261 ``$HG_PATS`` is a list of arguments. Hook failure is ignored.
1261 ``$HG_PATS`` is a list of arguments. Hook failure is ignored.
1262
1262
1263 ``fail-<command>``
1263 ``fail-<command>``
1264 Run after a failed invocation of an associated command. The contents
1264 Run after a failed invocation of an associated command. The contents
1265 of the command line are passed as ``$HG_ARGS``. Parsed command line
1265 of the command line are passed as ``$HG_ARGS``. Parsed command line
1266 arguments are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain
1266 arguments are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain
1267 string representations of the python data internally passed to
1267 string representations of the python data internally passed to
1268 <command>. ``$HG_OPTS`` is a dictionary of options (with unspecified
1268 <command>. ``$HG_OPTS`` is a dictionary of options (with unspecified
1269 options set to their defaults). ``$HG_PATS`` is a list of arguments.
1269 options set to their defaults). ``$HG_PATS`` is a list of arguments.
1270 Hook failure is ignored.
1270 Hook failure is ignored.
1271
1271
1272 ``pre-<command>``
1272 ``pre-<command>``
1273 Run before executing the associated command. The contents of the
1273 Run before executing the associated command. The contents of the
1274 command line are passed as ``$HG_ARGS``. Parsed command line arguments
1274 command line are passed as ``$HG_ARGS``. Parsed command line arguments
1275 are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain string
1275 are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain string
1276 representations of the data internally passed to <command>. ``$HG_OPTS``
1276 representations of the data internally passed to <command>. ``$HG_OPTS``
1277 is a dictionary of options (with unspecified options set to their
1277 is a dictionary of options (with unspecified options set to their
1278 defaults). ``$HG_PATS`` is a list of arguments. If the hook returns
1278 defaults). ``$HG_PATS`` is a list of arguments. If the hook returns
1279 failure, the command doesn't execute and Mercurial returns the failure
1279 failure, the command doesn't execute and Mercurial returns the failure
1280 code.
1280 code.
1281
1281
1282 ``prechangegroup``
1282 ``prechangegroup``
1283 Run before a changegroup is added via push, pull or unbundle. Exit
1283 Run before a changegroup is added via push, pull or unbundle. Exit
1284 status 0 allows the changegroup to proceed. A non-zero status will
1284 status 0 allows the changegroup to proceed. A non-zero status will
1285 cause the push, pull or unbundle to fail. The URL from which changes
1285 cause the push, pull or unbundle to fail. The URL from which changes
1286 will come is in ``$HG_URL``.
1286 will come is in ``$HG_URL``.
1287
1287
1288 ``precommit``
1288 ``precommit``
1289 Run before starting a local commit. Exit status 0 allows the
1289 Run before starting a local commit. Exit status 0 allows the
1290 commit to proceed. A non-zero status will cause the commit to fail.
1290 commit to proceed. A non-zero status will cause the commit to fail.
1291 Parent changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1291 Parent changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1292
1292
1293 ``prelistkeys``
1293 ``prelistkeys``
1294 Run before listing pushkeys (like bookmarks) in the
1294 Run before listing pushkeys (like bookmarks) in the
1295 repository. A non-zero status will cause failure. The key namespace is
1295 repository. A non-zero status will cause failure. The key namespace is
1296 in ``$HG_NAMESPACE``.
1296 in ``$HG_NAMESPACE``.
1297
1297
1298 ``preoutgoing``
1298 ``preoutgoing``
1299 Run before collecting changes to send from the local repository to
1299 Run before collecting changes to send from the local repository to
1300 another. A non-zero status will cause failure. This lets you prevent
1300 another. A non-zero status will cause failure. This lets you prevent
1301 pull over HTTP or SSH. It can also prevent propagating commits (via
1301 pull over HTTP or SSH. It can also prevent propagating commits (via
1302 local pull, push (outbound) or bundle commands), but not completely,
1302 local pull, push (outbound) or bundle commands), but not completely,
1303 since you can just copy files instead. The source of operation is in
1303 since you can just copy files instead. The source of operation is in
1304 ``$HG_SOURCE``. If "serve", the operation is happening on behalf of a remote
1304 ``$HG_SOURCE``. If "serve", the operation is happening on behalf of a remote
1305 SSH or HTTP repository. If "push", "pull" or "bundle", the operation
1305 SSH or HTTP repository. If "push", "pull" or "bundle", the operation
1306 is happening on behalf of a repository on same system.
1306 is happening on behalf of a repository on same system.
1307
1307
1308 ``prepushkey``
1308 ``prepushkey``
1309 Run before a pushkey (like a bookmark) is added to the
1309 Run before a pushkey (like a bookmark) is added to the
1310 repository. A non-zero status will cause the key to be rejected. The
1310 repository. A non-zero status will cause the key to be rejected. The
1311 key namespace is in ``$HG_NAMESPACE``, the key is in ``$HG_KEY``,
1311 key namespace is in ``$HG_NAMESPACE``, the key is in ``$HG_KEY``,
1312 the old value (if any) is in ``$HG_OLD``, and the new value is in
1312 the old value (if any) is in ``$HG_OLD``, and the new value is in
1313 ``$HG_NEW``.
1313 ``$HG_NEW``.
1314
1314
1315 ``pretag``
1315 ``pretag``
1316 Run before creating a tag. Exit status 0 allows the tag to be
1316 Run before creating a tag. Exit status 0 allows the tag to be
1317 created. A non-zero status will cause the tag to fail. The ID of the
1317 created. A non-zero status will cause the tag to fail. The ID of the
1318 changeset to tag is in ``$HG_NODE``. The name of tag is in ``$HG_TAG``. The
1318 changeset to tag is in ``$HG_NODE``. The name of tag is in ``$HG_TAG``. The
1319 tag is local if ``$HG_LOCAL=1``, or in the repository if ``$HG_LOCAL=0``.
1319 tag is local if ``$HG_LOCAL=1``, or in the repository if ``$HG_LOCAL=0``.
1320
1320
1321 ``pretxnopen``
1321 ``pretxnopen``
1322 Run before any new repository transaction is open. The reason for the
1322 Run before any new repository transaction is open. The reason for the
1323 transaction will be in ``$HG_TXNNAME``, and a unique identifier for the
1323 transaction will be in ``$HG_TXNNAME``, and a unique identifier for the
1324 transaction will be in ``$HG_TXNID``. A non-zero status will prevent the
1324 transaction will be in ``$HG_TXNID``. A non-zero status will prevent the
1325 transaction from being opened.
1325 transaction from being opened.
1326
1326
1327 ``pretxnclose``
1327 ``pretxnclose``
1328 Run right before the transaction is actually finalized. Any repository change
1328 Run right before the transaction is actually finalized. Any repository change
1329 will be visible to the hook program. This lets you validate the transaction
1329 will be visible to the hook program. This lets you validate the transaction
1330 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1330 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1331 status will cause the transaction to be rolled back. The reason for the
1331 status will cause the transaction to be rolled back. The reason for the
1332 transaction opening will be in ``$HG_TXNNAME``, and a unique identifier for
1332 transaction opening will be in ``$HG_TXNNAME``, and a unique identifier for
1333 the transaction will be in ``$HG_TXNID``. The rest of the available data will
1333 the transaction will be in ``$HG_TXNID``. The rest of the available data will
1334 vary according the transaction type. Changes unbundled to the repository will
1334 vary according the transaction type. Changes unbundled to the repository will
1335 add ``$HG_URL`` and ``$HG_SOURCE``. New changesets will add ``$HG_NODE`` (the
1335 add ``$HG_URL`` and ``$HG_SOURCE``. New changesets will add ``$HG_NODE`` (the
1336 ID of the first added changeset), ``$HG_NODE_LAST`` (the ID of the last added
1336 ID of the first added changeset), ``$HG_NODE_LAST`` (the ID of the last added
1337 changeset). Bookmark and phase changes will set ``$HG_BOOKMARK_MOVED`` and
1337 changeset). Bookmark and phase changes will set ``$HG_BOOKMARK_MOVED`` and
1338 ``$HG_PHASES_MOVED`` to ``1`` respectively. The number of new obsmarkers, if
1338 ``$HG_PHASES_MOVED`` to ``1`` respectively. The number of new obsmarkers, if
1339 any, will be in ``$HG_NEW_OBSMARKERS``, etc.
1339 any, will be in ``$HG_NEW_OBSMARKERS``, etc.
1340
1340
1341 ``pretxnclose-bookmark``
1341 ``pretxnclose-bookmark``
1342 Run right before a bookmark change is actually finalized. Any repository
1342 Run right before a bookmark change is actually finalized. Any repository
1343 change will be visible to the hook program. This lets you validate the
1343 change will be visible to the hook program. This lets you validate the
1344 transaction content or change it. Exit status 0 allows the commit to
1344 transaction content or change it. Exit status 0 allows the commit to
1345 proceed. A non-zero status will cause the transaction to be rolled back.
1345 proceed. A non-zero status will cause the transaction to be rolled back.
1346 The name of the bookmark will be available in ``$HG_BOOKMARK``, the new
1346 The name of the bookmark will be available in ``$HG_BOOKMARK``, the new
1347 bookmark location will be available in ``$HG_NODE`` while the previous
1347 bookmark location will be available in ``$HG_NODE`` while the previous
1348 location will be available in ``$HG_OLDNODE``. In case of a bookmark
1348 location will be available in ``$HG_OLDNODE``. In case of a bookmark
1349 creation ``$HG_OLDNODE`` will be empty. In case of deletion ``$HG_NODE``
1349 creation ``$HG_OLDNODE`` will be empty. In case of deletion ``$HG_NODE``
1350 will be empty.
1350 will be empty.
1351 In addition, the reason for the transaction opening will be in
1351 In addition, the reason for the transaction opening will be in
1352 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1352 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1353 ``$HG_TXNID``.
1353 ``$HG_TXNID``.
1354
1354
1355 ``pretxnclose-phase``
1355 ``pretxnclose-phase``
1356 Run right before a phase change is actually finalized. Any repository change
1356 Run right before a phase change is actually finalized. Any repository change
1357 will be visible to the hook program. This lets you validate the transaction
1357 will be visible to the hook program. This lets you validate the transaction
1358 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1358 content or change it. Exit status 0 allows the commit to proceed. A non-zero
1359 status will cause the transaction to be rolled back. The hook is called
1359 status will cause the transaction to be rolled back. The hook is called
1360 multiple times, once for each revision affected by a phase change.
1360 multiple times, once for each revision affected by a phase change.
1361 The affected node is available in ``$HG_NODE``, the phase in ``$HG_PHASE``
1361 The affected node is available in ``$HG_NODE``, the phase in ``$HG_PHASE``
1362 while the previous ``$HG_OLDPHASE``. In case of new node, ``$HG_OLDPHASE``
1362 while the previous ``$HG_OLDPHASE``. In case of new node, ``$HG_OLDPHASE``
1363 will be empty. In addition, the reason for the transaction opening will be in
1363 will be empty. In addition, the reason for the transaction opening will be in
1364 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1364 ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
1365 ``$HG_TXNID``. The hook is also run for newly added revisions. In this case
1365 ``$HG_TXNID``. The hook is also run for newly added revisions. In this case
1366 the ``$HG_OLDPHASE`` entry will be empty.
1366 the ``$HG_OLDPHASE`` entry will be empty.
1367
1367
1368 ``txnclose``
1368 ``txnclose``
1369 Run after any repository transaction has been committed. At this
1369 Run after any repository transaction has been committed. At this
1370 point, the transaction can no longer be rolled back. The hook will run
1370 point, the transaction can no longer be rolled back. The hook will run
1371 after the lock is released. See :hg:`help config.hooks.pretxnclose` for
1371 after the lock is released. See :hg:`help config.hooks.pretxnclose` for
1372 details about available variables.
1372 details about available variables.
1373
1373
1374 ``txnclose-bookmark``
1374 ``txnclose-bookmark``
1375 Run after any bookmark change has been committed. At this point, the
1375 Run after any bookmark change has been committed. At this point, the
1376 transaction can no longer be rolled back. The hook will run after the lock
1376 transaction can no longer be rolled back. The hook will run after the lock
1377 is released. See :hg:`help config.hooks.pretxnclose-bookmark` for details
1377 is released. See :hg:`help config.hooks.pretxnclose-bookmark` for details
1378 about available variables.
1378 about available variables.
1379
1379
1380 ``txnclose-phase``
1380 ``txnclose-phase``
1381 Run after any phase change has been committed. At this point, the
1381 Run after any phase change has been committed. At this point, the
1382 transaction can no longer be rolled back. The hook will run after the lock
1382 transaction can no longer be rolled back. The hook will run after the lock
1383 is released. See :hg:`help config.hooks.pretxnclose-phase` for details about
1383 is released. See :hg:`help config.hooks.pretxnclose-phase` for details about
1384 available variables.
1384 available variables.
1385
1385
1386 ``txnabort``
1386 ``txnabort``
1387 Run when a transaction is aborted. See :hg:`help config.hooks.pretxnclose`
1387 Run when a transaction is aborted. See :hg:`help config.hooks.pretxnclose`
1388 for details about available variables.
1388 for details about available variables.
1389
1389
1390 ``pretxnchangegroup``
1390 ``pretxnchangegroup``
1391 Run after a changegroup has been added via push, pull or unbundle, but before
1391 Run after a changegroup has been added via push, pull or unbundle, but before
1392 the transaction has been committed. The changegroup is visible to the hook
1392 the transaction has been committed. The changegroup is visible to the hook
1393 program. This allows validation of incoming changes before accepting them.
1393 program. This allows validation of incoming changes before accepting them.
1394 The ID of the first new changeset is in ``$HG_NODE`` and last is in
1394 The ID of the first new changeset is in ``$HG_NODE`` and last is in
1395 ``$HG_NODE_LAST``. Exit status 0 allows the transaction to commit. A non-zero
1395 ``$HG_NODE_LAST``. Exit status 0 allows the transaction to commit. A non-zero
1396 status will cause the transaction to be rolled back, and the push, pull or
1396 status will cause the transaction to be rolled back, and the push, pull or
1397 unbundle will fail. The URL that was the source of changes is in ``$HG_URL``.
1397 unbundle will fail. The URL that was the source of changes is in ``$HG_URL``.
1398
1398
1399 ``pretxncommit``
1399 ``pretxncommit``
1400 Run after a changeset has been created, but before the transaction is
1400 Run after a changeset has been created, but before the transaction is
1401 committed. The changeset is visible to the hook program. This allows
1401 committed. The changeset is visible to the hook program. This allows
1402 validation of the commit message and changes. Exit status 0 allows the
1402 validation of the commit message and changes. Exit status 0 allows the
1403 commit to proceed. A non-zero status will cause the transaction to
1403 commit to proceed. A non-zero status will cause the transaction to
1404 be rolled back. The ID of the new changeset is in ``$HG_NODE``. The parent
1404 be rolled back. The ID of the new changeset is in ``$HG_NODE``. The parent
1405 changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1405 changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
1406
1406
1407 ``preupdate``
1407 ``preupdate``
1408 Run before updating the working directory. Exit status 0 allows
1408 Run before updating the working directory. Exit status 0 allows
1409 the update to proceed. A non-zero status will prevent the update.
1409 the update to proceed. A non-zero status will prevent the update.
1410 The changeset ID of first new parent is in ``$HG_PARENT1``. If updating to a
1410 The changeset ID of first new parent is in ``$HG_PARENT1``. If updating to a
1411 merge, the ID of second new parent is in ``$HG_PARENT2``.
1411 merge, the ID of second new parent is in ``$HG_PARENT2``.
1412
1412
1413 ``listkeys``
1413 ``listkeys``
1414 Run after listing pushkeys (like bookmarks) in the repository. The
1414 Run after listing pushkeys (like bookmarks) in the repository. The
1415 key namespace is in ``$HG_NAMESPACE``. ``$HG_VALUES`` is a
1415 key namespace is in ``$HG_NAMESPACE``. ``$HG_VALUES`` is a
1416 dictionary containing the keys and values.
1416 dictionary containing the keys and values.
1417
1417
1418 ``pushkey``
1418 ``pushkey``
1419 Run after a pushkey (like a bookmark) is added to the
1419 Run after a pushkey (like a bookmark) is added to the
1420 repository. The key namespace is in ``$HG_NAMESPACE``, the key is in
1420 repository. The key namespace is in ``$HG_NAMESPACE``, the key is in
1421 ``$HG_KEY``, the old value (if any) is in ``$HG_OLD``, and the new
1421 ``$HG_KEY``, the old value (if any) is in ``$HG_OLD``, and the new
1422 value is in ``$HG_NEW``.
1422 value is in ``$HG_NEW``.
1423
1423
1424 ``tag``
1424 ``tag``
1425 Run after a tag is created. The ID of the tagged changeset is in ``$HG_NODE``.
1425 Run after a tag is created. The ID of the tagged changeset is in ``$HG_NODE``.
1426 The name of the tag is in ``$HG_TAG``. The tag is local if ``$HG_LOCAL=1``, or in
1426 The name of the tag is in ``$HG_TAG``. The tag is local if ``$HG_LOCAL=1``, or in
1427 the repository if ``$HG_LOCAL=0``.
1427 the repository if ``$HG_LOCAL=0``.
1428
1428
1429 ``update``
1429 ``update``
1430 Run after updating the working directory. The changeset ID of first
1430 Run after updating the working directory. The changeset ID of first
1431 new parent is in ``$HG_PARENT1``. If updating to a merge, the ID of second new
1431 new parent is in ``$HG_PARENT1``. If updating to a merge, the ID of second new
1432 parent is in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the
1432 parent is in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the
1433 update failed (e.g. because conflicts were not resolved), ``$HG_ERROR=1``.
1433 update failed (e.g. because conflicts were not resolved), ``$HG_ERROR=1``.
1434
1434
1435 .. note::
1435 .. note::
1436
1436
1437 It is generally better to use standard hooks rather than the
1437 It is generally better to use standard hooks rather than the
1438 generic pre- and post- command hooks, as they are guaranteed to be
1438 generic pre- and post- command hooks, as they are guaranteed to be
1439 called in the appropriate contexts for influencing transactions.
1439 called in the appropriate contexts for influencing transactions.
1440 Also, hooks like "commit" will be called in all contexts that
1440 Also, hooks like "commit" will be called in all contexts that
1441 generate a commit (e.g. tag) and not just the commit command.
1441 generate a commit (e.g. tag) and not just the commit command.
1442
1442
1443 .. note::
1443 .. note::
1444
1444
1445 Environment variables with empty values may not be passed to
1445 Environment variables with empty values may not be passed to
1446 hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
1446 hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
1447 will have an empty value under Unix-like platforms for non-merge
1447 will have an empty value under Unix-like platforms for non-merge
1448 changesets, while it will not be available at all under Windows.
1448 changesets, while it will not be available at all under Windows.
1449
1449
1450 The syntax for Python hooks is as follows::
1450 The syntax for Python hooks is as follows::
1451
1451
1452 hookname = python:modulename.submodule.callable
1452 hookname = python:modulename.submodule.callable
1453 hookname = python:/path/to/python/module.py:callable
1453 hookname = python:/path/to/python/module.py:callable
1454
1454
1455 Python hooks are run within the Mercurial process. Each hook is
1455 Python hooks are run within the Mercurial process. Each hook is
1456 called with at least three keyword arguments: a ui object (keyword
1456 called with at least three keyword arguments: a ui object (keyword
1457 ``ui``), a repository object (keyword ``repo``), and a ``hooktype``
1457 ``ui``), a repository object (keyword ``repo``), and a ``hooktype``
1458 keyword that tells what kind of hook is used. Arguments listed as
1458 keyword that tells what kind of hook is used. Arguments listed as
1459 environment variables above are passed as keyword arguments, with no
1459 environment variables above are passed as keyword arguments, with no
1460 ``HG_`` prefix, and names in lower case.
1460 ``HG_`` prefix, and names in lower case.
1461
1461
1462 If a Python hook returns a "true" value or raises an exception, this
1462 If a Python hook returns a "true" value or raises an exception, this
1463 is treated as a failure.
1463 is treated as a failure.
1464
1464
1465
1465
1466 ``hostfingerprints``
1466 ``hostfingerprints``
1467 --------------------
1467 --------------------
1468
1468
1469 (Deprecated. Use ``[hostsecurity]``'s ``fingerprints`` options instead.)
1469 (Deprecated. Use ``[hostsecurity]``'s ``fingerprints`` options instead.)
1470
1470
1471 Fingerprints of the certificates of known HTTPS servers.
1471 Fingerprints of the certificates of known HTTPS servers.
1472
1472
1473 An HTTPS connection to a server with a fingerprint configured here will
1473 An HTTPS connection to a server with a fingerprint configured here will
1474 only succeed if the server's certificate matches the fingerprint.
1474 only succeed if the server's certificate matches the fingerprint.
1475 This is very similar to how ssh known hosts works.
1475 This is very similar to how ssh known hosts works.
1476
1476
1477 The fingerprint is the SHA-1 hash value of the DER encoded certificate.
1477 The fingerprint is the SHA-1 hash value of the DER encoded certificate.
1478 Multiple values can be specified (separated by spaces or commas). This can
1478 Multiple values can be specified (separated by spaces or commas). This can
1479 be used to define both old and new fingerprints while a host transitions
1479 be used to define both old and new fingerprints while a host transitions
1480 to a new certificate.
1480 to a new certificate.
1481
1481
1482 The CA chain and web.cacerts are not used for servers with a fingerprint.
1482 The CA chain and web.cacerts are not used for servers with a fingerprint.
1483
1483
1484 For example::
1484 For example::
1485
1485
1486 [hostfingerprints]
1486 [hostfingerprints]
1487 hg.intevation.de = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1487 hg.intevation.de = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1488 hg.intevation.org = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1488 hg.intevation.org = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1489
1489
1490 ``hostsecurity``
1490 ``hostsecurity``
1491 ----------------
1491 ----------------
1492
1492
1493 Used to specify global and per-host security settings for connecting to
1493 Used to specify global and per-host security settings for connecting to
1494 other machines.
1494 other machines.
1495
1495
1496 The following options control default behavior for all hosts.
1496 The following options control default behavior for all hosts.
1497
1497
1498 ``ciphers``
1498 ``ciphers``
1499 Defines the cryptographic ciphers to use for connections.
1499 Defines the cryptographic ciphers to use for connections.
1500
1500
1501 Value must be a valid OpenSSL Cipher List Format as documented at
1501 Value must be a valid OpenSSL Cipher List Format as documented at
1502 https://www.openssl.org/docs/manmaster/apps/ciphers.html#CIPHER-LIST-FORMAT.
1502 https://www.openssl.org/docs/manmaster/apps/ciphers.html#CIPHER-LIST-FORMAT.
1503
1503
1504 This setting is for advanced users only. Setting to incorrect values
1504 This setting is for advanced users only. Setting to incorrect values
1505 can significantly lower connection security or decrease performance.
1505 can significantly lower connection security or decrease performance.
1506 You have been warned.
1506 You have been warned.
1507
1507
1508 This option requires Python 2.7.
1508 This option requires Python 2.7.
1509
1509
1510 ``minimumprotocol``
1510 ``minimumprotocol``
1511 Defines the minimum channel encryption protocol to use.
1511 Defines the minimum channel encryption protocol to use.
1512
1512
1513 By default, the highest version of TLS supported by both client and server
1513 By default, the highest version of TLS supported by both client and server
1514 is used.
1514 is used.
1515
1515
1516 Allowed values are: ``tls1.0``, ``tls1.1``, ``tls1.2``.
1516 Allowed values are: ``tls1.0``, ``tls1.1``, ``tls1.2``.
1517
1517
1518 When running on an old Python version, only ``tls1.0`` is allowed since
1518 When running on an old Python version, only ``tls1.0`` is allowed since
1519 old versions of Python only support up to TLS 1.0.
1519 old versions of Python only support up to TLS 1.0.
1520
1520
1521 When running a Python that supports modern TLS versions, the default is
1521 When running a Python that supports modern TLS versions, the default is
1522 ``tls1.1``. ``tls1.0`` can still be used to allow TLS 1.0. However, this
1522 ``tls1.1``. ``tls1.0`` can still be used to allow TLS 1.0. However, this
1523 weakens security and should only be used as a feature of last resort if
1523 weakens security and should only be used as a feature of last resort if
1524 a server does not support TLS 1.1+.
1524 a server does not support TLS 1.1+.
1525
1525
1526 Options in the ``[hostsecurity]`` section can have the form
1526 Options in the ``[hostsecurity]`` section can have the form
1527 ``hostname``:``setting``. This allows multiple settings to be defined on a
1527 ``hostname``:``setting``. This allows multiple settings to be defined on a
1528 per-host basis.
1528 per-host basis.
1529
1529
1530 The following per-host settings can be defined.
1530 The following per-host settings can be defined.
1531
1531
1532 ``ciphers``
1532 ``ciphers``
1533 This behaves like ``ciphers`` as described above except it only applies
1533 This behaves like ``ciphers`` as described above except it only applies
1534 to the host on which it is defined.
1534 to the host on which it is defined.
1535
1535
1536 ``fingerprints``
1536 ``fingerprints``
1537 A list of hashes of the DER encoded peer/remote certificate. Values have
1537 A list of hashes of the DER encoded peer/remote certificate. Values have
1538 the form ``algorithm``:``fingerprint``. e.g.
1538 the form ``algorithm``:``fingerprint``. e.g.
1539 ``sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2``.
1539 ``sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2``.
1540 In addition, colons (``:``) can appear in the fingerprint part.
1540 In addition, colons (``:``) can appear in the fingerprint part.
1541
1541
1542 The following algorithms/prefixes are supported: ``sha1``, ``sha256``,
1542 The following algorithms/prefixes are supported: ``sha1``, ``sha256``,
1543 ``sha512``.
1543 ``sha512``.
1544
1544
1545 Use of ``sha256`` or ``sha512`` is preferred.
1545 Use of ``sha256`` or ``sha512`` is preferred.
1546
1546
1547 If a fingerprint is specified, the CA chain is not validated for this
1547 If a fingerprint is specified, the CA chain is not validated for this
1548 host and Mercurial will require the remote certificate to match one
1548 host and Mercurial will require the remote certificate to match one
1549 of the fingerprints specified. This means if the server updates its
1549 of the fingerprints specified. This means if the server updates its
1550 certificate, Mercurial will abort until a new fingerprint is defined.
1550 certificate, Mercurial will abort until a new fingerprint is defined.
1551 This can provide stronger security than traditional CA-based validation
1551 This can provide stronger security than traditional CA-based validation
1552 at the expense of convenience.
1552 at the expense of convenience.
1553
1553
1554 This option takes precedence over ``verifycertsfile``.
1554 This option takes precedence over ``verifycertsfile``.
1555
1555
1556 ``minimumprotocol``
1556 ``minimumprotocol``
1557 This behaves like ``minimumprotocol`` as described above except it
1557 This behaves like ``minimumprotocol`` as described above except it
1558 only applies to the host on which it is defined.
1558 only applies to the host on which it is defined.
1559
1559
1560 ``verifycertsfile``
1560 ``verifycertsfile``
1561 Path to a file containing a list of PEM encoded certificates used to
1561 Path to a file containing a list of PEM encoded certificates used to
1562 verify the server certificate. Environment variables and ``~user``
1562 verify the server certificate. Environment variables and ``~user``
1563 constructs are expanded in the filename.
1563 constructs are expanded in the filename.
1564
1564
1565 The server certificate or the certificate's certificate authority (CA)
1565 The server certificate or the certificate's certificate authority (CA)
1566 must match a certificate from this file or certificate verification
1566 must match a certificate from this file or certificate verification
1567 will fail and connections to the server will be refused.
1567 will fail and connections to the server will be refused.
1568
1568
1569 If defined, only certificates provided by this file will be used:
1569 If defined, only certificates provided by this file will be used:
1570 ``web.cacerts`` and any system/default certificates will not be
1570 ``web.cacerts`` and any system/default certificates will not be
1571 used.
1571 used.
1572
1572
1573 This option has no effect if the per-host ``fingerprints`` option
1573 This option has no effect if the per-host ``fingerprints`` option
1574 is set.
1574 is set.
1575
1575
1576 The format of the file is as follows::
1576 The format of the file is as follows::
1577
1577
1578 -----BEGIN CERTIFICATE-----
1578 -----BEGIN CERTIFICATE-----
1579 ... (certificate in base64 PEM encoding) ...
1579 ... (certificate in base64 PEM encoding) ...
1580 -----END CERTIFICATE-----
1580 -----END CERTIFICATE-----
1581 -----BEGIN CERTIFICATE-----
1581 -----BEGIN CERTIFICATE-----
1582 ... (certificate in base64 PEM encoding) ...
1582 ... (certificate in base64 PEM encoding) ...
1583 -----END CERTIFICATE-----
1583 -----END CERTIFICATE-----
1584
1584
1585 For example::
1585 For example::
1586
1586
1587 [hostsecurity]
1587 [hostsecurity]
1588 hg.example.com:fingerprints = sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
1588 hg.example.com:fingerprints = sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
1589 hg2.example.com:fingerprints = sha1:914f1aff87249c09b6859b88b1906d30756491ca, sha1:fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1589 hg2.example.com:fingerprints = sha1:914f1aff87249c09b6859b88b1906d30756491ca, sha1:fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
1590 hg3.example.com:fingerprints = sha256:9a:b0:dc:e2:75:ad:8a:b7:84:58:e5:1f:07:32:f1:87:e6:bd:24:22:af:b7:ce:8e:9c:b4:10:cf:b9:f4:0e:d2
1590 hg3.example.com:fingerprints = sha256:9a:b0:dc:e2:75:ad:8a:b7:84:58:e5:1f:07:32:f1:87:e6:bd:24:22:af:b7:ce:8e:9c:b4:10:cf:b9:f4:0e:d2
1591 foo.example.com:verifycertsfile = /etc/ssl/trusted-ca-certs.pem
1591 foo.example.com:verifycertsfile = /etc/ssl/trusted-ca-certs.pem
1592
1592
1593 To change the default minimum protocol version to TLS 1.2 but to allow TLS 1.1
1593 To change the default minimum protocol version to TLS 1.2 but to allow TLS 1.1
1594 when connecting to ``hg.example.com``::
1594 when connecting to ``hg.example.com``::
1595
1595
1596 [hostsecurity]
1596 [hostsecurity]
1597 minimumprotocol = tls1.2
1597 minimumprotocol = tls1.2
1598 hg.example.com:minimumprotocol = tls1.1
1598 hg.example.com:minimumprotocol = tls1.1
1599
1599
1600 ``http_proxy``
1600 ``http_proxy``
1601 --------------
1601 --------------
1602
1602
1603 Used to access web-based Mercurial repositories through a HTTP
1603 Used to access web-based Mercurial repositories through a HTTP
1604 proxy.
1604 proxy.
1605
1605
1606 ``host``
1606 ``host``
1607 Host name and (optional) port of the proxy server, for example
1607 Host name and (optional) port of the proxy server, for example
1608 "myproxy:8000".
1608 "myproxy:8000".
1609
1609
1610 ``no``
1610 ``no``
1611 Optional. Comma-separated list of host names that should bypass
1611 Optional. Comma-separated list of host names that should bypass
1612 the proxy.
1612 the proxy.
1613
1613
1614 ``passwd``
1614 ``passwd``
1615 Optional. Password to authenticate with at the proxy server.
1615 Optional. Password to authenticate with at the proxy server.
1616
1616
1617 ``user``
1617 ``user``
1618 Optional. User name to authenticate with at the proxy server.
1618 Optional. User name to authenticate with at the proxy server.
1619
1619
1620 ``always``
1620 ``always``
1621 Optional. Always use the proxy, even for localhost and any entries
1621 Optional. Always use the proxy, even for localhost and any entries
1622 in ``http_proxy.no``. (default: False)
1622 in ``http_proxy.no``. (default: False)
1623
1623
1624 ``http``
1624 ``http``
1625 ----------
1625 ----------
1626
1626
1627 Used to configure access to Mercurial repositories via HTTP.
1627 Used to configure access to Mercurial repositories via HTTP.
1628
1628
1629 ``timeout``
1629 ``timeout``
1630 If set, blocking operations will timeout after that many seconds.
1630 If set, blocking operations will timeout after that many seconds.
1631 (default: None)
1631 (default: None)
1632
1632
1633 ``merge``
1633 ``merge``
1634 ---------
1634 ---------
1635
1635
1636 This section specifies behavior during merges and updates.
1636 This section specifies behavior during merges and updates.
1637
1637
1638 ``checkignored``
1638 ``checkignored``
1639 Controls behavior when an ignored file on disk has the same name as a tracked
1639 Controls behavior when an ignored file on disk has the same name as a tracked
1640 file in the changeset being merged or updated to, and has different
1640 file in the changeset being merged or updated to, and has different
1641 contents. Options are ``abort``, ``warn`` and ``ignore``. With ``abort``,
1641 contents. Options are ``abort``, ``warn`` and ``ignore``. With ``abort``,
1642 abort on such files. With ``warn``, warn on such files and back them up as
1642 abort on such files. With ``warn``, warn on such files and back them up as
1643 ``.orig``. With ``ignore``, don't print a warning and back them up as
1643 ``.orig``. With ``ignore``, don't print a warning and back them up as
1644 ``.orig``. (default: ``abort``)
1644 ``.orig``. (default: ``abort``)
1645
1645
1646 ``checkunknown``
1646 ``checkunknown``
1647 Controls behavior when an unknown file that isn't ignored has the same name
1647 Controls behavior when an unknown file that isn't ignored has the same name
1648 as a tracked file in the changeset being merged or updated to, and has
1648 as a tracked file in the changeset being merged or updated to, and has
1649 different contents. Similar to ``merge.checkignored``, except for files that
1649 different contents. Similar to ``merge.checkignored``, except for files that
1650 are not ignored. (default: ``abort``)
1650 are not ignored. (default: ``abort``)
1651
1651
1652 ``on-failure``
1652 ``on-failure``
1653 When set to ``continue`` (the default), the merge process attempts to
1653 When set to ``continue`` (the default), the merge process attempts to
1654 merge all unresolved files using the merge chosen tool, regardless of
1654 merge all unresolved files using the merge chosen tool, regardless of
1655 whether previous file merge attempts during the process succeeded or not.
1655 whether previous file merge attempts during the process succeeded or not.
1656 Setting this to ``prompt`` will prompt after any merge failure to continue
1656 Setting this to ``prompt`` will prompt after any merge failure to continue
1657 or halt the merge process. Setting this to ``halt`` will automatically
1657 or halt the merge process. Setting this to ``halt`` will automatically
1658 halt the merge process on any merge tool failure. The merge process
1658 halt the merge process on any merge tool failure. The merge process
1659 can be restarted by using the ``resolve`` command. When a merge is
1659 can be restarted by using the ``resolve`` command. When a merge is
1660 halted, the repository is left in a normal ``unresolved`` merge state.
1660 halted, the repository is left in a normal ``unresolved`` merge state.
1661 (default: ``continue``)
1661 (default: ``continue``)
1662
1662
1663 ``strict-capability-check``
1663 ``strict-capability-check``
1664 Whether capabilities of internal merge tools are checked strictly
1664 Whether capabilities of internal merge tools are checked strictly
1665 or not, while examining rules to decide merge tool to be used.
1665 or not, while examining rules to decide merge tool to be used.
1666 (default: False)
1666 (default: False)
1667
1667
1668 ``merge-patterns``
1668 ``merge-patterns``
1669 ------------------
1669 ------------------
1670
1670
1671 This section specifies merge tools to associate with particular file
1671 This section specifies merge tools to associate with particular file
1672 patterns. Tools matched here will take precedence over the default
1672 patterns. Tools matched here will take precedence over the default
1673 merge tool. Patterns are globs by default, rooted at the repository
1673 merge tool. Patterns are globs by default, rooted at the repository
1674 root.
1674 root.
1675
1675
1676 Example::
1676 Example::
1677
1677
1678 [merge-patterns]
1678 [merge-patterns]
1679 **.c = kdiff3
1679 **.c = kdiff3
1680 **.jpg = myimgmerge
1680 **.jpg = myimgmerge
1681
1681
1682 ``merge-tools``
1682 ``merge-tools``
1683 ---------------
1683 ---------------
1684
1684
1685 This section configures external merge tools to use for file-level
1685 This section configures external merge tools to use for file-level
1686 merges. This section has likely been preconfigured at install time.
1686 merges. This section has likely been preconfigured at install time.
1687 Use :hg:`config merge-tools` to check the existing configuration.
1687 Use :hg:`config merge-tools` to check the existing configuration.
1688 Also see :hg:`help merge-tools` for more details.
1688 Also see :hg:`help merge-tools` for more details.
1689
1689
1690 Example ``~/.hgrc``::
1690 Example ``~/.hgrc``::
1691
1691
1692 [merge-tools]
1692 [merge-tools]
1693 # Override stock tool location
1693 # Override stock tool location
1694 kdiff3.executable = ~/bin/kdiff3
1694 kdiff3.executable = ~/bin/kdiff3
1695 # Specify command line
1695 # Specify command line
1696 kdiff3.args = $base $local $other -o $output
1696 kdiff3.args = $base $local $other -o $output
1697 # Give higher priority
1697 # Give higher priority
1698 kdiff3.priority = 1
1698 kdiff3.priority = 1
1699
1699
1700 # Changing the priority of preconfigured tool
1700 # Changing the priority of preconfigured tool
1701 meld.priority = 0
1701 meld.priority = 0
1702
1702
1703 # Disable a preconfigured tool
1703 # Disable a preconfigured tool
1704 vimdiff.disabled = yes
1704 vimdiff.disabled = yes
1705
1705
1706 # Define new tool
1706 # Define new tool
1707 myHtmlTool.args = -m $local $other $base $output
1707 myHtmlTool.args = -m $local $other $base $output
1708 myHtmlTool.regkey = Software\FooSoftware\HtmlMerge
1708 myHtmlTool.regkey = Software\FooSoftware\HtmlMerge
1709 myHtmlTool.priority = 1
1709 myHtmlTool.priority = 1
1710
1710
1711 Supported arguments:
1711 Supported arguments:
1712
1712
1713 ``priority``
1713 ``priority``
1714 The priority in which to evaluate this tool.
1714 The priority in which to evaluate this tool.
1715 (default: 0)
1715 (default: 0)
1716
1716
1717 ``executable``
1717 ``executable``
1718 Either just the name of the executable or its pathname.
1718 Either just the name of the executable or its pathname.
1719
1719
1720 .. container:: windows
1720 .. container:: windows
1721
1721
1722 On Windows, the path can use environment variables with ${ProgramFiles}
1722 On Windows, the path can use environment variables with ${ProgramFiles}
1723 syntax.
1723 syntax.
1724
1724
1725 (default: the tool name)
1725 (default: the tool name)
1726
1726
1727 ``args``
1727 ``args``
1728 The arguments to pass to the tool executable. You can refer to the
1728 The arguments to pass to the tool executable. You can refer to the
1729 files being merged as well as the output file through these
1729 files being merged as well as the output file through these
1730 variables: ``$base``, ``$local``, ``$other``, ``$output``.
1730 variables: ``$base``, ``$local``, ``$other``, ``$output``.
1731
1731
1732 The meaning of ``$local`` and ``$other`` can vary depending on which action is
1732 The meaning of ``$local`` and ``$other`` can vary depending on which action is
1733 being performed. During an update or merge, ``$local`` represents the original
1733 being performed. During an update or merge, ``$local`` represents the original
1734 state of the file, while ``$other`` represents the commit you are updating to or
1734 state of the file, while ``$other`` represents the commit you are updating to or
1735 the commit you are merging with. During a rebase, ``$local`` represents the
1735 the commit you are merging with. During a rebase, ``$local`` represents the
1736 destination of the rebase, and ``$other`` represents the commit being rebased.
1736 destination of the rebase, and ``$other`` represents the commit being rebased.
1737
1737
1738 Some operations define custom labels to assist with identifying the revisions,
1738 Some operations define custom labels to assist with identifying the revisions,
1739 accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom
1739 accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom
1740 labels are not available, these will be ``local``, ``other``, and ``base``,
1740 labels are not available, these will be ``local``, ``other``, and ``base``,
1741 respectively.
1741 respectively.
1742 (default: ``$local $base $other``)
1742 (default: ``$local $base $other``)
1743
1743
1744 ``premerge``
1744 ``premerge``
1745 Attempt to run internal non-interactive 3-way merge tool before
1745 Attempt to run internal non-interactive 3-way merge tool before
1746 launching external tool. Options are ``true``, ``false``, ``keep``,
1746 launching external tool. Options are ``true``, ``false``, ``keep``,
1747 ``keep-merge3``, or ``keep-mergediff`` (experimental). The ``keep`` option
1747 ``keep-merge3``, or ``keep-mergediff`` (experimental). The ``keep`` option
1748 will leave markers in the file if the premerge fails. The ``keep-merge3``
1748 will leave markers in the file if the premerge fails. The ``keep-merge3``
1749 will do the same but include information about the base of the merge in the
1749 will do the same but include information about the base of the merge in the
1750 marker (see internal :merge3 in :hg:`help merge-tools`). The
1750 marker (see internal :merge3 in :hg:`help merge-tools`). The
1751 ``keep-mergediff`` option is similar but uses a different marker style
1751 ``keep-mergediff`` option is similar but uses a different marker style
1752 (see internal :mergediff in :hg:`help merge-tools`). (default: True)
1752 (see internal :mergediff in :hg:`help merge-tools`). (default: True)
1753
1753
1754 ``binary``
1754 ``binary``
1755 This tool can merge binary files. (default: False, unless tool
1755 This tool can merge binary files. (default: False, unless tool
1756 was selected by file pattern match)
1756 was selected by file pattern match)
1757
1757
1758 ``symlink``
1758 ``symlink``
1759 This tool can merge symlinks. (default: False)
1759 This tool can merge symlinks. (default: False)
1760
1760
1761 ``check``
1761 ``check``
1762 A list of merge success-checking options:
1762 A list of merge success-checking options:
1763
1763
1764 ``changed``
1764 ``changed``
1765 Ask whether merge was successful when the merged file shows no changes.
1765 Ask whether merge was successful when the merged file shows no changes.
1766 ``conflicts``
1766 ``conflicts``
1767 Check whether there are conflicts even though the tool reported success.
1767 Check whether there are conflicts even though the tool reported success.
1768 ``prompt``
1768 ``prompt``
1769 Always prompt for merge success, regardless of success reported by tool.
1769 Always prompt for merge success, regardless of success reported by tool.
1770
1770
1771 ``fixeol``
1771 ``fixeol``
1772 Attempt to fix up EOL changes caused by the merge tool.
1772 Attempt to fix up EOL changes caused by the merge tool.
1773 (default: False)
1773 (default: False)
1774
1774
1775 ``gui``
1775 ``gui``
1776 This tool requires a graphical interface to run. (default: False)
1776 This tool requires a graphical interface to run. (default: False)
1777
1777
1778 ``mergemarkers``
1778 ``mergemarkers``
1779 Controls whether the labels passed via ``$labellocal``, ``$labelother``, and
1779 Controls whether the labels passed via ``$labellocal``, ``$labelother``, and
1780 ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or
1780 ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or
1781 ``basic``. If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict
1781 ``basic``. If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict
1782 markers generated during premerge will be ``detailed`` if either this option or
1782 markers generated during premerge will be ``detailed`` if either this option or
1783 the corresponding option in the ``[ui]`` section is ``detailed``.
1783 the corresponding option in the ``[ui]`` section is ``detailed``.
1784 (default: ``basic``)
1784 (default: ``basic``)
1785
1785
1786 ``mergemarkertemplate``
1786 ``mergemarkertemplate``
1787 This setting can be used to override ``mergemarker`` from the
1787 This setting can be used to override ``mergemarker`` from the
1788 ``[command-templates]`` section on a per-tool basis; this applies to the
1788 ``[command-templates]`` section on a per-tool basis; this applies to the
1789 ``$label``-prefixed variables and to the conflict markers that are generated
1789 ``$label``-prefixed variables and to the conflict markers that are generated
1790 if ``premerge`` is ``keep`` or ``keep-merge3``. See the corresponding variable
1790 if ``premerge`` is ``keep`` or ``keep-merge3``. See the corresponding variable
1791 in ``[ui]`` for more information.
1791 in ``[ui]`` for more information.
1792
1792
1793 .. container:: windows
1793 .. container:: windows
1794
1794
1795 ``regkey``
1795 ``regkey``
1796 Windows registry key which describes install location of this
1796 Windows registry key which describes install location of this
1797 tool. Mercurial will search for this key first under
1797 tool. Mercurial will search for this key first under
1798 ``HKEY_CURRENT_USER`` and then under ``HKEY_LOCAL_MACHINE``.
1798 ``HKEY_CURRENT_USER`` and then under ``HKEY_LOCAL_MACHINE``.
1799 (default: None)
1799 (default: None)
1800
1800
1801 ``regkeyalt``
1801 ``regkeyalt``
1802 An alternate Windows registry key to try if the first key is not
1802 An alternate Windows registry key to try if the first key is not
1803 found. The alternate key uses the same ``regname`` and ``regappend``
1803 found. The alternate key uses the same ``regname`` and ``regappend``
1804 semantics of the primary key. The most common use for this key
1804 semantics of the primary key. The most common use for this key
1805 is to search for 32bit applications on 64bit operating systems.
1805 is to search for 32bit applications on 64bit operating systems.
1806 (default: None)
1806 (default: None)
1807
1807
1808 ``regname``
1808 ``regname``
1809 Name of value to read from specified registry key.
1809 Name of value to read from specified registry key.
1810 (default: the unnamed (default) value)
1810 (default: the unnamed (default) value)
1811
1811
1812 ``regappend``
1812 ``regappend``
1813 String to append to the value read from the registry, typically
1813 String to append to the value read from the registry, typically
1814 the executable name of the tool.
1814 the executable name of the tool.
1815 (default: None)
1815 (default: None)
1816
1816
1817 ``pager``
1817 ``pager``
1818 ---------
1818 ---------
1819
1819
1820 Setting used to control when to paginate and with what external tool. See
1820 Setting used to control when to paginate and with what external tool. See
1821 :hg:`help pager` for details.
1821 :hg:`help pager` for details.
1822
1822
1823 ``pager``
1823 ``pager``
1824 Define the external tool used as pager.
1824 Define the external tool used as pager.
1825
1825
1826 If no pager is set, Mercurial uses the environment variable $PAGER.
1826 If no pager is set, Mercurial uses the environment variable $PAGER.
1827 If neither pager.pager, nor $PAGER is set, a default pager will be
1827 If neither pager.pager, nor $PAGER is set, a default pager will be
1828 used, typically `less` on Unix and `more` on Windows. Example::
1828 used, typically `less` on Unix and `more` on Windows. Example::
1829
1829
1830 [pager]
1830 [pager]
1831 pager = less -FRX
1831 pager = less -FRX
1832
1832
1833 ``ignore``
1833 ``ignore``
1834 List of commands to disable the pager for. Example::
1834 List of commands to disable the pager for. Example::
1835
1835
1836 [pager]
1836 [pager]
1837 ignore = version, help, update
1837 ignore = version, help, update
1838
1838
1839 ``patch``
1839 ``patch``
1840 ---------
1840 ---------
1841
1841
1842 Settings used when applying patches, for instance through the 'import'
1842 Settings used when applying patches, for instance through the 'import'
1843 command or with Mercurial Queues extension.
1843 command or with Mercurial Queues extension.
1844
1844
1845 ``eol``
1845 ``eol``
1846 When set to 'strict' patch content and patched files end of lines
1846 When set to 'strict' patch content and patched files end of lines
1847 are preserved. When set to ``lf`` or ``crlf``, both files end of
1847 are preserved. When set to ``lf`` or ``crlf``, both files end of
1848 lines are ignored when patching and the result line endings are
1848 lines are ignored when patching and the result line endings are
1849 normalized to either LF (Unix) or CRLF (Windows). When set to
1849 normalized to either LF (Unix) or CRLF (Windows). When set to
1850 ``auto``, end of lines are again ignored while patching but line
1850 ``auto``, end of lines are again ignored while patching but line
1851 endings in patched files are normalized to their original setting
1851 endings in patched files are normalized to their original setting
1852 on a per-file basis. If target file does not exist or has no end
1852 on a per-file basis. If target file does not exist or has no end
1853 of line, patch line endings are preserved.
1853 of line, patch line endings are preserved.
1854 (default: strict)
1854 (default: strict)
1855
1855
1856 ``fuzz``
1856 ``fuzz``
1857 The number of lines of 'fuzz' to allow when applying patches. This
1857 The number of lines of 'fuzz' to allow when applying patches. This
1858 controls how much context the patcher is allowed to ignore when
1858 controls how much context the patcher is allowed to ignore when
1859 trying to apply a patch.
1859 trying to apply a patch.
1860 (default: 2)
1860 (default: 2)
1861
1861
1862 ``paths``
1862 ``paths``
1863 ---------
1863 ---------
1864
1864
1865 Assigns symbolic names and behavior to repositories.
1865 Assigns symbolic names and behavior to repositories.
1866
1866
1867 Options are symbolic names defining the URL or directory that is the
1867 Options are symbolic names defining the URL or directory that is the
1868 location of the repository. Example::
1868 location of the repository. Example::
1869
1869
1870 [paths]
1870 [paths]
1871 my_server = https://example.com/my_repo
1871 my_server = https://example.com/my_repo
1872 local_path = /home/me/repo
1872 local_path = /home/me/repo
1873
1873
1874 These symbolic names can be used from the command line. To pull
1874 These symbolic names can be used from the command line. To pull
1875 from ``my_server``: :hg:`pull my_server`. To push to ``local_path``:
1875 from ``my_server``: :hg:`pull my_server`. To push to ``local_path``:
1876 :hg:`push local_path`. You can check :hg:`help urls` for details about
1876 :hg:`push local_path`. You can check :hg:`help urls` for details about
1877 valid URLs.
1877 valid URLs.
1878
1878
1879 Options containing colons (``:``) denote sub-options that can influence
1879 Options containing colons (``:``) denote sub-options that can influence
1880 behavior for that specific path. Example::
1880 behavior for that specific path. Example::
1881
1881
1882 [paths]
1882 [paths]
1883 my_server = https://example.com/my_path
1883 my_server = https://example.com/my_path
1884 my_server:pushurl = ssh://example.com/my_path
1884 my_server:pushurl = ssh://example.com/my_path
1885
1885
1886 Paths using the `path://otherpath` scheme will inherit the sub-options value from
1886 Paths using the `path://otherpath` scheme will inherit the sub-options value from
1887 the path they point to.
1887 the path they point to.
1888
1888
1889 The following sub-options can be defined:
1889 The following sub-options can be defined:
1890
1890
1891 ``multi-urls``
1891 ``multi-urls``
1892 A boolean option. When enabled the value of the `[paths]` entry will be
1892 A boolean option. When enabled the value of the `[paths]` entry will be
1893 parsed as a list and the alias will resolve to multiple destinations. If some
1893 parsed as a list and the alias will resolve to multiple destinations. If some
1894 of the list entries use the `path://` syntax, the sub-option will be inherited
1894 of the list entries use the `path://` syntax, the sub-option will be inherited
1895 individually.
1895 individually.
1896
1896
1897 ``pushurl``
1897 ``pushurl``
1898 The URL to use for push operations. If not defined, the location
1898 The URL to use for push operations. If not defined, the location
1899 defined by the path's main entry is used.
1899 defined by the path's main entry is used.
1900
1900
1901 ``pushrev``
1901 ``pushrev``
1902 A revset defining which revisions to push by default.
1902 A revset defining which revisions to push by default.
1903
1903
1904 When :hg:`push` is executed without a ``-r`` argument, the revset
1904 When :hg:`push` is executed without a ``-r`` argument, the revset
1905 defined by this sub-option is evaluated to determine what to push.
1905 defined by this sub-option is evaluated to determine what to push.
1906
1906
1907 For example, a value of ``.`` will push the working directory's
1907 For example, a value of ``.`` will push the working directory's
1908 revision by default.
1908 revision by default.
1909
1909
1910 Revsets specifying bookmarks will not result in the bookmark being
1910 Revsets specifying bookmarks will not result in the bookmark being
1911 pushed.
1911 pushed.
1912
1912
1913 ``bookmarks.mode``
1913 ``bookmarks.mode``
1914 How bookmarks will be dealt with during the exchange. It supports the following values:
1914 How bookmarks will be dealt with during the exchange. It supports the following values:
1915
1915
1916 - ``default``: the default behavior, local and remote bookmarks are "merged"
1916 - ``default``: the default behavior, local and remote bookmarks are "merged"
1917 on push/pull.
1917 on push/pull.
1918
1918
1919 - ``mirror``: when pulling, replace local bookmarks by remote bookmarks. This
1919 - ``mirror``: when pulling, replace local bookmarks by remote bookmarks. This
1920 is useful to replicate a repository, or as an optimization.
1920 is useful to replicate a repository, or as an optimization.
1921
1921
1922 - ``ignore``: ignore bookmarks during exchange.
1922 - ``ignore``: ignore bookmarks during exchange.
1923 (This currently only affects pulling)
1923 (This currently only affects pulling)
1924
1924
1925 The following special named paths exist:
1925 The following special named paths exist:
1926
1926
1927 ``default``
1927 ``default``
1928 The URL or directory to use when no source or remote is specified.
1928 The URL or directory to use when no source or remote is specified.
1929
1929
1930 :hg:`clone` will automatically define this path to the location the
1930 :hg:`clone` will automatically define this path to the location the
1931 repository was cloned from.
1931 repository was cloned from.
1932
1932
1933 ``default-push``
1933 ``default-push``
1934 (deprecated) The URL or directory for the default :hg:`push` location.
1934 (deprecated) The URL or directory for the default :hg:`push` location.
1935 ``default:pushurl`` should be used instead.
1935 ``default:pushurl`` should be used instead.
1936
1936
1937 ``phases``
1937 ``phases``
1938 ----------
1938 ----------
1939
1939
1940 Specifies default handling of phases. See :hg:`help phases` for more
1940 Specifies default handling of phases. See :hg:`help phases` for more
1941 information about working with phases.
1941 information about working with phases.
1942
1942
1943 ``publish``
1943 ``publish``
1944 Controls draft phase behavior when working as a server. When true,
1944 Controls draft phase behavior when working as a server. When true,
1945 pushed changesets are set to public in both client and server and
1945 pushed changesets are set to public in both client and server and
1946 pulled or cloned changesets are set to public in the client.
1946 pulled or cloned changesets are set to public in the client.
1947 (default: True)
1947 (default: True)
1948
1948
1949 ``new-commit``
1949 ``new-commit``
1950 Phase of newly-created commits.
1950 Phase of newly-created commits.
1951 (default: draft)
1951 (default: draft)
1952
1952
1953 ``checksubrepos``
1953 ``checksubrepos``
1954 Check the phase of the current revision of each subrepository. Allowed
1954 Check the phase of the current revision of each subrepository. Allowed
1955 values are "ignore", "follow" and "abort". For settings other than
1955 values are "ignore", "follow" and "abort". For settings other than
1956 "ignore", the phase of the current revision of each subrepository is
1956 "ignore", the phase of the current revision of each subrepository is
1957 checked before committing the parent repository. If any of those phases is
1957 checked before committing the parent repository. If any of those phases is
1958 greater than the phase of the parent repository (e.g. if a subrepo is in a
1958 greater than the phase of the parent repository (e.g. if a subrepo is in a
1959 "secret" phase while the parent repo is in "draft" phase), the commit is
1959 "secret" phase while the parent repo is in "draft" phase), the commit is
1960 either aborted (if checksubrepos is set to "abort") or the higher phase is
1960 either aborted (if checksubrepos is set to "abort") or the higher phase is
1961 used for the parent repository commit (if set to "follow").
1961 used for the parent repository commit (if set to "follow").
1962 (default: follow)
1962 (default: follow)
1963
1963
1964
1964
1965 ``profiling``
1965 ``profiling``
1966 -------------
1966 -------------
1967
1967
1968 Specifies profiling type, format, and file output. Two profilers are
1968 Specifies profiling type, format, and file output. Two profilers are
1969 supported: an instrumenting profiler (named ``ls``), and a sampling
1969 supported: an instrumenting profiler (named ``ls``), and a sampling
1970 profiler (named ``stat``).
1970 profiler (named ``stat``).
1971
1971
1972 In this section description, 'profiling data' stands for the raw data
1972 In this section description, 'profiling data' stands for the raw data
1973 collected during profiling, while 'profiling report' stands for a
1973 collected during profiling, while 'profiling report' stands for a
1974 statistical text report generated from the profiling data.
1974 statistical text report generated from the profiling data.
1975
1975
1976 ``enabled``
1976 ``enabled``
1977 Enable the profiler.
1977 Enable the profiler.
1978 (default: false)
1978 (default: false)
1979
1979
1980 This is equivalent to passing ``--profile`` on the command line.
1980 This is equivalent to passing ``--profile`` on the command line.
1981
1981
1982 ``type``
1982 ``type``
1983 The type of profiler to use.
1983 The type of profiler to use.
1984 (default: stat)
1984 (default: stat)
1985
1985
1986 ``ls``
1986 ``ls``
1987 Use Python's built-in instrumenting profiler. This profiler
1987 Use Python's built-in instrumenting profiler. This profiler
1988 works on all platforms, but each line number it reports is the
1988 works on all platforms, but each line number it reports is the
1989 first line of a function. This restriction makes it difficult to
1989 first line of a function. This restriction makes it difficult to
1990 identify the expensive parts of a non-trivial function.
1990 identify the expensive parts of a non-trivial function.
1991 ``stat``
1991 ``stat``
1992 Use a statistical profiler, statprof. This profiler is most
1992 Use a statistical profiler, statprof. This profiler is most
1993 useful for profiling commands that run for longer than about 0.1
1993 useful for profiling commands that run for longer than about 0.1
1994 seconds.
1994 seconds.
1995
1995
1996 ``format``
1996 ``format``
1997 Profiling format. Specific to the ``ls`` instrumenting profiler.
1997 Profiling format. Specific to the ``ls`` instrumenting profiler.
1998 (default: text)
1998 (default: text)
1999
1999
2000 ``text``
2000 ``text``
2001 Generate a profiling report. When saving to a file, it should be
2001 Generate a profiling report. When saving to a file, it should be
2002 noted that only the report is saved, and the profiling data is
2002 noted that only the report is saved, and the profiling data is
2003 not kept.
2003 not kept.
2004 ``kcachegrind``
2004 ``kcachegrind``
2005 Format profiling data for kcachegrind use: when saving to a
2005 Format profiling data for kcachegrind use: when saving to a
2006 file, the generated file can directly be loaded into
2006 file, the generated file can directly be loaded into
2007 kcachegrind.
2007 kcachegrind.
2008
2008
2009 ``statformat``
2009 ``statformat``
2010 Profiling format for the ``stat`` profiler.
2010 Profiling format for the ``stat`` profiler.
2011 (default: hotpath)
2011 (default: hotpath)
2012
2012
2013 ``hotpath``
2013 ``hotpath``
2014 Show a tree-based display containing the hot path of execution (where
2014 Show a tree-based display containing the hot path of execution (where
2015 most time was spent).
2015 most time was spent).
2016 ``bymethod``
2016 ``bymethod``
2017 Show a table of methods ordered by how frequently they are active.
2017 Show a table of methods ordered by how frequently they are active.
2018 ``byline``
2018 ``byline``
2019 Show a table of lines in files ordered by how frequently they are active.
2019 Show a table of lines in files ordered by how frequently they are active.
2020 ``json``
2020 ``json``
2021 Render profiling data as JSON.
2021 Render profiling data as JSON.
2022
2022
2023 ``freq``
2023 ``freq``
2024 Sampling frequency. Specific to the ``stat`` sampling profiler.
2024 Sampling frequency. Specific to the ``stat`` sampling profiler.
2025 (default: 1000)
2025 (default: 1000)
2026
2026
2027 ``output``
2027 ``output``
2028 File path where profiling data or report should be saved. If the
2028 File path where profiling data or report should be saved. If the
2029 file exists, it is replaced. (default: None, data is printed on
2029 file exists, it is replaced. (default: None, data is printed on
2030 stderr)
2030 stderr)
2031
2031
2032 ``sort``
2032 ``sort``
2033 Sort field. Specific to the ``ls`` instrumenting profiler.
2033 Sort field. Specific to the ``ls`` instrumenting profiler.
2034 One of ``callcount``, ``reccallcount``, ``totaltime`` and
2034 One of ``callcount``, ``reccallcount``, ``totaltime`` and
2035 ``inlinetime``.
2035 ``inlinetime``.
2036 (default: inlinetime)
2036 (default: inlinetime)
2037
2037
2038 ``time-track``
2038 ``time-track``
2039 Control if the stat profiler track ``cpu`` or ``real`` time.
2039 Control if the stat profiler track ``cpu`` or ``real`` time.
2040 (default: ``cpu`` on Windows, otherwise ``real``)
2040 (default: ``cpu`` on Windows, otherwise ``real``)
2041
2041
2042 ``limit``
2042 ``limit``
2043 Number of lines to show. Specific to the ``ls`` instrumenting profiler.
2043 Number of lines to show. Specific to the ``ls`` instrumenting profiler.
2044 (default: 30)
2044 (default: 30)
2045
2045
2046 ``nested``
2046 ``nested``
2047 Show at most this number of lines of drill-down info after each main entry.
2047 Show at most this number of lines of drill-down info after each main entry.
2048 This can help explain the difference between Total and Inline.
2048 This can help explain the difference between Total and Inline.
2049 Specific to the ``ls`` instrumenting profiler.
2049 Specific to the ``ls`` instrumenting profiler.
2050 (default: 0)
2050 (default: 0)
2051
2051
2052 ``showmin``
2052 ``showmin``
2053 Minimum fraction of samples an entry must have for it to be displayed.
2053 Minimum fraction of samples an entry must have for it to be displayed.
2054 Can be specified as a float between ``0.0`` and ``1.0`` or can have a
2054 Can be specified as a float between ``0.0`` and ``1.0`` or can have a
2055 ``%`` afterwards to allow values up to ``100``. e.g. ``5%``.
2055 ``%`` afterwards to allow values up to ``100``. e.g. ``5%``.
2056
2056
2057 Only used by the ``stat`` profiler.
2057 Only used by the ``stat`` profiler.
2058
2058
2059 For the ``hotpath`` format, default is ``0.05``.
2059 For the ``hotpath`` format, default is ``0.05``.
2060 For the ``chrome`` format, default is ``0.005``.
2060 For the ``chrome`` format, default is ``0.005``.
2061
2061
2062 The option is unused on other formats.
2062 The option is unused on other formats.
2063
2063
2064 ``showmax``
2064 ``showmax``
2065 Maximum fraction of samples an entry can have before it is ignored in
2065 Maximum fraction of samples an entry can have before it is ignored in
2066 display. Values format is the same as ``showmin``.
2066 display. Values format is the same as ``showmin``.
2067
2067
2068 Only used by the ``stat`` profiler.
2068 Only used by the ``stat`` profiler.
2069
2069
2070 For the ``chrome`` format, default is ``0.999``.
2070 For the ``chrome`` format, default is ``0.999``.
2071
2071
2072 The option is unused on other formats.
2072 The option is unused on other formats.
2073
2073
2074 ``showtime``
2074 ``showtime``
2075 Show time taken as absolute durations, in addition to percentages.
2075 Show time taken as absolute durations, in addition to percentages.
2076 Only used by the ``hotpath`` format.
2076 Only used by the ``hotpath`` format.
2077 (default: true)
2077 (default: true)
2078
2078
2079 ``progress``
2079 ``progress``
2080 ------------
2080 ------------
2081
2081
2082 Mercurial commands can draw progress bars that are as informative as
2082 Mercurial commands can draw progress bars that are as informative as
2083 possible. Some progress bars only offer indeterminate information, while others
2083 possible. Some progress bars only offer indeterminate information, while others
2084 have a definite end point.
2084 have a definite end point.
2085
2085
2086 ``debug``
2086 ``debug``
2087 Whether to print debug info when updating the progress bar. (default: False)
2087 Whether to print debug info when updating the progress bar. (default: False)
2088
2088
2089 ``delay``
2089 ``delay``
2090 Number of seconds (float) before showing the progress bar. (default: 3)
2090 Number of seconds (float) before showing the progress bar. (default: 3)
2091
2091
2092 ``changedelay``
2092 ``changedelay``
2093 Minimum delay before showing a new topic. When set to less than 3 * refresh,
2093 Minimum delay before showing a new topic. When set to less than 3 * refresh,
2094 that value will be used instead. (default: 1)
2094 that value will be used instead. (default: 1)
2095
2095
2096 ``estimateinterval``
2096 ``estimateinterval``
2097 Maximum sampling interval in seconds for speed and estimated time
2097 Maximum sampling interval in seconds for speed and estimated time
2098 calculation. (default: 60)
2098 calculation. (default: 60)
2099
2099
2100 ``refresh``
2100 ``refresh``
2101 Time in seconds between refreshes of the progress bar. (default: 0.1)
2101 Time in seconds between refreshes of the progress bar. (default: 0.1)
2102
2102
2103 ``format``
2103 ``format``
2104 Format of the progress bar.
2104 Format of the progress bar.
2105
2105
2106 Valid entries for the format field are ``topic``, ``bar``, ``number``,
2106 Valid entries for the format field are ``topic``, ``bar``, ``number``,
2107 ``unit``, ``estimate``, ``speed``, and ``item``. ``item`` defaults to the
2107 ``unit``, ``estimate``, ``speed``, and ``item``. ``item`` defaults to the
2108 last 20 characters of the item, but this can be changed by adding either
2108 last 20 characters of the item, but this can be changed by adding either
2109 ``-<num>`` which would take the last num characters, or ``+<num>`` for the
2109 ``-<num>`` which would take the last num characters, or ``+<num>`` for the
2110 first num characters.
2110 first num characters.
2111
2111
2112 (default: topic bar number estimate)
2112 (default: topic bar number estimate)
2113
2113
2114 ``width``
2114 ``width``
2115 If set, the maximum width of the progress information (that is, min(width,
2115 If set, the maximum width of the progress information (that is, min(width,
2116 term width) will be used).
2116 term width) will be used).
2117
2117
2118 ``clear-complete``
2118 ``clear-complete``
2119 Clear the progress bar after it's done. (default: True)
2119 Clear the progress bar after it's done. (default: True)
2120
2120
2121 ``disable``
2121 ``disable``
2122 If true, don't show a progress bar.
2122 If true, don't show a progress bar.
2123
2123
2124 ``assume-tty``
2124 ``assume-tty``
2125 If true, ALWAYS show a progress bar, unless disable is given.
2125 If true, ALWAYS show a progress bar, unless disable is given.
2126
2126
2127 ``rebase``
2127 ``rebase``
2128 ----------
2128 ----------
2129
2129
2130 ``evolution.allowdivergence``
2130 ``evolution.allowdivergence``
2131 Default to False, when True allow creating divergence when performing
2131 Default to False, when True allow creating divergence when performing
2132 rebase of obsolete changesets.
2132 rebase of obsolete changesets.
2133
2133
2134 ``revsetalias``
2134 ``revsetalias``
2135 ---------------
2135 ---------------
2136
2136
2137 Alias definitions for revsets. See :hg:`help revsets` for details.
2137 Alias definitions for revsets. See :hg:`help revsets` for details.
2138
2138
2139 ``rewrite``
2139 ``rewrite``
2140 -----------
2140 -----------
2141
2141
2142 ``backup-bundle``
2142 ``backup-bundle``
2143 Whether to save stripped changesets to a bundle file. (default: True)
2143 Whether to save stripped changesets to a bundle file. (default: True)
2144
2144
2145 ``update-timestamp``
2145 ``update-timestamp``
2146 If true, updates the date and time of the changeset to current. It is only
2146 If true, updates the date and time of the changeset to current. It is only
2147 applicable for `hg amend`, `hg commit --amend` and `hg uncommit` in the
2147 applicable for `hg amend`, `hg commit --amend` and `hg uncommit` in the
2148 current version.
2148 current version.
2149
2149
2150 ``empty-successor``
2150 ``empty-successor``
2151
2151
2152 Control what happens with empty successors that are the result of rewrite
2152 Control what happens with empty successors that are the result of rewrite
2153 operations. If set to ``skip``, the successor is not created. If set to
2153 operations. If set to ``skip``, the successor is not created. If set to
2154 ``keep``, the empty successor is created and kept.
2154 ``keep``, the empty successor is created and kept.
2155
2155
2156 Currently, only the rebase and absorb commands consider this configuration.
2156 Currently, only the rebase and absorb commands consider this configuration.
2157 (EXPERIMENTAL)
2157 (EXPERIMENTAL)
2158
2158
2159 ``rhg``
2159 ``rhg``
2160 -------
2160 -------
2161
2161
2162 The pure Rust fast-path for Mercurial. See `rust/README.rst` in the Mercurial repository.
2162 The pure Rust fast-path for Mercurial. See `rust/README.rst` in the Mercurial repository.
2163
2163
2164 ``fallback-executable``
2164 ``fallback-executable``
2165 Path to the executable to run in a sub-process when falling back to
2165 Path to the executable to run in a sub-process when falling back to
2166 another implementation of Mercurial.
2166 another implementation of Mercurial.
2167
2167
2168 ``fallback-immediately``
2168 ``fallback-immediately``
2169 Fall back to ``fallback-executable`` as soon as possible, regardless of
2169 Fall back to ``fallback-executable`` as soon as possible, regardless of
2170 the `rhg.on-unsupported` configuration. Useful for debugging, for example to
2170 the `rhg.on-unsupported` configuration. Useful for debugging, for example to
2171 bypass `rhg` if the default `hg` points to `rhg`.
2171 bypass `rhg` if the default `hg` points to `rhg`.
2172
2172
2173 Note that because this requires loading the configuration, it is possible
2173 Note that because this requires loading the configuration, it is possible
2174 that `rhg` errors out before being able to fall back.
2174 that `rhg` errors out before being able to fall back.
2175
2175
2176 ``ignored-extensions``
2176 ``ignored-extensions``
2177 Controls which extensions should be ignored by `rhg`. By default, `rhg`
2177 Controls which extensions should be ignored by `rhg`. By default, `rhg`
2178 triggers the `rhg.on-unsupported` behavior for any unsupported extensions.
2178 triggers the `rhg.on-unsupported` behavior for any unsupported extensions.
2179 Users can disable that behavior when they know that a given extension
2179 Users can disable that behavior when they know that a given extension
2180 does not need support from `rhg`.
2180 does not need support from `rhg`.
2181
2181
2182 Expects a list of extension names, or ``*`` to ignore all extensions.
2182 Expects a list of extension names, or ``*`` to ignore all extensions.
2183
2183
2184 Note: ``*:<suboption>`` is also a valid extension name for this
2184 Note: ``*:<suboption>`` is also a valid extension name for this
2185 configuration option.
2185 configuration option.
2186 As of this writing, the only valid "global" suboption is ``required``.
2186 As of this writing, the only valid "global" suboption is ``required``.
2187
2187
2188 ``on-unsupported``
2188 ``on-unsupported``
2189 Controls the behavior of `rhg` when detecting unsupported features.
2189 Controls the behavior of `rhg` when detecting unsupported features.
2190
2190
2191 Possible values are `abort` (default), `abort-silent` and `fallback`.
2191 Possible values are `abort` (default), `abort-silent` and `fallback`.
2192
2192
2193 ``abort``
2193 ``abort``
2194 Print an error message describing what feature is not supported,
2194 Print an error message describing what feature is not supported,
2195 and exit with code 252
2195 and exit with code 252
2196
2196
2197 ``abort-silent``
2197 ``abort-silent``
2198 Silently exit with code 252
2198 Silently exit with code 252
2199
2199
2200 ``fallback``
2200 ``fallback``
2201 Try running the fallback executable with the same parameters
2201 Try running the fallback executable with the same parameters
2202 (and trace the fallback reason, use `RUST_LOG=trace` to see).
2202 (and trace the fallback reason, use `RUST_LOG=trace` to see).
2203
2203
2204 ``share``
2204 ``share``
2205 ---------
2205 ---------
2206
2206
2207 ``safe-mismatch.source-safe``
2207 ``safe-mismatch.source-safe``
2208 Controls what happens when the shared repository does not use the
2208 Controls what happens when the shared repository does not use the
2209 share-safe mechanism but its source repository does.
2209 share-safe mechanism but its source repository does.
2210
2210
2211 Possible values are `abort` (default), `allow`, `upgrade-abort` and
2211 Possible values are `abort` (default), `allow`, `upgrade-abort` and
2212 `upgrade-allow`.
2212 `upgrade-allow`.
2213
2213
2214 ``abort``
2214 ``abort``
2215 Disallows running any command and aborts
2215 Disallows running any command and aborts
2216 ``allow``
2216 ``allow``
2217 Respects the feature presence in the share source
2217 Respects the feature presence in the share source
2218 ``upgrade-abort``
2218 ``upgrade-abort``
2219 Tries to upgrade the share to use share-safe; if it fails, aborts
2219 Tries to upgrade the share to use share-safe; if it fails, aborts
2220 ``upgrade-allow``
2220 ``upgrade-allow``
2221 Tries to upgrade the share; if it fails, continue by
2221 Tries to upgrade the share; if it fails, continue by
2222 respecting the share source setting
2222 respecting the share source setting
2223
2223
2224 Check :hg:`help config.format.use-share-safe` for details about the
2224 Check :hg:`help config.format.use-share-safe` for details about the
2225 share-safe feature.
2225 share-safe feature.
2226
2226
2227 ``safe-mismatch.source-safe:verbose-upgrade``
2227 ``safe-mismatch.source-safe:verbose-upgrade``
2228 Display a message when upgrading, (default: True)
2228 Display a message when upgrading, (default: True)
2229
2229
2230 ``safe-mismatch.source-safe.warn``
2230 ``safe-mismatch.source-safe.warn``
2231 Shows a warning on operations if the shared repository does not use
2231 Shows a warning on operations if the shared repository does not use
2232 share-safe, but the source repository does.
2232 share-safe, but the source repository does.
2233 (default: True)
2233 (default: True)
2234
2234
2235 ``safe-mismatch.source-not-safe``
2235 ``safe-mismatch.source-not-safe``
2236 Controls what happens when the shared repository uses the share-safe
2236 Controls what happens when the shared repository uses the share-safe
2237 mechanism but its source does not.
2237 mechanism but its source does not.
2238
2238
2239 Possible values are `abort` (default), `allow`, `downgrade-abort` and
2239 Possible values are `abort` (default), `allow`, `downgrade-abort` and
2240 `downgrade-allow`.
2240 `downgrade-allow`.
2241
2241
2242 ``abort``
2242 ``abort``
2243 Disallows running any command and aborts
2243 Disallows running any command and aborts
2244 ``allow``
2244 ``allow``
2245 Respects the feature presence in the share source
2245 Respects the feature presence in the share source
2246 ``downgrade-abort``
2246 ``downgrade-abort``
2247 Tries to downgrade the share to not use share-safe; if it fails, aborts
2247 Tries to downgrade the share to not use share-safe; if it fails, aborts
2248 ``downgrade-allow``
2248 ``downgrade-allow``
2249 Tries to downgrade the share to not use share-safe;
2249 Tries to downgrade the share to not use share-safe;
2250 if it fails, continue by respecting the shared source setting
2250 if it fails, continue by respecting the shared source setting
2251
2251
2252 Check :hg:`help config.format.use-share-safe` for details about the
2252 Check :hg:`help config.format.use-share-safe` for details about the
2253 share-safe feature.
2253 share-safe feature.
2254
2254
2255 ``safe-mismatch.source-not-safe:verbose-upgrade``
2255 ``safe-mismatch.source-not-safe:verbose-upgrade``
2256 Display a message when upgrading, (default: True)
2256 Display a message when upgrading, (default: True)
2257
2257
2258 ``safe-mismatch.source-not-safe.warn``
2258 ``safe-mismatch.source-not-safe.warn``
2259 Shows a warning on operations if the shared repository uses share-safe,
2259 Shows a warning on operations if the shared repository uses share-safe,
2260 but the source repository does not.
2260 but the source repository does not.
2261 (default: True)
2261 (default: True)
2262
2262
2263 ``storage``
2263 ``storage``
2264 -----------
2264 -----------
2265
2265
2266 Control the strategy Mercurial uses internally to store history. Options in this
2266 Control the strategy Mercurial uses internally to store history. Options in this
2267 category impact performance and repository size.
2267 category impact performance and repository size.
2268
2268
2269 ``revlog.issue6528.fix-incoming``
2269 ``revlog.issue6528.fix-incoming``
2270 Version 5.8 of Mercurial had a bug leading to altering the parent of file
2270 Version 5.8 of Mercurial had a bug leading to altering the parent of file
2271 revision with copy information (or any other metadata) on exchange. This
2271 revision with copy information (or any other metadata) on exchange. This
2272 leads to the copy metadata to be overlooked by various internal logic. The
2272 leads to the copy metadata to be overlooked by various internal logic. The
2273 issue was fixed in Mercurial 5.8.1.
2273 issue was fixed in Mercurial 5.8.1.
2274 (See https://bz.mercurial-scm.org/show_bug.cgi?id=6528 for details)
2274 (See https://bz.mercurial-scm.org/show_bug.cgi?id=6528 for details)
2275
2275
2276 As a result Mercurial is now checking and fixing incoming file revisions to
2276 As a result Mercurial is now checking and fixing incoming file revisions to
2277 make sure their parents are in the right order. This behavior can be
2277 make sure their parents are in the right order. This behavior can be
2278 disabled by setting this option to `no`. This applies to revisions added
2278 disabled by setting this option to `no`. This applies to revisions added
2279 through push, pull, clone and unbundle.
2279 through push, pull, clone and unbundle.
2280
2280
2281 To fix affected revisions that already exist within the repository, one can
2281 To fix affected revisions that already exist within the repository, one can
2282 use :hg:`debug-repair-issue-6528`.
2282 use :hg:`debug-repair-issue-6528`.
2283
2283
2284 .. container:: verbose
2285
2286 ``revlog.delta-parent-search.candidate-group-chunk-size``
2287 Tune the number of delta bases the storage will consider in the
2288 same "round" of search. In some very rare cases, using a smaller value
2289 might result in faster processing at the possible expense of storage
2290 space, while using larger values might result in slower processing at the
2291 possible benefit of storage space. A value of "0" means no limitation.
2292
2293 default: no limitation
2294
2295 It is unlikely that you'll have to tune this configuration. If you think
2296 you do, consider talking with the Mercurial developer community about your
2297 repositories.
2298
2284 ``revlog.optimize-delta-parent-choice``
2299 ``revlog.optimize-delta-parent-choice``
2285 When storing a merge revision, both parents will be equally considered as
2300 When storing a merge revision, both parents will be equally considered as
2286 a possible delta base. This results in better delta selection and improved
2301 a possible delta base. This results in better delta selection and improved
2287 revlog compression. This option is enabled by default.
2302 revlog compression. This option is enabled by default.
2288
2303
2289 Turning this option off can result in large increase of repository size for
2304 Turning this option off can result in large increase of repository size for
2290 repository with many merges.
2305 repository with many merges.
2291
2306
2292 ``revlog.persistent-nodemap.mmap``
2307 ``revlog.persistent-nodemap.mmap``
2293 Whether to use the Operating System "memory mapping" feature (when
2308 Whether to use the Operating System "memory mapping" feature (when
2294 possible) to access the persistent nodemap data. This improves performance
2309 possible) to access the persistent nodemap data. This improves performance
2295 and reduces memory pressure.
2310 and reduces memory pressure.
2296
2311
2297 Default to True.
2312 Default to True.
2298
2313
2299 For details on the "persistent-nodemap" feature, see:
2314 For details on the "persistent-nodemap" feature, see:
2300 :hg:`help config.format.use-persistent-nodemap`.
2315 :hg:`help config.format.use-persistent-nodemap`.
2301
2316
2302 ``revlog.persistent-nodemap.slow-path``
2317 ``revlog.persistent-nodemap.slow-path``
2303 Control the behavior of Mercurial when using a repository with "persistent"
2318 Control the behavior of Mercurial when using a repository with "persistent"
2304 nodemap with an installation of Mercurial without a fast implementation for
2319 nodemap with an installation of Mercurial without a fast implementation for
2305 the feature:
2320 the feature:
2306
2321
2307 ``allow``: Silently use the slower implementation to access the repository.
2322 ``allow``: Silently use the slower implementation to access the repository.
2308 ``warn``: Warn, but use the slower implementation to access the repository.
2323 ``warn``: Warn, but use the slower implementation to access the repository.
2309 ``abort``: Prevent access to such repositories. (This is the default)
2324 ``abort``: Prevent access to such repositories. (This is the default)
2310
2325
2311 For details on the "persistent-nodemap" feature, see:
2326 For details on the "persistent-nodemap" feature, see:
2312 :hg:`help config.format.use-persistent-nodemap`.
2327 :hg:`help config.format.use-persistent-nodemap`.
2313
2328
2314 ``revlog.reuse-external-delta-parent``
2329 ``revlog.reuse-external-delta-parent``
2315 Control the order in which delta parents are considered when adding new
2330 Control the order in which delta parents are considered when adding new
2316 revisions from an external source.
2331 revisions from an external source.
2317 (typically: apply bundle from `hg pull` or `hg push`).
2332 (typically: apply bundle from `hg pull` or `hg push`).
2318
2333
2319 New revisions are usually provided as a delta against other revisions. By
2334 New revisions are usually provided as a delta against other revisions. By
2320 default, Mercurial will try to reuse this delta first, therefore using the
2335 default, Mercurial will try to reuse this delta first, therefore using the
2321 same "delta parent" as the source. Directly using delta's from the source
2336 same "delta parent" as the source. Directly using delta's from the source
2322 reduces CPU usage and usually speeds up operation. However, in some cases,
2337 reduces CPU usage and usually speeds up operation. However, in some cases,
2323 the source might have sub-optimal delta bases and forcing their reevaluation
2338 the source might have sub-optimal delta bases and forcing their reevaluation
2324 is useful. For example, pushes from an old client could have sub-optimal
2339 is useful. For example, pushes from an old client could have sub-optimal
2325 delta's parent that the server wants to optimize. (lack of general delta, bad
2340 delta's parent that the server wants to optimize. (lack of general delta, bad
2326 parents, choice, lack of sparse-revlog, etc).
2341 parents, choice, lack of sparse-revlog, etc).
2327
2342
2328 This option is enabled by default. Turning it off will ensure bad delta
2343 This option is enabled by default. Turning it off will ensure bad delta
2329 parent choices from older client do not propagate to this repository, at
2344 parent choices from older client do not propagate to this repository, at
2330 the cost of a small increase in CPU consumption.
2345 the cost of a small increase in CPU consumption.
2331
2346
2332 Note: this option only control the order in which delta parents are
2347 Note: this option only control the order in which delta parents are
2333 considered. Even when disabled, the existing delta from the source will be
2348 considered. Even when disabled, the existing delta from the source will be
2334 reused if the same delta parent is selected.
2349 reused if the same delta parent is selected.
2335
2350
2336 ``revlog.reuse-external-delta``
2351 ``revlog.reuse-external-delta``
2337 Control the reuse of delta from external source.
2352 Control the reuse of delta from external source.
2338 (typically: apply bundle from `hg pull` or `hg push`).
2353 (typically: apply bundle from `hg pull` or `hg push`).
2339
2354
2340 New revisions are usually provided as a delta against another revision. By
2355 New revisions are usually provided as a delta against another revision. By
2341 default, Mercurial will not recompute the same delta again, trusting
2356 default, Mercurial will not recompute the same delta again, trusting
2342 externally provided deltas. There have been rare cases of small adjustment
2357 externally provided deltas. There have been rare cases of small adjustment
2343 to the diffing algorithm in the past. So in some rare case, recomputing
2358 to the diffing algorithm in the past. So in some rare case, recomputing
2344 delta provided by ancient clients can provide better results. Disabling
2359 delta provided by ancient clients can provide better results. Disabling
2345 this option means going through a full delta recomputation for all incoming
2360 this option means going through a full delta recomputation for all incoming
2346 revisions. It means a large increase in CPU usage and will slow operations
2361 revisions. It means a large increase in CPU usage and will slow operations
2347 down.
2362 down.
2348
2363
2349 This option is enabled by default. When disabled, it also disables the
2364 This option is enabled by default. When disabled, it also disables the
2350 related ``storage.revlog.reuse-external-delta-parent`` option.
2365 related ``storage.revlog.reuse-external-delta-parent`` option.
2351
2366
2352 ``revlog.zlib.level``
2367 ``revlog.zlib.level``
2353 Zlib compression level used when storing data into the repository. Accepted
2368 Zlib compression level used when storing data into the repository. Accepted
2354 Value range from 1 (lowest compression) to 9 (highest compression). Zlib
2369 Value range from 1 (lowest compression) to 9 (highest compression). Zlib
2355 default value is 6.
2370 default value is 6.
2356
2371
2357
2372
2358 ``revlog.zstd.level``
2373 ``revlog.zstd.level``
2359 zstd compression level used when storing data into the repository. Accepted
2374 zstd compression level used when storing data into the repository. Accepted
2360 Value range from 1 (lowest compression) to 22 (highest compression).
2375 Value range from 1 (lowest compression) to 22 (highest compression).
2361 (default 3)
2376 (default 3)
2362
2377
2363 ``server``
2378 ``server``
2364 ----------
2379 ----------
2365
2380
2366 Controls generic server settings.
2381 Controls generic server settings.
2367
2382
2368 ``bookmarks-pushkey-compat``
2383 ``bookmarks-pushkey-compat``
2369 Trigger the pushkey hook when bookmark updates are pushed. This config exists
2384 Trigger the pushkey hook when bookmark updates are pushed. This config exists
2370 for compatibility purposes (default: True)
2385 for compatibility purposes (default: True)
2371
2386
2372 If you use ``pushkey`` and ``pre-pushkey`` hooks to control bookmark
2387 If you use ``pushkey`` and ``pre-pushkey`` hooks to control bookmark
2373 movement we recommend you migrate them to ``txnclose-bookmark`` and
2388 movement we recommend you migrate them to ``txnclose-bookmark`` and
2374 ``pretxnclose-bookmark``.
2389 ``pretxnclose-bookmark``.
2375
2390
2376 ``compressionengines``
2391 ``compressionengines``
2377 List of compression engines and their relative priority to advertise
2392 List of compression engines and their relative priority to advertise
2378 to clients.
2393 to clients.
2379
2394
2380 The order of compression engines determines their priority, the first
2395 The order of compression engines determines their priority, the first
2381 having the highest priority. If a compression engine is not listed
2396 having the highest priority. If a compression engine is not listed
2382 here, it won't be advertised to clients.
2397 here, it won't be advertised to clients.
2383
2398
2384 If not set (the default), built-in defaults are used. Run
2399 If not set (the default), built-in defaults are used. Run
2385 :hg:`debuginstall` to list available compression engines and their
2400 :hg:`debuginstall` to list available compression engines and their
2386 default wire protocol priority.
2401 default wire protocol priority.
2387
2402
2388 Older Mercurial clients only support zlib compression and this setting
2403 Older Mercurial clients only support zlib compression and this setting
2389 has no effect for legacy clients.
2404 has no effect for legacy clients.
2390
2405
2391 ``uncompressed``
2406 ``uncompressed``
2392 Whether to allow clients to clone a repository using the
2407 Whether to allow clients to clone a repository using the
2393 uncompressed streaming protocol. This transfers about 40% more
2408 uncompressed streaming protocol. This transfers about 40% more
2394 data than a regular clone, but uses less memory and CPU on both
2409 data than a regular clone, but uses less memory and CPU on both
2395 server and client. Over a LAN (100 Mbps or better) or a very fast
2410 server and client. Over a LAN (100 Mbps or better) or a very fast
2396 WAN, an uncompressed streaming clone is a lot faster (~10x) than a
2411 WAN, an uncompressed streaming clone is a lot faster (~10x) than a
2397 regular clone. Over most WAN connections (anything slower than
2412 regular clone. Over most WAN connections (anything slower than
2398 about 6 Mbps), uncompressed streaming is slower, because of the
2413 about 6 Mbps), uncompressed streaming is slower, because of the
2399 extra data transfer overhead. This mode will also temporarily hold
2414 extra data transfer overhead. This mode will also temporarily hold
2400 the write lock while determining what data to transfer.
2415 the write lock while determining what data to transfer.
2401 (default: True)
2416 (default: True)
2402
2417
2403 ``uncompressedallowsecret``
2418 ``uncompressedallowsecret``
2404 Whether to allow stream clones when the repository contains secret
2419 Whether to allow stream clones when the repository contains secret
2405 changesets. (default: False)
2420 changesets. (default: False)
2406
2421
2407 ``preferuncompressed``
2422 ``preferuncompressed``
2408 When set, clients will try to use the uncompressed streaming
2423 When set, clients will try to use the uncompressed streaming
2409 protocol. (default: False)
2424 protocol. (default: False)
2410
2425
2411 ``disablefullbundle``
2426 ``disablefullbundle``
2412 When set, servers will refuse attempts to do pull-based clones.
2427 When set, servers will refuse attempts to do pull-based clones.
2413 If this option is set, ``preferuncompressed`` and/or clone bundles
2428 If this option is set, ``preferuncompressed`` and/or clone bundles
2414 are highly recommended. Partial clones will still be allowed.
2429 are highly recommended. Partial clones will still be allowed.
2415 (default: False)
2430 (default: False)
2416
2431
2417 ``streamunbundle``
2432 ``streamunbundle``
2418 When set, servers will apply data sent from the client directly,
2433 When set, servers will apply data sent from the client directly,
2419 otherwise it will be written to a temporary file first. This option
2434 otherwise it will be written to a temporary file first. This option
2420 effectively prevents concurrent pushes.
2435 effectively prevents concurrent pushes.
2421
2436
2422 ``pullbundle``
2437 ``pullbundle``
2423 When set, the server will check pullbundles.manifest for bundles
2438 When set, the server will check pullbundles.manifest for bundles
2424 covering the requested heads and common nodes. The first matching
2439 covering the requested heads and common nodes. The first matching
2425 entry will be streamed to the client.
2440 entry will be streamed to the client.
2426
2441
2427 For HTTP transport, the stream will still use zlib compression
2442 For HTTP transport, the stream will still use zlib compression
2428 for older clients.
2443 for older clients.
2429
2444
2430 ``concurrent-push-mode``
2445 ``concurrent-push-mode``
2431 Level of allowed race condition between two pushing clients.
2446 Level of allowed race condition between two pushing clients.
2432
2447
2433 - 'strict': push is aborted if another client touched the repository
2448 - 'strict': push is aborted if another client touched the repository
2434 while the push was preparing.
2449 while the push was preparing.
2435 - 'check-related': push is only aborted if it affects head that got also
2450 - 'check-related': push is only aborted if it affects head that got also
2436 affected while the push was preparing. (default since 5.4)
2451 affected while the push was preparing. (default since 5.4)
2437
2452
2438 'check-related' only takes effect for compatible clients (version
2453 'check-related' only takes effect for compatible clients (version
2439 4.3 and later). Older clients will use 'strict'.
2454 4.3 and later). Older clients will use 'strict'.
2440
2455
2441 ``validate``
2456 ``validate``
2442 Whether to validate the completeness of pushed changesets by
2457 Whether to validate the completeness of pushed changesets by
2443 checking that all new file revisions specified in manifests are
2458 checking that all new file revisions specified in manifests are
2444 present. (default: False)
2459 present. (default: False)
2445
2460
2446 ``maxhttpheaderlen``
2461 ``maxhttpheaderlen``
2447 Instruct HTTP clients not to send request headers longer than this
2462 Instruct HTTP clients not to send request headers longer than this
2448 many bytes. (default: 1024)
2463 many bytes. (default: 1024)
2449
2464
2450 ``bundle1``
2465 ``bundle1``
2451 Whether to allow clients to push and pull using the legacy bundle1
2466 Whether to allow clients to push and pull using the legacy bundle1
2452 exchange format. (default: True)
2467 exchange format. (default: True)
2453
2468
2454 ``bundle1gd``
2469 ``bundle1gd``
2455 Like ``bundle1`` but only used if the repository is using the
2470 Like ``bundle1`` but only used if the repository is using the
2456 *generaldelta* storage format. (default: True)
2471 *generaldelta* storage format. (default: True)
2457
2472
2458 ``bundle1.push``
2473 ``bundle1.push``
2459 Whether to allow clients to push using the legacy bundle1 exchange
2474 Whether to allow clients to push using the legacy bundle1 exchange
2460 format. (default: True)
2475 format. (default: True)
2461
2476
2462 ``bundle1gd.push``
2477 ``bundle1gd.push``
2463 Like ``bundle1.push`` but only used if the repository is using the
2478 Like ``bundle1.push`` but only used if the repository is using the
2464 *generaldelta* storage format. (default: True)
2479 *generaldelta* storage format. (default: True)
2465
2480
2466 ``bundle1.pull``
2481 ``bundle1.pull``
2467 Whether to allow clients to pull using the legacy bundle1 exchange
2482 Whether to allow clients to pull using the legacy bundle1 exchange
2468 format. (default: True)
2483 format. (default: True)
2469
2484
2470 ``bundle1gd.pull``
2485 ``bundle1gd.pull``
2471 Like ``bundle1.pull`` but only used if the repository is using the
2486 Like ``bundle1.pull`` but only used if the repository is using the
2472 *generaldelta* storage format. (default: True)
2487 *generaldelta* storage format. (default: True)
2473
2488
2474 Large repositories using the *generaldelta* storage format should
2489 Large repositories using the *generaldelta* storage format should
2475 consider setting this option because converting *generaldelta*
2490 consider setting this option because converting *generaldelta*
2476 repositories to the exchange format required by the bundle1 data
2491 repositories to the exchange format required by the bundle1 data
2477 format can consume a lot of CPU.
2492 format can consume a lot of CPU.
2478
2493
2479 ``bundle2.stream``
2494 ``bundle2.stream``
2480 Whether to allow clients to pull using the bundle2 streaming protocol.
2495 Whether to allow clients to pull using the bundle2 streaming protocol.
2481 (default: True)
2496 (default: True)
2482
2497
2483 ``zliblevel``
2498 ``zliblevel``
2484 Integer between ``-1`` and ``9`` that controls the zlib compression level
2499 Integer between ``-1`` and ``9`` that controls the zlib compression level
2485 for wire protocol commands that send zlib compressed output (notably the
2500 for wire protocol commands that send zlib compressed output (notably the
2486 commands that send repository history data).
2501 commands that send repository history data).
2487
2502
2488 The default (``-1``) uses the default zlib compression level, which is
2503 The default (``-1``) uses the default zlib compression level, which is
2489 likely equivalent to ``6``. ``0`` means no compression. ``9`` means
2504 likely equivalent to ``6``. ``0`` means no compression. ``9`` means
2490 maximum compression.
2505 maximum compression.
2491
2506
2492 Setting this option allows server operators to make trade-offs between
2507 Setting this option allows server operators to make trade-offs between
2493 bandwidth and CPU used. Lowering the compression lowers CPU utilization
2508 bandwidth and CPU used. Lowering the compression lowers CPU utilization
2494 but sends more bytes to clients.
2509 but sends more bytes to clients.
2495
2510
2496 This option only impacts the HTTP server.
2511 This option only impacts the HTTP server.
2497
2512
2498 ``zstdlevel``
2513 ``zstdlevel``
2499 Integer between ``1`` and ``22`` that controls the zstd compression level
2514 Integer between ``1`` and ``22`` that controls the zstd compression level
2500 for wire protocol commands. ``1`` is the minimal amount of compression and
2515 for wire protocol commands. ``1`` is the minimal amount of compression and
2501 ``22`` is the highest amount of compression.
2516 ``22`` is the highest amount of compression.
2502
2517
2503 The default (``3``) should be significantly faster than zlib while likely
2518 The default (``3``) should be significantly faster than zlib while likely
2504 delivering better compression ratios.
2519 delivering better compression ratios.
2505
2520
2506 This option only impacts the HTTP server.
2521 This option only impacts the HTTP server.
2507
2522
2508 See also ``server.zliblevel``.
2523 See also ``server.zliblevel``.
2509
2524
2510 ``view``
2525 ``view``
2511 Repository filter used when exchanging revisions with the peer.
2526 Repository filter used when exchanging revisions with the peer.
2512
2527
2513 The default view (``served``) excludes secret and hidden changesets.
2528 The default view (``served``) excludes secret and hidden changesets.
2514 Another useful value is ``immutable`` (no draft, secret or hidden
2529 Another useful value is ``immutable`` (no draft, secret or hidden
2515 changesets). (EXPERIMENTAL)
2530 changesets). (EXPERIMENTAL)
2516
2531
2517 ``smtp``
2532 ``smtp``
2518 --------
2533 --------
2519
2534
2520 Configuration for extensions that need to send email messages.
2535 Configuration for extensions that need to send email messages.
2521
2536
2522 ``host``
2537 ``host``
2523 Host name of mail server, e.g. "mail.example.com".
2538 Host name of mail server, e.g. "mail.example.com".
2524
2539
2525 ``port``
2540 ``port``
2526 Optional. Port to connect to on mail server. (default: 465 if
2541 Optional. Port to connect to on mail server. (default: 465 if
2527 ``tls`` is smtps; 25 otherwise)
2542 ``tls`` is smtps; 25 otherwise)
2528
2543
2529 ``tls``
2544 ``tls``
2530 Optional. Method to enable TLS when connecting to mail server: starttls,
2545 Optional. Method to enable TLS when connecting to mail server: starttls,
2531 smtps or none. (default: none)
2546 smtps or none. (default: none)
2532
2547
2533 ``username``
2548 ``username``
2534 Optional. User name for authenticating with the SMTP server.
2549 Optional. User name for authenticating with the SMTP server.
2535 (default: None)
2550 (default: None)
2536
2551
2537 ``password``
2552 ``password``
2538 Optional. Password for authenticating with the SMTP server. If not
2553 Optional. Password for authenticating with the SMTP server. If not
2539 specified, interactive sessions will prompt the user for a
2554 specified, interactive sessions will prompt the user for a
2540 password; non-interactive sessions will fail. (default: None)
2555 password; non-interactive sessions will fail. (default: None)
2541
2556
2542 ``local_hostname``
2557 ``local_hostname``
2543 Optional. The hostname that the sender can use to identify
2558 Optional. The hostname that the sender can use to identify
2544 itself to the MTA.
2559 itself to the MTA.
2545
2560
2546
2561
2547 ``subpaths``
2562 ``subpaths``
2548 ------------
2563 ------------
2549
2564
2550 Subrepository source URLs can go stale if a remote server changes name
2565 Subrepository source URLs can go stale if a remote server changes name
2551 or becomes temporarily unavailable. This section lets you define
2566 or becomes temporarily unavailable. This section lets you define
2552 rewrite rules of the form::
2567 rewrite rules of the form::
2553
2568
2554 <pattern> = <replacement>
2569 <pattern> = <replacement>
2555
2570
2556 where ``pattern`` is a regular expression matching a subrepository
2571 where ``pattern`` is a regular expression matching a subrepository
2557 source URL and ``replacement`` is the replacement string used to
2572 source URL and ``replacement`` is the replacement string used to
2558 rewrite it. Groups can be matched in ``pattern`` and referenced in
2573 rewrite it. Groups can be matched in ``pattern`` and referenced in
2559 ``replacement``. For instance::
2574 ``replacement``. For instance::
2560
2575
2561 http://server/(.*)-hg/ = http://hg.server/\1/
2576 http://server/(.*)-hg/ = http://hg.server/\1/
2562
2577
2563 rewrites ``http://server/foo-hg/`` into ``http://hg.server/foo/``.
2578 rewrites ``http://server/foo-hg/`` into ``http://hg.server/foo/``.
2564
2579
2565 Relative subrepository paths are first made absolute, and the
2580 Relative subrepository paths are first made absolute, and the
2566 rewrite rules are then applied on the full (absolute) path. If ``pattern``
2581 rewrite rules are then applied on the full (absolute) path. If ``pattern``
2567 doesn't match the full path, an attempt is made to apply it on the
2582 doesn't match the full path, an attempt is made to apply it on the
2568 relative path alone. The rules are applied in definition order.
2583 relative path alone. The rules are applied in definition order.
2569
2584
2570 ``subrepos``
2585 ``subrepos``
2571 ------------
2586 ------------
2572
2587
2573 This section contains options that control the behavior of the
2588 This section contains options that control the behavior of the
2574 subrepositories feature. See also :hg:`help subrepos`.
2589 subrepositories feature. See also :hg:`help subrepos`.
2575
2590
2576 Security note: auditing in Mercurial is known to be insufficient to
2591 Security note: auditing in Mercurial is known to be insufficient to
2577 prevent clone-time code execution with carefully constructed Git
2592 prevent clone-time code execution with carefully constructed Git
2578 subrepos. It is unknown if a similar defect is present in Subversion
2593 subrepos. It is unknown if a similar defect is present in Subversion
2579 subrepos. Both Git and Subversion subrepos are disabled by default
2594 subrepos. Both Git and Subversion subrepos are disabled by default
2580 out of security concerns. These subrepo types can be enabled using
2595 out of security concerns. These subrepo types can be enabled using
2581 the respective options below.
2596 the respective options below.
2582
2597
2583 ``allowed``
2598 ``allowed``
2584 Whether subrepositories are allowed in the working directory.
2599 Whether subrepositories are allowed in the working directory.
2585
2600
2586 When false, commands involving subrepositories (like :hg:`update`)
2601 When false, commands involving subrepositories (like :hg:`update`)
2587 will fail for all subrepository types.
2602 will fail for all subrepository types.
2588 (default: true)
2603 (default: true)
2589
2604
2590 ``hg:allowed``
2605 ``hg:allowed``
2591 Whether Mercurial subrepositories are allowed in the working
2606 Whether Mercurial subrepositories are allowed in the working
2592 directory. This option only has an effect if ``subrepos.allowed``
2607 directory. This option only has an effect if ``subrepos.allowed``
2593 is true.
2608 is true.
2594 (default: true)
2609 (default: true)
2595
2610
2596 ``git:allowed``
2611 ``git:allowed``
2597 Whether Git subrepositories are allowed in the working directory.
2612 Whether Git subrepositories are allowed in the working directory.
2598 This option only has an effect if ``subrepos.allowed`` is true.
2613 This option only has an effect if ``subrepos.allowed`` is true.
2599
2614
2600 See the security note above before enabling Git subrepos.
2615 See the security note above before enabling Git subrepos.
2601 (default: false)
2616 (default: false)
2602
2617
2603 ``svn:allowed``
2618 ``svn:allowed``
2604 Whether Subversion subrepositories are allowed in the working
2619 Whether Subversion subrepositories are allowed in the working
2605 directory. This option only has an effect if ``subrepos.allowed``
2620 directory. This option only has an effect if ``subrepos.allowed``
2606 is true.
2621 is true.
2607
2622
2608 See the security note above before enabling Subversion subrepos.
2623 See the security note above before enabling Subversion subrepos.
2609 (default: false)
2624 (default: false)
2610
2625
2611 ``templatealias``
2626 ``templatealias``
2612 -----------------
2627 -----------------
2613
2628
2614 Alias definitions for templates. See :hg:`help templates` for details.
2629 Alias definitions for templates. See :hg:`help templates` for details.
2615
2630
2616 ``templates``
2631 ``templates``
2617 -------------
2632 -------------
2618
2633
2619 Use the ``[templates]`` section to define template strings.
2634 Use the ``[templates]`` section to define template strings.
2620 See :hg:`help templates` for details.
2635 See :hg:`help templates` for details.
2621
2636
2622 ``trusted``
2637 ``trusted``
2623 -----------
2638 -----------
2624
2639
2625 Mercurial will not use the settings in the
2640 Mercurial will not use the settings in the
2626 ``.hg/hgrc`` file from a repository if it doesn't belong to a trusted
2641 ``.hg/hgrc`` file from a repository if it doesn't belong to a trusted
2627 user or to a trusted group, as various hgrc features allow arbitrary
2642 user or to a trusted group, as various hgrc features allow arbitrary
2628 commands to be run. This issue is often encountered when configuring
2643 commands to be run. This issue is often encountered when configuring
2629 hooks or extensions for shared repositories or servers. However,
2644 hooks or extensions for shared repositories or servers. However,
2630 the web interface will use some safe settings from the ``[web]``
2645 the web interface will use some safe settings from the ``[web]``
2631 section.
2646 section.
2632
2647
2633 This section specifies what users and groups are trusted. The
2648 This section specifies what users and groups are trusted. The
2634 current user is always trusted. To trust everybody, list a user or a
2649 current user is always trusted. To trust everybody, list a user or a
2635 group with name ``*``. These settings must be placed in an
2650 group with name ``*``. These settings must be placed in an
2636 *already-trusted file* to take effect, such as ``$HOME/.hgrc`` of the
2651 *already-trusted file* to take effect, such as ``$HOME/.hgrc`` of the
2637 user or service running Mercurial.
2652 user or service running Mercurial.
2638
2653
2639 ``users``
2654 ``users``
2640 Comma-separated list of trusted users.
2655 Comma-separated list of trusted users.
2641
2656
2642 ``groups``
2657 ``groups``
2643 Comma-separated list of trusted groups.
2658 Comma-separated list of trusted groups.
2644
2659
2645
2660
2646 ``ui``
2661 ``ui``
2647 ------
2662 ------
2648
2663
2649 User interface controls.
2664 User interface controls.
2650
2665
2651 ``archivemeta``
2666 ``archivemeta``
2652 Whether to include the .hg_archival.txt file containing meta data
2667 Whether to include the .hg_archival.txt file containing meta data
2653 (hashes for the repository base and for tip) in archives created
2668 (hashes for the repository base and for tip) in archives created
2654 by the :hg:`archive` command or downloaded via hgweb.
2669 by the :hg:`archive` command or downloaded via hgweb.
2655 (default: True)
2670 (default: True)
2656
2671
2657 ``askusername``
2672 ``askusername``
2658 Whether to prompt for a username when committing. If True, and
2673 Whether to prompt for a username when committing. If True, and
2659 neither ``$HGUSER`` nor ``$EMAIL`` has been specified, then the user will
2674 neither ``$HGUSER`` nor ``$EMAIL`` has been specified, then the user will
2660 be prompted to enter a username. If no username is entered, the
2675 be prompted to enter a username. If no username is entered, the
2661 default ``USER@HOST`` is used instead.
2676 default ``USER@HOST`` is used instead.
2662 (default: False)
2677 (default: False)
2663
2678
2664 ``clonebundles``
2679 ``clonebundles``
2665 Whether the "clone bundles" feature is enabled.
2680 Whether the "clone bundles" feature is enabled.
2666
2681
2667 When enabled, :hg:`clone` may download and apply a server-advertised
2682 When enabled, :hg:`clone` may download and apply a server-advertised
2668 bundle file from a URL instead of using the normal exchange mechanism.
2683 bundle file from a URL instead of using the normal exchange mechanism.
2669
2684
2670 This can likely result in faster and more reliable clones.
2685 This can likely result in faster and more reliable clones.
2671
2686
2672 (default: True)
2687 (default: True)
2673
2688
2674 ``clonebundlefallback``
2689 ``clonebundlefallback``
2675 Whether failure to apply an advertised "clone bundle" from a server
2690 Whether failure to apply an advertised "clone bundle" from a server
2676 should result in fallback to a regular clone.
2691 should result in fallback to a regular clone.
2677
2692
2678 This is disabled by default because servers advertising "clone
2693 This is disabled by default because servers advertising "clone
2679 bundles" often do so to reduce server load. If advertised bundles
2694 bundles" often do so to reduce server load. If advertised bundles
2680 start mass failing and clients automatically fall back to a regular
2695 start mass failing and clients automatically fall back to a regular
2681 clone, this would add significant and unexpected load to the server
2696 clone, this would add significant and unexpected load to the server
2682 since the server is expecting clone operations to be offloaded to
2697 since the server is expecting clone operations to be offloaded to
2683 pre-generated bundles. Failing fast (the default behavior) ensures
2698 pre-generated bundles. Failing fast (the default behavior) ensures
2684 clients don't overwhelm the server when "clone bundle" application
2699 clients don't overwhelm the server when "clone bundle" application
2685 fails.
2700 fails.
2686
2701
2687 (default: False)
2702 (default: False)
2688
2703
2689 ``clonebundleprefers``
2704 ``clonebundleprefers``
2690 Defines preferences for which "clone bundles" to use.
2705 Defines preferences for which "clone bundles" to use.
2691
2706
2692 Servers advertising "clone bundles" may advertise multiple available
2707 Servers advertising "clone bundles" may advertise multiple available
2693 bundles. Each bundle may have different attributes, such as the bundle
2708 bundles. Each bundle may have different attributes, such as the bundle
2694 type and compression format. This option is used to prefer a particular
2709 type and compression format. This option is used to prefer a particular
2695 bundle over another.
2710 bundle over another.
2696
2711
2697 The following keys are defined by Mercurial:
2712 The following keys are defined by Mercurial:
2698
2713
2699 BUNDLESPEC
2714 BUNDLESPEC
2700 A bundle type specifier. These are strings passed to :hg:`bundle -t`.
2715 A bundle type specifier. These are strings passed to :hg:`bundle -t`.
2701 e.g. ``gzip-v2`` or ``bzip2-v1``.
2716 e.g. ``gzip-v2`` or ``bzip2-v1``.
2702
2717
2703 COMPRESSION
2718 COMPRESSION
2704 The compression format of the bundle. e.g. ``gzip`` and ``bzip2``.
2719 The compression format of the bundle. e.g. ``gzip`` and ``bzip2``.
2705
2720
2706 Server operators may define custom keys.
2721 Server operators may define custom keys.
2707
2722
2708 Example values: ``COMPRESSION=bzip2``,
2723 Example values: ``COMPRESSION=bzip2``,
2709 ``BUNDLESPEC=gzip-v2, COMPRESSION=gzip``.
2724 ``BUNDLESPEC=gzip-v2, COMPRESSION=gzip``.
2710
2725
2711 By default, the first bundle advertised by the server is used.
2726 By default, the first bundle advertised by the server is used.
2712
2727
2713 ``color``
2728 ``color``
2714 When to colorize output. Possible values are Boolean ("yes" or "no"), or
2729 When to colorize output. Possible values are Boolean ("yes" or "no"), or
2715 "debug", or "always". (default: "yes"). "yes" will use color whenever it
2730 "debug", or "always". (default: "yes"). "yes" will use color whenever it
2716 seems possible. See :hg:`help color` for details.
2731 seems possible. See :hg:`help color` for details.
2717
2732
2718 ``commitsubrepos``
2733 ``commitsubrepos``
2719 Whether to commit modified subrepositories when committing the
2734 Whether to commit modified subrepositories when committing the
2720 parent repository. If False and one subrepository has uncommitted
2735 parent repository. If False and one subrepository has uncommitted
2721 changes, abort the commit.
2736 changes, abort the commit.
2722 (default: False)
2737 (default: False)
2723
2738
2724 ``debug``
2739 ``debug``
2725 Print debugging information. (default: False)
2740 Print debugging information. (default: False)
2726
2741
2727 ``editor``
2742 ``editor``
2728 The editor to use during a commit. (default: ``$EDITOR`` or ``vi``)
2743 The editor to use during a commit. (default: ``$EDITOR`` or ``vi``)
2729
2744
2730 ``fallbackencoding``
2745 ``fallbackencoding``
2731 Encoding to try if it's not possible to decode the changelog using
2746 Encoding to try if it's not possible to decode the changelog using
2732 UTF-8. (default: ISO-8859-1)
2747 UTF-8. (default: ISO-8859-1)
2733
2748
2734 ``graphnodetemplate``
2749 ``graphnodetemplate``
2735 (DEPRECATED) Use ``command-templates.graphnode`` instead.
2750 (DEPRECATED) Use ``command-templates.graphnode`` instead.
2736
2751
2737 ``ignore``
2752 ``ignore``
2738 A file to read per-user ignore patterns from. This file should be
2753 A file to read per-user ignore patterns from. This file should be
2739 in the same format as a repository-wide .hgignore file. Filenames
2754 in the same format as a repository-wide .hgignore file. Filenames
2740 are relative to the repository root. This option supports hook syntax,
2755 are relative to the repository root. This option supports hook syntax,
2741 so if you want to specify multiple ignore files, you can do so by
2756 so if you want to specify multiple ignore files, you can do so by
2742 setting something like ``ignore.other = ~/.hgignore2``. For details
2757 setting something like ``ignore.other = ~/.hgignore2``. For details
2743 of the ignore file format, see the ``hgignore(5)`` man page.
2758 of the ignore file format, see the ``hgignore(5)`` man page.
2744
2759
2745 ``interactive``
2760 ``interactive``
2746 Allow prompting the user. (default: True)
2761 Allow prompting the user. (default: True)
2747
2762
2748 ``interface``
2763 ``interface``
2749 Select the default interface for interactive features (default: text).
2764 Select the default interface for interactive features (default: text).
2750 Possible values are 'text' and 'curses'.
2765 Possible values are 'text' and 'curses'.
2751
2766
2752 ``interface.chunkselector``
2767 ``interface.chunkselector``
2753 Select the interface for change recording (e.g. :hg:`commit -i`).
2768 Select the interface for change recording (e.g. :hg:`commit -i`).
2754 Possible values are 'text' and 'curses'.
2769 Possible values are 'text' and 'curses'.
2755 This config overrides the interface specified by ui.interface.
2770 This config overrides the interface specified by ui.interface.
2756
2771
2757 ``large-file-limit``
2772 ``large-file-limit``
2758 Largest file size that gives no memory use warning.
2773 Largest file size that gives no memory use warning.
2759 Possible values are integers or 0 to disable the check.
2774 Possible values are integers or 0 to disable the check.
2760 Value is expressed in bytes by default, one can use standard units for
2775 Value is expressed in bytes by default, one can use standard units for
2761 convenience (e.g. 10MB, 0.1GB, etc) (default: 10MB)
2776 convenience (e.g. 10MB, 0.1GB, etc) (default: 10MB)
2762
2777
2763 ``logtemplate``
2778 ``logtemplate``
2764 (DEPRECATED) Use ``command-templates.log`` instead.
2779 (DEPRECATED) Use ``command-templates.log`` instead.
2765
2780
2766 ``merge``
2781 ``merge``
2767 The conflict resolution program to use during a manual merge.
2782 The conflict resolution program to use during a manual merge.
2768 For more information on merge tools see :hg:`help merge-tools`.
2783 For more information on merge tools see :hg:`help merge-tools`.
2769 For configuring merge tools see the ``[merge-tools]`` section.
2784 For configuring merge tools see the ``[merge-tools]`` section.
2770
2785
2771 ``mergemarkers``
2786 ``mergemarkers``
2772 Sets the merge conflict marker label styling. The ``detailed`` style
2787 Sets the merge conflict marker label styling. The ``detailed`` style
2773 uses the ``command-templates.mergemarker`` setting to style the labels.
2788 uses the ``command-templates.mergemarker`` setting to style the labels.
2774 The ``basic`` style just uses 'local' and 'other' as the marker label.
2789 The ``basic`` style just uses 'local' and 'other' as the marker label.
2775 One of ``basic`` or ``detailed``.
2790 One of ``basic`` or ``detailed``.
2776 (default: ``basic``)
2791 (default: ``basic``)
2777
2792
2778 ``mergemarkertemplate``
2793 ``mergemarkertemplate``
2779 (DEPRECATED) Use ``command-templates.mergemarker`` instead.
2794 (DEPRECATED) Use ``command-templates.mergemarker`` instead.
2780
2795
2781 ``message-output``
2796 ``message-output``
2782 Where to write status and error messages. (default: ``stdio``)
2797 Where to write status and error messages. (default: ``stdio``)
2783
2798
2784 ``channel``
2799 ``channel``
2785 Use separate channel for structured output. (Command-server only)
2800 Use separate channel for structured output. (Command-server only)
2786 ``stderr``
2801 ``stderr``
2787 Everything to stderr.
2802 Everything to stderr.
2788 ``stdio``
2803 ``stdio``
2789 Status to stdout, and error to stderr.
2804 Status to stdout, and error to stderr.
2790
2805
2791 ``origbackuppath``
2806 ``origbackuppath``
2792 The path to a directory used to store generated .orig files. If the path is
2807 The path to a directory used to store generated .orig files. If the path is
2793 not a directory, one will be created. If set, files stored in this
2808 not a directory, one will be created. If set, files stored in this
2794 directory have the same name as the original file and do not have a .orig
2809 directory have the same name as the original file and do not have a .orig
2795 suffix.
2810 suffix.
2796
2811
2797 ``paginate``
2812 ``paginate``
2798 Control the pagination of command output (default: True). See :hg:`help pager`
2813 Control the pagination of command output (default: True). See :hg:`help pager`
2799 for details.
2814 for details.
2800
2815
2801 ``patch``
2816 ``patch``
2802 An optional external tool that ``hg import`` and some extensions
2817 An optional external tool that ``hg import`` and some extensions
2803 will use for applying patches. By default Mercurial uses an
2818 will use for applying patches. By default Mercurial uses an
2804 internal patch utility. The external tool must work as the common
2819 internal patch utility. The external tool must work as the common
2805 Unix ``patch`` program. In particular, it must accept a ``-p``
2820 Unix ``patch`` program. In particular, it must accept a ``-p``
2806 argument to strip patch headers, a ``-d`` argument to specify the
2821 argument to strip patch headers, a ``-d`` argument to specify the
2807 current directory, a file name to patch, and a patch file to take
2822 current directory, a file name to patch, and a patch file to take
2808 from stdin.
2823 from stdin.
2809
2824
2810 It is possible to specify a patch tool together with extra
2825 It is possible to specify a patch tool together with extra
2811 arguments. For example, setting this option to ``patch --merge``
2826 arguments. For example, setting this option to ``patch --merge``
2812 will use the ``patch`` program with its 2-way merge option.
2827 will use the ``patch`` program with its 2-way merge option.
2813
2828
2814 ``portablefilenames``
2829 ``portablefilenames``
2815 Check for portable filenames. Can be ``warn``, ``ignore`` or ``abort``.
2830 Check for portable filenames. Can be ``warn``, ``ignore`` or ``abort``.
2816 (default: ``warn``)
2831 (default: ``warn``)
2817
2832
2818 ``warn``
2833 ``warn``
2819 Print a warning message on POSIX platforms, if a file with a non-portable
2834 Print a warning message on POSIX platforms, if a file with a non-portable
2820 filename is added (e.g. a file with a name that can't be created on
2835 filename is added (e.g. a file with a name that can't be created on
2821 Windows because it contains reserved parts like ``AUX``, reserved
2836 Windows because it contains reserved parts like ``AUX``, reserved
2822 characters like ``:``, or would cause a case collision with an existing
2837 characters like ``:``, or would cause a case collision with an existing
2823 file).
2838 file).
2824
2839
2825 ``ignore``
2840 ``ignore``
2826 Don't print a warning.
2841 Don't print a warning.
2827
2842
2828 ``abort``
2843 ``abort``
2829 The command is aborted.
2844 The command is aborted.
2830
2845
2831 ``true``
2846 ``true``
2832 Alias for ``warn``.
2847 Alias for ``warn``.
2833
2848
2834 ``false``
2849 ``false``
2835 Alias for ``ignore``.
2850 Alias for ``ignore``.
2836
2851
2837 .. container:: windows
2852 .. container:: windows
2838
2853
2839 On Windows, this configuration option is ignored and the command aborted.
2854 On Windows, this configuration option is ignored and the command aborted.
2840
2855
2841 ``pre-merge-tool-output-template``
2856 ``pre-merge-tool-output-template``
2842 (DEPRECATED) Use ``command-template.pre-merge-tool-output`` instead.
2857 (DEPRECATED) Use ``command-template.pre-merge-tool-output`` instead.
2843
2858
2844 ``quiet``
2859 ``quiet``
2845 Reduce the amount of output printed.
2860 Reduce the amount of output printed.
2846 (default: False)
2861 (default: False)
2847
2862
2848 ``relative-paths``
2863 ``relative-paths``
2849 Prefer relative paths in the UI.
2864 Prefer relative paths in the UI.
2850
2865
2851 ``remotecmd``
2866 ``remotecmd``
2852 Remote command to use for clone/push/pull operations.
2867 Remote command to use for clone/push/pull operations.
2853 (default: ``hg``)
2868 (default: ``hg``)
2854
2869
2855 ``report_untrusted``
2870 ``report_untrusted``
2856 Warn if a ``.hg/hgrc`` file is ignored due to not being owned by a
2871 Warn if a ``.hg/hgrc`` file is ignored due to not being owned by a
2857 trusted user or group.
2872 trusted user or group.
2858 (default: True)
2873 (default: True)
2859
2874
2860 ``slash``
2875 ``slash``
2861 (Deprecated. Use ``slashpath`` template filter instead.)
2876 (Deprecated. Use ``slashpath`` template filter instead.)
2862
2877
2863 Display paths using a slash (``/``) as the path separator. This
2878 Display paths using a slash (``/``) as the path separator. This
2864 only makes a difference on systems where the default path
2879 only makes a difference on systems where the default path
2865 separator is not the slash character (e.g. Windows uses the
2880 separator is not the slash character (e.g. Windows uses the
2866 backslash character (``\``)).
2881 backslash character (``\``)).
2867 (default: False)
2882 (default: False)
2868
2883
2869 ``statuscopies``
2884 ``statuscopies``
2870 Display copies in the status command.
2885 Display copies in the status command.
2871
2886
2872 ``ssh``
2887 ``ssh``
2873 Command to use for SSH connections. (default: ``ssh``)
2888 Command to use for SSH connections. (default: ``ssh``)
2874
2889
2875 ``ssherrorhint``
2890 ``ssherrorhint``
2876 A hint shown to the user in the case of SSH error (e.g.
2891 A hint shown to the user in the case of SSH error (e.g.
2877 ``Please see http://company/internalwiki/ssh.html``)
2892 ``Please see http://company/internalwiki/ssh.html``)
2878
2893
2879 ``strict``
2894 ``strict``
2880 Require exact command names, instead of allowing unambiguous
2895 Require exact command names, instead of allowing unambiguous
2881 abbreviations. (default: False)
2896 abbreviations. (default: False)
2882
2897
2883 ``style``
2898 ``style``
2884 Name of style to use for command output.
2899 Name of style to use for command output.
2885
2900
2886 ``supportcontact``
2901 ``supportcontact``
2887 A URL where users should report a Mercurial traceback. Use this if you are a
2902 A URL where users should report a Mercurial traceback. Use this if you are a
2888 large organisation with its own Mercurial deployment process and crash
2903 large organisation with its own Mercurial deployment process and crash
2889 reports should be addressed to your internal support.
2904 reports should be addressed to your internal support.
2890
2905
2891 ``textwidth``
2906 ``textwidth``
2892 Maximum width of help text. A longer line generated by ``hg help`` or
2907 Maximum width of help text. A longer line generated by ``hg help`` or
2893 ``hg subcommand --help`` will be broken after white space to get this
2908 ``hg subcommand --help`` will be broken after white space to get this
2894 width or the terminal width, whichever comes first.
2909 width or the terminal width, whichever comes first.
2895 A non-positive value will disable this and the terminal width will be
2910 A non-positive value will disable this and the terminal width will be
2896 used. (default: 78)
2911 used. (default: 78)
2897
2912
2898 ``timeout``
2913 ``timeout``
2899 The timeout used when a lock is held (in seconds), a negative value
2914 The timeout used when a lock is held (in seconds), a negative value
2900 means no timeout. (default: 600)
2915 means no timeout. (default: 600)
2901
2916
2902 ``timeout.warn``
2917 ``timeout.warn``
2903 Time (in seconds) before a warning is printed about held lock. A negative
2918 Time (in seconds) before a warning is printed about held lock. A negative
2904 value means no warning. (default: 0)
2919 value means no warning. (default: 0)
2905
2920
2906 ``traceback``
2921 ``traceback``
2907 Mercurial always prints a traceback when an unknown exception
2922 Mercurial always prints a traceback when an unknown exception
2908 occurs. Setting this to True will make Mercurial print a traceback
2923 occurs. Setting this to True will make Mercurial print a traceback
2909 on all exceptions, even those recognized by Mercurial (such as
2924 on all exceptions, even those recognized by Mercurial (such as
2910 IOError or MemoryError). (default: False)
2925 IOError or MemoryError). (default: False)
2911
2926
2912 ``tweakdefaults``
2927 ``tweakdefaults``
2913
2928
2914 By default Mercurial's behavior changes very little from release
2929 By default Mercurial's behavior changes very little from release
2915 to release, but over time the recommended config settings
2930 to release, but over time the recommended config settings
2916 shift. Enable this config to opt in to get automatic tweaks to
2931 shift. Enable this config to opt in to get automatic tweaks to
2917 Mercurial's behavior over time. This config setting will have no
2932 Mercurial's behavior over time. This config setting will have no
2918 effect if ``HGPLAIN`` is set or ``HGPLAINEXCEPT`` is set and does
2933 effect if ``HGPLAIN`` is set or ``HGPLAINEXCEPT`` is set and does
2919 not include ``tweakdefaults``. (default: False)
2934 not include ``tweakdefaults``. (default: False)
2920
2935
2921 It currently means::
2936 It currently means::
2922
2937
2923 .. tweakdefaultsmarker
2938 .. tweakdefaultsmarker
2924
2939
2925 ``username``
2940 ``username``
2926 The committer of a changeset created when running "commit".
2941 The committer of a changeset created when running "commit".
2927 Typically a person's name and email address, e.g. ``Fred Widget
2942 Typically a person's name and email address, e.g. ``Fred Widget
2928 <fred@example.com>``. Environment variables in the
2943 <fred@example.com>``. Environment variables in the
2929 username are expanded.
2944 username are expanded.
2930
2945
2931 (default: ``$EMAIL`` or ``username@hostname``. If the username in
2946 (default: ``$EMAIL`` or ``username@hostname``. If the username in
2932 hgrc is empty, e.g. if the system admin set ``username =`` in the
2947 hgrc is empty, e.g. if the system admin set ``username =`` in the
2933 system hgrc, it has to be specified manually or in a different
2948 system hgrc, it has to be specified manually or in a different
2934 hgrc file)
2949 hgrc file)
2935
2950
2936 ``verbose``
2951 ``verbose``
2937 Increase the amount of output printed. (default: False)
2952 Increase the amount of output printed. (default: False)
2938
2953
2939
2954
2940 ``command-templates``
2955 ``command-templates``
2941 ---------------------
2956 ---------------------
2942
2957
2943 Templates used for customizing the output of commands.
2958 Templates used for customizing the output of commands.
2944
2959
2945 ``graphnode``
2960 ``graphnode``
2946 The template used to print changeset nodes in an ASCII revision graph.
2961 The template used to print changeset nodes in an ASCII revision graph.
2947 (default: ``{graphnode}``)
2962 (default: ``{graphnode}``)
2948
2963
2949 ``log``
2964 ``log``
2950 Template string for commands that print changesets.
2965 Template string for commands that print changesets.
2951
2966
2952 ``mergemarker``
2967 ``mergemarker``
2953 The template used to print the commit description next to each conflict
2968 The template used to print the commit description next to each conflict
2954 marker during merge conflicts. See :hg:`help templates` for the template
2969 marker during merge conflicts. See :hg:`help templates` for the template
2955 format.
2970 format.
2956
2971
2957 Defaults to showing the hash, tags, branches, bookmarks, author, and
2972 Defaults to showing the hash, tags, branches, bookmarks, author, and
2958 the first line of the commit description.
2973 the first line of the commit description.
2959
2974
2960 If you use non-ASCII characters in names for tags, branches, bookmarks,
2975 If you use non-ASCII characters in names for tags, branches, bookmarks,
2961 authors, and/or commit descriptions, you must pay attention to encodings of
2976 authors, and/or commit descriptions, you must pay attention to encodings of
2962 managed files. At template expansion, non-ASCII characters use the encoding
2977 managed files. At template expansion, non-ASCII characters use the encoding
2963 specified by the ``--encoding`` global option, ``HGENCODING`` or other
2978 specified by the ``--encoding`` global option, ``HGENCODING`` or other
2964 environment variables that govern your locale. If the encoding of the merge
2979 environment variables that govern your locale. If the encoding of the merge
2965 markers is different from the encoding of the merged files,
2980 markers is different from the encoding of the merged files,
2966 serious problems may occur.
2981 serious problems may occur.
2967
2982
2968 Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
2983 Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
2969
2984
2970 ``oneline-summary``
2985 ``oneline-summary``
2971 A template used by `hg rebase` and other commands for showing a one-line
2986 A template used by `hg rebase` and other commands for showing a one-line
2972 summary of a commit. If the template configured here is longer than one
2987 summary of a commit. If the template configured here is longer than one
2973 line, then only the first line is used.
2988 line, then only the first line is used.
2974
2989
2975 The template can be overridden per command by defining a template in
2990 The template can be overridden per command by defining a template in
2976 `oneline-summary.<command>`, where `<command>` can be e.g. "rebase".
2991 `oneline-summary.<command>`, where `<command>` can be e.g. "rebase".
2977
2992
2978 ``pre-merge-tool-output``
2993 ``pre-merge-tool-output``
2979 A template that is printed before executing an external merge tool. This can
2994 A template that is printed before executing an external merge tool. This can
2980 be used to print out additional context that might be useful to have during
2995 be used to print out additional context that might be useful to have during
2981 the conflict resolution, such as the description of the various commits
2996 the conflict resolution, such as the description of the various commits
2982 involved or bookmarks/tags.
2997 involved or bookmarks/tags.
2983
2998
2984 Additional information is available in the ``local``, ``base``, and ``other``
2999 Additional information is available in the ``local``, ``base``, and ``other``
2985 dicts. For example: ``{local.label}``, ``{base.name}``, or
3000 dicts. For example: ``{local.label}``, ``{base.name}``, or
2986 ``{other.islink}``.
3001 ``{other.islink}``.
2987
3002
2988
3003
2989 ``web``
3004 ``web``
2990 -------
3005 -------
2991
3006
2992 Web interface configuration. The settings in this section apply to
3007 Web interface configuration. The settings in this section apply to
2993 both the builtin webserver (started by :hg:`serve`) and the script you
3008 both the builtin webserver (started by :hg:`serve`) and the script you
2994 run through a webserver (``hgweb.cgi`` and the derivatives for FastCGI
3009 run through a webserver (``hgweb.cgi`` and the derivatives for FastCGI
2995 and WSGI).
3010 and WSGI).
2996
3011
2997 The Mercurial webserver does no authentication (it does not prompt for
3012 The Mercurial webserver does no authentication (it does not prompt for
2998 usernames and passwords to validate *who* users are), but it does do
3013 usernames and passwords to validate *who* users are), but it does do
2999 authorization (it grants or denies access for *authenticated users*
3014 authorization (it grants or denies access for *authenticated users*
3000 based on settings in this section). You must either configure your
3015 based on settings in this section). You must either configure your
3001 webserver to do authentication for you, or disable the authorization
3016 webserver to do authentication for you, or disable the authorization
3002 checks.
3017 checks.
3003
3018
3004 For a quick setup in a trusted environment, e.g., a private LAN, where
3019 For a quick setup in a trusted environment, e.g., a private LAN, where
3005 you want it to accept pushes from anybody, you can use the following
3020 you want it to accept pushes from anybody, you can use the following
3006 command line::
3021 command line::
3007
3022
3008 $ hg --config web.allow-push=* --config web.push_ssl=False serve
3023 $ hg --config web.allow-push=* --config web.push_ssl=False serve
3009
3024
3010 Note that this will allow anybody to push anything to the server and
3025 Note that this will allow anybody to push anything to the server and
3011 that this should not be used for public servers.
3026 that this should not be used for public servers.
3012
3027
3013 The full set of options is:
3028 The full set of options is:
3014
3029
3015 ``accesslog``
3030 ``accesslog``
3016 Where to output the access log. (default: stdout)
3031 Where to output the access log. (default: stdout)
3017
3032
3018 ``address``
3033 ``address``
3019 Interface address to bind to. (default: all)
3034 Interface address to bind to. (default: all)
3020
3035
3021 ``allow-archive``
3036 ``allow-archive``
3022 List of archive format (bz2, gz, zip) allowed for downloading.
3037 List of archive format (bz2, gz, zip) allowed for downloading.
3023 (default: empty)
3038 (default: empty)
3024
3039
3025 ``allowbz2``
3040 ``allowbz2``
3026 (DEPRECATED) Whether to allow .tar.bz2 downloading of repository
3041 (DEPRECATED) Whether to allow .tar.bz2 downloading of repository
3027 revisions.
3042 revisions.
3028 (default: False)
3043 (default: False)
3029
3044
3030 ``allowgz``
3045 ``allowgz``
3031 (DEPRECATED) Whether to allow .tar.gz downloading of repository
3046 (DEPRECATED) Whether to allow .tar.gz downloading of repository
3032 revisions.
3047 revisions.
3033 (default: False)
3048 (default: False)
3034
3049
3035 ``allow-pull``
3050 ``allow-pull``
3036 Whether to allow pulling from the repository. (default: True)
3051 Whether to allow pulling from the repository. (default: True)
3037
3052
3038 ``allow-push``
3053 ``allow-push``
3039 Whether to allow pushing to the repository. If empty or not set,
3054 Whether to allow pushing to the repository. If empty or not set,
3040 pushing is not allowed. If the special value ``*``, any remote
3055 pushing is not allowed. If the special value ``*``, any remote
3041 user can push, including unauthenticated users. Otherwise, the
3056 user can push, including unauthenticated users. Otherwise, the
3042 remote user must have been authenticated, and the authenticated
3057 remote user must have been authenticated, and the authenticated
3043 user name must be present in this list. The contents of the
3058 user name must be present in this list. The contents of the
3044 allow-push list are examined after the deny_push list.
3059 allow-push list are examined after the deny_push list.
3045
3060
3046 ``allow_read``
3061 ``allow_read``
3047 If the user has not already been denied repository access due to
3062 If the user has not already been denied repository access due to
3048 the contents of deny_read, this list determines whether to grant
3063 the contents of deny_read, this list determines whether to grant
3049 repository access to the user. If this list is not empty, and the
3064 repository access to the user. If this list is not empty, and the
3050 user is unauthenticated or not present in the list, then access is
3065 user is unauthenticated or not present in the list, then access is
3051 denied for the user. If the list is empty or not set, then access
3066 denied for the user. If the list is empty or not set, then access
3052 is permitted to all users by default. Setting allow_read to the
3067 is permitted to all users by default. Setting allow_read to the
3053 special value ``*`` is equivalent to it not being set (i.e. access
3068 special value ``*`` is equivalent to it not being set (i.e. access
3054 is permitted to all users). The contents of the allow_read list are
3069 is permitted to all users). The contents of the allow_read list are
3055 examined after the deny_read list.
3070 examined after the deny_read list.
3056
3071
3057 ``allowzip``
3072 ``allowzip``
3058 (DEPRECATED) Whether to allow .zip downloading of repository
3073 (DEPRECATED) Whether to allow .zip downloading of repository
3059 revisions. This feature creates temporary files.
3074 revisions. This feature creates temporary files.
3060 (default: False)
3075 (default: False)
3061
3076
3062 ``archivesubrepos``
3077 ``archivesubrepos``
3063 Whether to recurse into subrepositories when archiving.
3078 Whether to recurse into subrepositories when archiving.
3064 (default: False)
3079 (default: False)
3065
3080
3066 ``baseurl``
3081 ``baseurl``
3067 Base URL to use when publishing URLs in other locations, so
3082 Base URL to use when publishing URLs in other locations, so
3068 third-party tools like email notification hooks can construct
3083 third-party tools like email notification hooks can construct
3069 URLs. Example: ``http://hgserver/repos/``.
3084 URLs. Example: ``http://hgserver/repos/``.
3070
3085
3071 ``cacerts``
3086 ``cacerts``
3072 Path to file containing a list of PEM encoded certificate
3087 Path to file containing a list of PEM encoded certificate
3073 authority certificates. Environment variables and ``~user``
3088 authority certificates. Environment variables and ``~user``
3074 constructs are expanded in the filename. If specified on the
3089 constructs are expanded in the filename. If specified on the
3075 client, then it will verify the identity of remote HTTPS servers
3090 client, then it will verify the identity of remote HTTPS servers
3076 with these certificates.
3091 with these certificates.
3077
3092
3078 To disable SSL verification temporarily, specify ``--insecure`` from
3093 To disable SSL verification temporarily, specify ``--insecure`` from
3079 command line.
3094 command line.
3080
3095
3081 You can use OpenSSL's CA certificate file if your platform has
3096 You can use OpenSSL's CA certificate file if your platform has
3082 one. On most Linux systems this will be
3097 one. On most Linux systems this will be
3083 ``/etc/ssl/certs/ca-certificates.crt``. Otherwise you will have to
3098 ``/etc/ssl/certs/ca-certificates.crt``. Otherwise you will have to
3084 generate this file manually. The form must be as follows::
3099 generate this file manually. The form must be as follows::
3085
3100
3086 -----BEGIN CERTIFICATE-----
3101 -----BEGIN CERTIFICATE-----
3087 ... (certificate in base64 PEM encoding) ...
3102 ... (certificate in base64 PEM encoding) ...
3088 -----END CERTIFICATE-----
3103 -----END CERTIFICATE-----
3089 -----BEGIN CERTIFICATE-----
3104 -----BEGIN CERTIFICATE-----
3090 ... (certificate in base64 PEM encoding) ...
3105 ... (certificate in base64 PEM encoding) ...
3091 -----END CERTIFICATE-----
3106 -----END CERTIFICATE-----
3092
3107
3093 ``cache``
3108 ``cache``
3094 Whether to support caching in hgweb. (default: True)
3109 Whether to support caching in hgweb. (default: True)
3095
3110
3096 ``certificate``
3111 ``certificate``
3097 Certificate to use when running :hg:`serve`.
3112 Certificate to use when running :hg:`serve`.
3098
3113
3099 ``collapse``
3114 ``collapse``
3100 With ``descend`` enabled, repositories in subdirectories are shown at
3115 With ``descend`` enabled, repositories in subdirectories are shown at
3101 a single level alongside repositories in the current path. With
3116 a single level alongside repositories in the current path. With
3102 ``collapse`` also enabled, repositories residing at a deeper level than
3117 ``collapse`` also enabled, repositories residing at a deeper level than
3103 the current path are grouped behind navigable directory entries that
3118 the current path are grouped behind navigable directory entries that
3104 lead to the locations of these repositories. In effect, this setting
3119 lead to the locations of these repositories. In effect, this setting
3105 collapses each collection of repositories found within a subdirectory
3120 collapses each collection of repositories found within a subdirectory
3106 into a single entry for that subdirectory. (default: False)
3121 into a single entry for that subdirectory. (default: False)
3107
3122
3108 ``comparisoncontext``
3123 ``comparisoncontext``
3109 Number of lines of context to show in side-by-side file comparison. If
3124 Number of lines of context to show in side-by-side file comparison. If
3110 negative or the value ``full``, whole files are shown. (default: 5)
3125 negative or the value ``full``, whole files are shown. (default: 5)
3111
3126
3112 This setting can be overridden by a ``context`` request parameter to the
3127 This setting can be overridden by a ``context`` request parameter to the
3113 ``comparison`` command, taking the same values.
3128 ``comparison`` command, taking the same values.
3114
3129
3115 ``contact``
3130 ``contact``
3116 Name or email address of the person in charge of the repository.
3131 Name or email address of the person in charge of the repository.
3117 (default: ui.username or ``$EMAIL`` or "unknown" if unset or empty)
3132 (default: ui.username or ``$EMAIL`` or "unknown" if unset or empty)
3118
3133
3119 ``csp``
3134 ``csp``
3120 Send a ``Content-Security-Policy`` HTTP header with this value.
3135 Send a ``Content-Security-Policy`` HTTP header with this value.
3121
3136
3122 The value may contain a special string ``%nonce%``, which will be replaced
3137 The value may contain a special string ``%nonce%``, which will be replaced
3123 by a randomly-generated one-time use value. If the value contains
3138 by a randomly-generated one-time use value. If the value contains
3124 ``%nonce%``, ``web.cache`` will be disabled, as caching undermines the
3139 ``%nonce%``, ``web.cache`` will be disabled, as caching undermines the
3125 one-time property of the nonce. This nonce will also be inserted into
3140 one-time property of the nonce. This nonce will also be inserted into
3126 ``<script>`` elements containing inline JavaScript.
3141 ``<script>`` elements containing inline JavaScript.
3127
3142
3128 Note: lots of HTML content sent by the server is derived from repository
3143 Note: lots of HTML content sent by the server is derived from repository
3129 data. Please consider the potential for malicious repository data to
3144 data. Please consider the potential for malicious repository data to
3130 "inject" itself into generated HTML content as part of your security
3145 "inject" itself into generated HTML content as part of your security
3131 threat model.
3146 threat model.
3132
3147
3133 ``deny_push``
3148 ``deny_push``
3134 Whether to deny pushing to the repository. If empty or not set,
3149 Whether to deny pushing to the repository. If empty or not set,
3135 push is not denied. If the special value ``*``, all remote users are
3150 push is not denied. If the special value ``*``, all remote users are
3136 denied push. Otherwise, unauthenticated users are all denied, and
3151 denied push. Otherwise, unauthenticated users are all denied, and
3137 any authenticated user name present in this list is also denied. The
3152 any authenticated user name present in this list is also denied. The
3138 contents of the deny_push list are examined before the allow-push list.
3153 contents of the deny_push list are examined before the allow-push list.
3139
3154
3140 ``deny_read``
3155 ``deny_read``
3141 Whether to deny reading/viewing of the repository. If this list is
3156 Whether to deny reading/viewing of the repository. If this list is
3142 not empty, unauthenticated users are all denied, and any
3157 not empty, unauthenticated users are all denied, and any
3143 authenticated user name present in this list is also denied access to
3158 authenticated user name present in this list is also denied access to
3144 the repository. If set to the special value ``*``, all remote users
3159 the repository. If set to the special value ``*``, all remote users
3145 are denied access (rarely needed ;). If deny_read is empty or not set,
3160 are denied access (rarely needed ;). If deny_read is empty or not set,
3146 the determination of repository access depends on the presence and
3161 the determination of repository access depends on the presence and
3147 content of the allow_read list (see description). If both
3162 content of the allow_read list (see description). If both
3148 deny_read and allow_read are empty or not set, then access is
3163 deny_read and allow_read are empty or not set, then access is
3149 permitted to all users by default. If the repository is being
3164 permitted to all users by default. If the repository is being
3150 served via hgwebdir, denied users will not be able to see it in
3165 served via hgwebdir, denied users will not be able to see it in
3151 the list of repositories. The contents of the deny_read list have
3166 the list of repositories. The contents of the deny_read list have
3152 priority over (are examined before) the contents of the allow_read
3167 priority over (are examined before) the contents of the allow_read
3153 list.
3168 list.
3154
3169
3155 ``descend``
3170 ``descend``
3156 hgwebdir indexes will not descend into subdirectories. Only repositories
3171 hgwebdir indexes will not descend into subdirectories. Only repositories
3157 directly in the current path will be shown (other repositories are still
3172 directly in the current path will be shown (other repositories are still
3158 available from the index corresponding to their containing path).
3173 available from the index corresponding to their containing path).
3159
3174
3160 ``description``
3175 ``description``
3161 Textual description of the repository's purpose or contents.
3176 Textual description of the repository's purpose or contents.
3162 (default: "unknown")
3177 (default: "unknown")
3163
3178
3164 ``encoding``
3179 ``encoding``
3165 Character encoding name. (default: the current locale charset)
3180 Character encoding name. (default: the current locale charset)
3166 Example: "UTF-8".
3181 Example: "UTF-8".
3167
3182
3168 ``errorlog``
3183 ``errorlog``
3169 Where to output the error log. (default: stderr)
3184 Where to output the error log. (default: stderr)
3170
3185
3171 ``guessmime``
3186 ``guessmime``
3172 Control MIME types for raw download of file content.
3187 Control MIME types for raw download of file content.
3173 Set to True to let hgweb guess the content type from the file
3188 Set to True to let hgweb guess the content type from the file
3174 extension. This will serve HTML files as ``text/html`` and might
3189 extension. This will serve HTML files as ``text/html`` and might
3175 allow cross-site scripting attacks when serving untrusted
3190 allow cross-site scripting attacks when serving untrusted
3176 repositories. (default: False)
3191 repositories. (default: False)
3177
3192
3178 ``hidden``
3193 ``hidden``
3179 Whether to hide the repository in the hgwebdir index.
3194 Whether to hide the repository in the hgwebdir index.
3180 (default: False)
3195 (default: False)
3181
3196
3182 ``ipv6``
3197 ``ipv6``
3183 Whether to use IPv6. (default: False)
3198 Whether to use IPv6. (default: False)
3184
3199
3185 ``labels``
3200 ``labels``
3186 List of string *labels* associated with the repository.
3201 List of string *labels* associated with the repository.
3187
3202
3188 Labels are exposed as a template keyword and can be used to customize
3203 Labels are exposed as a template keyword and can be used to customize
3189 output. e.g. the ``index`` template can group or filter repositories
3204 output. e.g. the ``index`` template can group or filter repositories
3190 by labels and the ``summary`` template can display additional content
3205 by labels and the ``summary`` template can display additional content
3191 if a specific label is present.
3206 if a specific label is present.
3192
3207
3193 ``logoimg``
3208 ``logoimg``
3194 File name of the logo image that some templates display on each page.
3209 File name of the logo image that some templates display on each page.
3195 The file name is relative to ``staticurl``. That is, the full path to
3210 The file name is relative to ``staticurl``. That is, the full path to
3196 the logo image is "staticurl/logoimg".
3211 the logo image is "staticurl/logoimg".
3197 If unset, ``hglogo.png`` will be used.
3212 If unset, ``hglogo.png`` will be used.
3198
3213
3199 ``logourl``
3214 ``logourl``
3200 Base URL to use for logos. If unset, ``https://mercurial-scm.org/``
3215 Base URL to use for logos. If unset, ``https://mercurial-scm.org/``
3201 will be used.
3216 will be used.
3202
3217
3203 ``maxchanges``
3218 ``maxchanges``
3204 Maximum number of changes to list on the changelog. (default: 10)
3219 Maximum number of changes to list on the changelog. (default: 10)
3205
3220
3206 ``maxfiles``
3221 ``maxfiles``
3207 Maximum number of files to list per changeset. (default: 10)
3222 Maximum number of files to list per changeset. (default: 10)
3208
3223
3209 ``maxshortchanges``
3224 ``maxshortchanges``
3210 Maximum number of changes to list on the shortlog, graph or filelog
3225 Maximum number of changes to list on the shortlog, graph or filelog
3211 pages. (default: 60)
3226 pages. (default: 60)
3212
3227
3213 ``name``
3228 ``name``
3214 Repository name to use in the web interface.
3229 Repository name to use in the web interface.
3215 (default: current working directory)
3230 (default: current working directory)
3216
3231
3217 ``port``
3232 ``port``
3218 Port to listen on. (default: 8000)
3233 Port to listen on. (default: 8000)
3219
3234
3220 ``prefix``
3235 ``prefix``
3221 Prefix path to serve from. (default: '' (server root))
3236 Prefix path to serve from. (default: '' (server root))
3222
3237
3223 ``push_ssl``
3238 ``push_ssl``
3224 Whether to require that inbound pushes be transported over SSL to
3239 Whether to require that inbound pushes be transported over SSL to
3225 prevent password sniffing. (default: True)
3240 prevent password sniffing. (default: True)
3226
3241
3227 ``refreshinterval``
3242 ``refreshinterval``
3228 How frequently directory listings re-scan the filesystem for new
3243 How frequently directory listings re-scan the filesystem for new
3229 repositories, in seconds. This is relevant when wildcards are used
3244 repositories, in seconds. This is relevant when wildcards are used
3230 to define paths. Depending on how much filesystem traversal is
3245 to define paths. Depending on how much filesystem traversal is
3231 required, refreshing may negatively impact performance.
3246 required, refreshing may negatively impact performance.
3232
3247
3233 Values less than or equal to 0 always refresh.
3248 Values less than or equal to 0 always refresh.
3234 (default: 20)
3249 (default: 20)
3235
3250
3236 ``server-header``
3251 ``server-header``
3237 Value for HTTP ``Server`` response header.
3252 Value for HTTP ``Server`` response header.
3238
3253
3239 ``static``
3254 ``static``
3240 Directory where static files are served from.
3255 Directory where static files are served from.
3241
3256
3242 ``staticurl``
3257 ``staticurl``
3243 Base URL to use for static files. If unset, static files (e.g. the
3258 Base URL to use for static files. If unset, static files (e.g. the
3244 hgicon.png favicon) will be served by the CGI script itself. Use
3259 hgicon.png favicon) will be served by the CGI script itself. Use
3245 this setting to serve them directly with the HTTP server.
3260 this setting to serve them directly with the HTTP server.
3246 Example: ``http://hgserver/static/``.
3261 Example: ``http://hgserver/static/``.
3247
3262
3248 ``stripes``
3263 ``stripes``
3249 How many lines a "zebra stripe" should span in multi-line output.
3264 How many lines a "zebra stripe" should span in multi-line output.
3250 Set to 0 to disable. (default: 1)
3265 Set to 0 to disable. (default: 1)
3251
3266
3252 ``style``
3267 ``style``
3253 Which template map style to use. The available options are the names of
3268 Which template map style to use. The available options are the names of
3254 subdirectories in the HTML templates path. (default: ``paper``)
3269 subdirectories in the HTML templates path. (default: ``paper``)
3255 Example: ``monoblue``.
3270 Example: ``monoblue``.
3256
3271
3257 ``templates``
3272 ``templates``
3258 Where to find the HTML templates. The default path to the HTML templates
3273 Where to find the HTML templates. The default path to the HTML templates
3259 can be obtained from ``hg debuginstall``.
3274 can be obtained from ``hg debuginstall``.
3260
3275
3261 ``websub``
3276 ``websub``
3262 ----------
3277 ----------
3263
3278
3264 Web substitution filter definition. You can use this section to
3279 Web substitution filter definition. You can use this section to
3265 define a set of regular expression substitution patterns which
3280 define a set of regular expression substitution patterns which
3266 let you automatically modify the hgweb server output.
3281 let you automatically modify the hgweb server output.
3267
3282
3268 The default hgweb templates only apply these substitution patterns
3283 The default hgweb templates only apply these substitution patterns
3269 on the revision description fields. You can apply them anywhere
3284 on the revision description fields. You can apply them anywhere
3270 you want when you create your own templates by adding calls to the
3285 you want when you create your own templates by adding calls to the
3271 "websub" filter (usually after calling the "escape" filter).
3286 "websub" filter (usually after calling the "escape" filter).
3272
3287
3273 This can be used, for example, to convert issue references to links
3288 This can be used, for example, to convert issue references to links
3274 to your issue tracker, or to convert "markdown-like" syntax into
3289 to your issue tracker, or to convert "markdown-like" syntax into
3275 HTML (see the examples below).
3290 HTML (see the examples below).
3276
3291
3277 Each entry in this section names a substitution filter.
3292 Each entry in this section names a substitution filter.
3278 The value of each entry defines the substitution expression itself.
3293 The value of each entry defines the substitution expression itself.
3279 The websub expressions follow the old interhg extension syntax,
3294 The websub expressions follow the old interhg extension syntax,
3280 which in turn imitates the Unix sed replacement syntax::
3295 which in turn imitates the Unix sed replacement syntax::
3281
3296
3282 patternname = s/SEARCH_REGEX/REPLACE_EXPRESSION/[i]
3297 patternname = s/SEARCH_REGEX/REPLACE_EXPRESSION/[i]
3283
3298
3284 You can use any separator other than "/". The final "i" is optional
3299 You can use any separator other than "/". The final "i" is optional
3285 and indicates that the search must be case insensitive.
3300 and indicates that the search must be case insensitive.
3286
3301
3287 Examples::
3302 Examples::
3288
3303
3289 [websub]
3304 [websub]
3290 issues = s|issue(\d+)|<a href="http://bts.example.org/issue\1">issue\1</a>|i
3305 issues = s|issue(\d+)|<a href="http://bts.example.org/issue\1">issue\1</a>|i
3291 italic = s/\b_(\S+)_\b/<i>\1<\/i>/
3306 italic = s/\b_(\S+)_\b/<i>\1<\/i>/
3292 bold = s/\*\b(\S+)\b\*/<b>\1<\/b>/
3307 bold = s/\*\b(\S+)\b\*/<b>\1<\/b>/
3293
3308
3294 ``worker``
3309 ``worker``
3295 ----------
3310 ----------
3296
3311
3297 Parallel master/worker configuration. We currently perform working
3312 Parallel master/worker configuration. We currently perform working
3298 directory updates in parallel on Unix-like systems, which greatly
3313 directory updates in parallel on Unix-like systems, which greatly
3299 helps performance.
3314 helps performance.
3300
3315
3301 ``enabled``
3316 ``enabled``
3302 Whether to enable workers code to be used.
3317 Whether to enable workers code to be used.
3303 (default: true)
3318 (default: true)
3304
3319
3305 ``numcpus``
3320 ``numcpus``
3306 Number of CPUs to use for parallel operations. A zero or
3321 Number of CPUs to use for parallel operations. A zero or
3307 negative value is treated as ``use the default``.
3322 negative value is treated as ``use the default``.
3308 (default: 4 or the number of CPUs on the system, whichever is larger)
3323 (default: 4 or the number of CPUs on the system, whichever is larger)
3309
3324
3310 ``backgroundclose``
3325 ``backgroundclose``
3311 Whether to enable closing file handles on background threads during certain
3326 Whether to enable closing file handles on background threads during certain
3312 operations. Some platforms aren't very efficient at closing file
3327 operations. Some platforms aren't very efficient at closing file
3313 handles that have been written or appended to. By performing file closing
3328 handles that have been written or appended to. By performing file closing
3314 on background threads, file write rate can increase substantially.
3329 on background threads, file write rate can increase substantially.
3315 (default: true on Windows, false elsewhere)
3330 (default: true on Windows, false elsewhere)
3316
3331
3317 ``backgroundcloseminfilecount``
3332 ``backgroundcloseminfilecount``
3318 Minimum number of files required to trigger background file closing.
3333 Minimum number of files required to trigger background file closing.
3319 Operations not writing this many files won't start background close
3334 Operations not writing this many files won't start background close
3320 threads.
3335 threads.
3321 (default: 2048)
3336 (default: 2048)
3322
3337
3323 ``backgroundclosemaxqueue``
3338 ``backgroundclosemaxqueue``
3324 The maximum number of opened file handles waiting to be closed in the
3339 The maximum number of opened file handles waiting to be closed in the
3325 background. This option only has an effect if ``backgroundclose`` is
3340 background. This option only has an effect if ``backgroundclose`` is
3326 enabled.
3341 enabled.
3327 (default: 384)
3342 (default: 384)
3328
3343
3329 ``backgroundclosethreadcount``
3344 ``backgroundclosethreadcount``
3330 Number of threads to process background file closes. Only relevant if
3345 Number of threads to process background file closes. Only relevant if
3331 ``backgroundclose`` is enabled.
3346 ``backgroundclose`` is enabled.
3332 (default: 4)
3347 (default: 4)
@@ -1,3973 +1,3978 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 # coding: utf-8
2 # coding: utf-8
3 #
3 #
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import functools
10 import functools
11 import os
11 import os
12 import random
12 import random
13 import sys
13 import sys
14 import time
14 import time
15 import weakref
15 import weakref
16
16
17 from concurrent import futures
17 from concurrent import futures
18 from typing import (
18 from typing import (
19 Optional,
19 Optional,
20 )
20 )
21
21
22 from .i18n import _
22 from .i18n import _
23 from .node import (
23 from .node import (
24 bin,
24 bin,
25 hex,
25 hex,
26 nullrev,
26 nullrev,
27 sha1nodeconstants,
27 sha1nodeconstants,
28 short,
28 short,
29 )
29 )
30 from .pycompat import (
30 from .pycompat import (
31 delattr,
31 delattr,
32 getattr,
32 getattr,
33 )
33 )
34 from . import (
34 from . import (
35 bookmarks,
35 bookmarks,
36 branchmap,
36 branchmap,
37 bundle2,
37 bundle2,
38 bundlecaches,
38 bundlecaches,
39 changegroup,
39 changegroup,
40 color,
40 color,
41 commit,
41 commit,
42 context,
42 context,
43 dirstate,
43 dirstate,
44 dirstateguard,
44 dirstateguard,
45 discovery,
45 discovery,
46 encoding,
46 encoding,
47 error,
47 error,
48 exchange,
48 exchange,
49 extensions,
49 extensions,
50 filelog,
50 filelog,
51 hook,
51 hook,
52 lock as lockmod,
52 lock as lockmod,
53 match as matchmod,
53 match as matchmod,
54 mergestate as mergestatemod,
54 mergestate as mergestatemod,
55 mergeutil,
55 mergeutil,
56 namespaces,
56 namespaces,
57 narrowspec,
57 narrowspec,
58 obsolete,
58 obsolete,
59 pathutil,
59 pathutil,
60 phases,
60 phases,
61 pushkey,
61 pushkey,
62 pycompat,
62 pycompat,
63 rcutil,
63 rcutil,
64 repoview,
64 repoview,
65 requirements as requirementsmod,
65 requirements as requirementsmod,
66 revlog,
66 revlog,
67 revset,
67 revset,
68 revsetlang,
68 revsetlang,
69 scmutil,
69 scmutil,
70 sparse,
70 sparse,
71 store as storemod,
71 store as storemod,
72 subrepoutil,
72 subrepoutil,
73 tags as tagsmod,
73 tags as tagsmod,
74 transaction,
74 transaction,
75 txnutil,
75 txnutil,
76 util,
76 util,
77 vfs as vfsmod,
77 vfs as vfsmod,
78 wireprototypes,
78 wireprototypes,
79 )
79 )
80
80
81 from .interfaces import (
81 from .interfaces import (
82 repository,
82 repository,
83 util as interfaceutil,
83 util as interfaceutil,
84 )
84 )
85
85
86 from .utils import (
86 from .utils import (
87 hashutil,
87 hashutil,
88 procutil,
88 procutil,
89 stringutil,
89 stringutil,
90 urlutil,
90 urlutil,
91 )
91 )
92
92
93 from .revlogutils import (
93 from .revlogutils import (
94 concurrency_checker as revlogchecker,
94 concurrency_checker as revlogchecker,
95 constants as revlogconst,
95 constants as revlogconst,
96 sidedata as sidedatamod,
96 sidedata as sidedatamod,
97 )
97 )
98
98
99 release = lockmod.release
99 release = lockmod.release
100 urlerr = util.urlerr
100 urlerr = util.urlerr
101 urlreq = util.urlreq
101 urlreq = util.urlreq
102
102
103 # set of (path, vfs-location) tuples. vfs-location is:
103 # set of (path, vfs-location) tuples. vfs-location is:
104 # - 'plain for vfs relative paths
104 # - 'plain for vfs relative paths
105 # - '' for svfs relative paths
105 # - '' for svfs relative paths
106 _cachedfiles = set()
106 _cachedfiles = set()
107
107
108
108
109 class _basefilecache(scmutil.filecache):
109 class _basefilecache(scmutil.filecache):
110 """All filecache usage on repo are done for logic that should be unfiltered"""
110 """All filecache usage on repo are done for logic that should be unfiltered"""
111
111
112 def __get__(self, repo, type=None):
112 def __get__(self, repo, type=None):
113 if repo is None:
113 if repo is None:
114 return self
114 return self
115 # proxy to unfiltered __dict__ since filtered repo has no entry
115 # proxy to unfiltered __dict__ since filtered repo has no entry
116 unfi = repo.unfiltered()
116 unfi = repo.unfiltered()
117 try:
117 try:
118 return unfi.__dict__[self.sname]
118 return unfi.__dict__[self.sname]
119 except KeyError:
119 except KeyError:
120 pass
120 pass
121 return super(_basefilecache, self).__get__(unfi, type)
121 return super(_basefilecache, self).__get__(unfi, type)
122
122
123 def set(self, repo, value):
123 def set(self, repo, value):
124 return super(_basefilecache, self).set(repo.unfiltered(), value)
124 return super(_basefilecache, self).set(repo.unfiltered(), value)
125
125
126
126
127 class repofilecache(_basefilecache):
127 class repofilecache(_basefilecache):
128 """filecache for files in .hg but outside of .hg/store"""
128 """filecache for files in .hg but outside of .hg/store"""
129
129
130 def __init__(self, *paths):
130 def __init__(self, *paths):
131 super(repofilecache, self).__init__(*paths)
131 super(repofilecache, self).__init__(*paths)
132 for path in paths:
132 for path in paths:
133 _cachedfiles.add((path, b'plain'))
133 _cachedfiles.add((path, b'plain'))
134
134
135 def join(self, obj, fname):
135 def join(self, obj, fname):
136 return obj.vfs.join(fname)
136 return obj.vfs.join(fname)
137
137
138
138
139 class storecache(_basefilecache):
139 class storecache(_basefilecache):
140 """filecache for files in the store"""
140 """filecache for files in the store"""
141
141
142 def __init__(self, *paths):
142 def __init__(self, *paths):
143 super(storecache, self).__init__(*paths)
143 super(storecache, self).__init__(*paths)
144 for path in paths:
144 for path in paths:
145 _cachedfiles.add((path, b''))
145 _cachedfiles.add((path, b''))
146
146
147 def join(self, obj, fname):
147 def join(self, obj, fname):
148 return obj.sjoin(fname)
148 return obj.sjoin(fname)
149
149
150
150
151 class changelogcache(storecache):
151 class changelogcache(storecache):
152 """filecache for the changelog"""
152 """filecache for the changelog"""
153
153
154 def __init__(self):
154 def __init__(self):
155 super(changelogcache, self).__init__()
155 super(changelogcache, self).__init__()
156 _cachedfiles.add((b'00changelog.i', b''))
156 _cachedfiles.add((b'00changelog.i', b''))
157 _cachedfiles.add((b'00changelog.n', b''))
157 _cachedfiles.add((b'00changelog.n', b''))
158
158
159 def tracked_paths(self, obj):
159 def tracked_paths(self, obj):
160 paths = [self.join(obj, b'00changelog.i')]
160 paths = [self.join(obj, b'00changelog.i')]
161 if obj.store.opener.options.get(b'persistent-nodemap', False):
161 if obj.store.opener.options.get(b'persistent-nodemap', False):
162 paths.append(self.join(obj, b'00changelog.n'))
162 paths.append(self.join(obj, b'00changelog.n'))
163 return paths
163 return paths
164
164
165
165
166 class manifestlogcache(storecache):
166 class manifestlogcache(storecache):
167 """filecache for the manifestlog"""
167 """filecache for the manifestlog"""
168
168
169 def __init__(self):
169 def __init__(self):
170 super(manifestlogcache, self).__init__()
170 super(manifestlogcache, self).__init__()
171 _cachedfiles.add((b'00manifest.i', b''))
171 _cachedfiles.add((b'00manifest.i', b''))
172 _cachedfiles.add((b'00manifest.n', b''))
172 _cachedfiles.add((b'00manifest.n', b''))
173
173
174 def tracked_paths(self, obj):
174 def tracked_paths(self, obj):
175 paths = [self.join(obj, b'00manifest.i')]
175 paths = [self.join(obj, b'00manifest.i')]
176 if obj.store.opener.options.get(b'persistent-nodemap', False):
176 if obj.store.opener.options.get(b'persistent-nodemap', False):
177 paths.append(self.join(obj, b'00manifest.n'))
177 paths.append(self.join(obj, b'00manifest.n'))
178 return paths
178 return paths
179
179
180
180
181 class mixedrepostorecache(_basefilecache):
181 class mixedrepostorecache(_basefilecache):
182 """filecache for a mix files in .hg/store and outside"""
182 """filecache for a mix files in .hg/store and outside"""
183
183
184 def __init__(self, *pathsandlocations):
184 def __init__(self, *pathsandlocations):
185 # scmutil.filecache only uses the path for passing back into our
185 # scmutil.filecache only uses the path for passing back into our
186 # join(), so we can safely pass a list of paths and locations
186 # join(), so we can safely pass a list of paths and locations
187 super(mixedrepostorecache, self).__init__(*pathsandlocations)
187 super(mixedrepostorecache, self).__init__(*pathsandlocations)
188 _cachedfiles.update(pathsandlocations)
188 _cachedfiles.update(pathsandlocations)
189
189
190 def join(self, obj, fnameandlocation):
190 def join(self, obj, fnameandlocation):
191 fname, location = fnameandlocation
191 fname, location = fnameandlocation
192 if location == b'plain':
192 if location == b'plain':
193 return obj.vfs.join(fname)
193 return obj.vfs.join(fname)
194 else:
194 else:
195 if location != b'':
195 if location != b'':
196 raise error.ProgrammingError(
196 raise error.ProgrammingError(
197 b'unexpected location: %s' % location
197 b'unexpected location: %s' % location
198 )
198 )
199 return obj.sjoin(fname)
199 return obj.sjoin(fname)
200
200
201
201
202 def isfilecached(repo, name):
202 def isfilecached(repo, name):
203 """check if a repo has already cached "name" filecache-ed property
203 """check if a repo has already cached "name" filecache-ed property
204
204
205 This returns (cachedobj-or-None, iscached) tuple.
205 This returns (cachedobj-or-None, iscached) tuple.
206 """
206 """
207 cacheentry = repo.unfiltered()._filecache.get(name, None)
207 cacheentry = repo.unfiltered()._filecache.get(name, None)
208 if not cacheentry:
208 if not cacheentry:
209 return None, False
209 return None, False
210 return cacheentry.obj, True
210 return cacheentry.obj, True
211
211
212
212
213 class unfilteredpropertycache(util.propertycache):
213 class unfilteredpropertycache(util.propertycache):
214 """propertycache that apply to unfiltered repo only"""
214 """propertycache that apply to unfiltered repo only"""
215
215
216 def __get__(self, repo, type=None):
216 def __get__(self, repo, type=None):
217 unfi = repo.unfiltered()
217 unfi = repo.unfiltered()
218 if unfi is repo:
218 if unfi is repo:
219 return super(unfilteredpropertycache, self).__get__(unfi)
219 return super(unfilteredpropertycache, self).__get__(unfi)
220 return getattr(unfi, self.name)
220 return getattr(unfi, self.name)
221
221
222
222
223 class filteredpropertycache(util.propertycache):
223 class filteredpropertycache(util.propertycache):
224 """propertycache that must take filtering in account"""
224 """propertycache that must take filtering in account"""
225
225
226 def cachevalue(self, obj, value):
226 def cachevalue(self, obj, value):
227 object.__setattr__(obj, self.name, value)
227 object.__setattr__(obj, self.name, value)
228
228
229
229
230 def hasunfilteredcache(repo, name):
230 def hasunfilteredcache(repo, name):
231 """check if a repo has an unfilteredpropertycache value for <name>"""
231 """check if a repo has an unfilteredpropertycache value for <name>"""
232 return name in vars(repo.unfiltered())
232 return name in vars(repo.unfiltered())
233
233
234
234
235 def unfilteredmethod(orig):
235 def unfilteredmethod(orig):
236 """decorate method that always need to be run on unfiltered version"""
236 """decorate method that always need to be run on unfiltered version"""
237
237
238 @functools.wraps(orig)
238 @functools.wraps(orig)
239 def wrapper(repo, *args, **kwargs):
239 def wrapper(repo, *args, **kwargs):
240 return orig(repo.unfiltered(), *args, **kwargs)
240 return orig(repo.unfiltered(), *args, **kwargs)
241
241
242 return wrapper
242 return wrapper
243
243
244
244
245 moderncaps = {
245 moderncaps = {
246 b'lookup',
246 b'lookup',
247 b'branchmap',
247 b'branchmap',
248 b'pushkey',
248 b'pushkey',
249 b'known',
249 b'known',
250 b'getbundle',
250 b'getbundle',
251 b'unbundle',
251 b'unbundle',
252 }
252 }
253 legacycaps = moderncaps.union({b'changegroupsubset'})
253 legacycaps = moderncaps.union({b'changegroupsubset'})
254
254
255
255
256 @interfaceutil.implementer(repository.ipeercommandexecutor)
256 @interfaceutil.implementer(repository.ipeercommandexecutor)
257 class localcommandexecutor:
257 class localcommandexecutor:
258 def __init__(self, peer):
258 def __init__(self, peer):
259 self._peer = peer
259 self._peer = peer
260 self._sent = False
260 self._sent = False
261 self._closed = False
261 self._closed = False
262
262
263 def __enter__(self):
263 def __enter__(self):
264 return self
264 return self
265
265
266 def __exit__(self, exctype, excvalue, exctb):
266 def __exit__(self, exctype, excvalue, exctb):
267 self.close()
267 self.close()
268
268
269 def callcommand(self, command, args):
269 def callcommand(self, command, args):
270 if self._sent:
270 if self._sent:
271 raise error.ProgrammingError(
271 raise error.ProgrammingError(
272 b'callcommand() cannot be used after sendcommands()'
272 b'callcommand() cannot be used after sendcommands()'
273 )
273 )
274
274
275 if self._closed:
275 if self._closed:
276 raise error.ProgrammingError(
276 raise error.ProgrammingError(
277 b'callcommand() cannot be used after close()'
277 b'callcommand() cannot be used after close()'
278 )
278 )
279
279
280 # We don't need to support anything fancy. Just call the named
280 # We don't need to support anything fancy. Just call the named
281 # method on the peer and return a resolved future.
281 # method on the peer and return a resolved future.
282 fn = getattr(self._peer, pycompat.sysstr(command))
282 fn = getattr(self._peer, pycompat.sysstr(command))
283
283
284 f = futures.Future()
284 f = futures.Future()
285
285
286 try:
286 try:
287 result = fn(**pycompat.strkwargs(args))
287 result = fn(**pycompat.strkwargs(args))
288 except Exception:
288 except Exception:
289 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
289 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
290 else:
290 else:
291 f.set_result(result)
291 f.set_result(result)
292
292
293 return f
293 return f
294
294
295 def sendcommands(self):
295 def sendcommands(self):
296 self._sent = True
296 self._sent = True
297
297
298 def close(self):
298 def close(self):
299 self._closed = True
299 self._closed = True
300
300
301
301
302 @interfaceutil.implementer(repository.ipeercommands)
302 @interfaceutil.implementer(repository.ipeercommands)
303 class localpeer(repository.peer):
303 class localpeer(repository.peer):
304 '''peer for a local repo; reflects only the most recent API'''
304 '''peer for a local repo; reflects only the most recent API'''
305
305
306 def __init__(self, repo, caps=None):
306 def __init__(self, repo, caps=None):
307 super(localpeer, self).__init__()
307 super(localpeer, self).__init__()
308
308
309 if caps is None:
309 if caps is None:
310 caps = moderncaps.copy()
310 caps = moderncaps.copy()
311 self._repo = repo.filtered(b'served')
311 self._repo = repo.filtered(b'served')
312 self.ui = repo.ui
312 self.ui = repo.ui
313
313
314 if repo._wanted_sidedata:
314 if repo._wanted_sidedata:
315 formatted = bundle2.format_remote_wanted_sidedata(repo)
315 formatted = bundle2.format_remote_wanted_sidedata(repo)
316 caps.add(b'exp-wanted-sidedata=' + formatted)
316 caps.add(b'exp-wanted-sidedata=' + formatted)
317
317
318 self._caps = repo._restrictcapabilities(caps)
318 self._caps = repo._restrictcapabilities(caps)
319
319
320 # Begin of _basepeer interface.
320 # Begin of _basepeer interface.
321
321
322 def url(self):
322 def url(self):
323 return self._repo.url()
323 return self._repo.url()
324
324
325 def local(self):
325 def local(self):
326 return self._repo
326 return self._repo
327
327
328 def peer(self):
328 def peer(self):
329 return self
329 return self
330
330
331 def canpush(self):
331 def canpush(self):
332 return True
332 return True
333
333
334 def close(self):
334 def close(self):
335 self._repo.close()
335 self._repo.close()
336
336
337 # End of _basepeer interface.
337 # End of _basepeer interface.
338
338
339 # Begin of _basewirecommands interface.
339 # Begin of _basewirecommands interface.
340
340
341 def branchmap(self):
341 def branchmap(self):
342 return self._repo.branchmap()
342 return self._repo.branchmap()
343
343
344 def capabilities(self):
344 def capabilities(self):
345 return self._caps
345 return self._caps
346
346
347 def clonebundles(self):
347 def clonebundles(self):
348 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
348 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
349
349
350 def debugwireargs(self, one, two, three=None, four=None, five=None):
350 def debugwireargs(self, one, two, three=None, four=None, five=None):
351 """Used to test argument passing over the wire"""
351 """Used to test argument passing over the wire"""
352 return b"%s %s %s %s %s" % (
352 return b"%s %s %s %s %s" % (
353 one,
353 one,
354 two,
354 two,
355 pycompat.bytestr(three),
355 pycompat.bytestr(three),
356 pycompat.bytestr(four),
356 pycompat.bytestr(four),
357 pycompat.bytestr(five),
357 pycompat.bytestr(five),
358 )
358 )
359
359
360 def getbundle(
360 def getbundle(
361 self,
361 self,
362 source,
362 source,
363 heads=None,
363 heads=None,
364 common=None,
364 common=None,
365 bundlecaps=None,
365 bundlecaps=None,
366 remote_sidedata=None,
366 remote_sidedata=None,
367 **kwargs
367 **kwargs
368 ):
368 ):
369 chunks = exchange.getbundlechunks(
369 chunks = exchange.getbundlechunks(
370 self._repo,
370 self._repo,
371 source,
371 source,
372 heads=heads,
372 heads=heads,
373 common=common,
373 common=common,
374 bundlecaps=bundlecaps,
374 bundlecaps=bundlecaps,
375 remote_sidedata=remote_sidedata,
375 remote_sidedata=remote_sidedata,
376 **kwargs
376 **kwargs
377 )[1]
377 )[1]
378 cb = util.chunkbuffer(chunks)
378 cb = util.chunkbuffer(chunks)
379
379
380 if exchange.bundle2requested(bundlecaps):
380 if exchange.bundle2requested(bundlecaps):
381 # When requesting a bundle2, getbundle returns a stream to make the
381 # When requesting a bundle2, getbundle returns a stream to make the
382 # wire level function happier. We need to build a proper object
382 # wire level function happier. We need to build a proper object
383 # from it in local peer.
383 # from it in local peer.
384 return bundle2.getunbundler(self.ui, cb)
384 return bundle2.getunbundler(self.ui, cb)
385 else:
385 else:
386 return changegroup.getunbundler(b'01', cb, None)
386 return changegroup.getunbundler(b'01', cb, None)
387
387
388 def heads(self):
388 def heads(self):
389 return self._repo.heads()
389 return self._repo.heads()
390
390
391 def known(self, nodes):
391 def known(self, nodes):
392 return self._repo.known(nodes)
392 return self._repo.known(nodes)
393
393
394 def listkeys(self, namespace):
394 def listkeys(self, namespace):
395 return self._repo.listkeys(namespace)
395 return self._repo.listkeys(namespace)
396
396
397 def lookup(self, key):
397 def lookup(self, key):
398 return self._repo.lookup(key)
398 return self._repo.lookup(key)
399
399
400 def pushkey(self, namespace, key, old, new):
400 def pushkey(self, namespace, key, old, new):
401 return self._repo.pushkey(namespace, key, old, new)
401 return self._repo.pushkey(namespace, key, old, new)
402
402
403 def stream_out(self):
403 def stream_out(self):
404 raise error.Abort(_(b'cannot perform stream clone against local peer'))
404 raise error.Abort(_(b'cannot perform stream clone against local peer'))
405
405
406 def unbundle(self, bundle, heads, url):
406 def unbundle(self, bundle, heads, url):
407 """apply a bundle on a repo
407 """apply a bundle on a repo
408
408
409 This function handles the repo locking itself."""
409 This function handles the repo locking itself."""
410 try:
410 try:
411 try:
411 try:
412 bundle = exchange.readbundle(self.ui, bundle, None)
412 bundle = exchange.readbundle(self.ui, bundle, None)
413 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
413 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
414 if util.safehasattr(ret, b'getchunks'):
414 if util.safehasattr(ret, b'getchunks'):
415 # This is a bundle20 object, turn it into an unbundler.
415 # This is a bundle20 object, turn it into an unbundler.
416 # This little dance should be dropped eventually when the
416 # This little dance should be dropped eventually when the
417 # API is finally improved.
417 # API is finally improved.
418 stream = util.chunkbuffer(ret.getchunks())
418 stream = util.chunkbuffer(ret.getchunks())
419 ret = bundle2.getunbundler(self.ui, stream)
419 ret = bundle2.getunbundler(self.ui, stream)
420 return ret
420 return ret
421 except Exception as exc:
421 except Exception as exc:
422 # If the exception contains output salvaged from a bundle2
422 # If the exception contains output salvaged from a bundle2
423 # reply, we need to make sure it is printed before continuing
423 # reply, we need to make sure it is printed before continuing
424 # to fail. So we build a bundle2 with such output and consume
424 # to fail. So we build a bundle2 with such output and consume
425 # it directly.
425 # it directly.
426 #
426 #
427 # This is not very elegant but allows a "simple" solution for
427 # This is not very elegant but allows a "simple" solution for
428 # issue4594
428 # issue4594
429 output = getattr(exc, '_bundle2salvagedoutput', ())
429 output = getattr(exc, '_bundle2salvagedoutput', ())
430 if output:
430 if output:
431 bundler = bundle2.bundle20(self._repo.ui)
431 bundler = bundle2.bundle20(self._repo.ui)
432 for out in output:
432 for out in output:
433 bundler.addpart(out)
433 bundler.addpart(out)
434 stream = util.chunkbuffer(bundler.getchunks())
434 stream = util.chunkbuffer(bundler.getchunks())
435 b = bundle2.getunbundler(self.ui, stream)
435 b = bundle2.getunbundler(self.ui, stream)
436 bundle2.processbundle(self._repo, b)
436 bundle2.processbundle(self._repo, b)
437 raise
437 raise
438 except error.PushRaced as exc:
438 except error.PushRaced as exc:
439 raise error.ResponseError(
439 raise error.ResponseError(
440 _(b'push failed:'), stringutil.forcebytestr(exc)
440 _(b'push failed:'), stringutil.forcebytestr(exc)
441 )
441 )
442
442
443 # End of _basewirecommands interface.
443 # End of _basewirecommands interface.
444
444
445 # Begin of peer interface.
445 # Begin of peer interface.
446
446
447 def commandexecutor(self):
447 def commandexecutor(self):
448 return localcommandexecutor(self)
448 return localcommandexecutor(self)
449
449
450 # End of peer interface.
450 # End of peer interface.
451
451
452
452
453 @interfaceutil.implementer(repository.ipeerlegacycommands)
453 @interfaceutil.implementer(repository.ipeerlegacycommands)
454 class locallegacypeer(localpeer):
454 class locallegacypeer(localpeer):
455 """peer extension which implements legacy methods too; used for tests with
455 """peer extension which implements legacy methods too; used for tests with
456 restricted capabilities"""
456 restricted capabilities"""
457
457
458 def __init__(self, repo):
458 def __init__(self, repo):
459 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
459 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
460
460
461 # Begin of baselegacywirecommands interface.
461 # Begin of baselegacywirecommands interface.
462
462
463 def between(self, pairs):
463 def between(self, pairs):
464 return self._repo.between(pairs)
464 return self._repo.between(pairs)
465
465
466 def branches(self, nodes):
466 def branches(self, nodes):
467 return self._repo.branches(nodes)
467 return self._repo.branches(nodes)
468
468
469 def changegroup(self, nodes, source):
469 def changegroup(self, nodes, source):
470 outgoing = discovery.outgoing(
470 outgoing = discovery.outgoing(
471 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
471 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
472 )
472 )
473 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
473 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
474
474
475 def changegroupsubset(self, bases, heads, source):
475 def changegroupsubset(self, bases, heads, source):
476 outgoing = discovery.outgoing(
476 outgoing = discovery.outgoing(
477 self._repo, missingroots=bases, ancestorsof=heads
477 self._repo, missingroots=bases, ancestorsof=heads
478 )
478 )
479 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
479 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
480
480
481 # End of baselegacywirecommands interface.
481 # End of baselegacywirecommands interface.
482
482
483
483
484 # Functions receiving (ui, features) that extensions can register to impact
484 # Functions receiving (ui, features) that extensions can register to impact
485 # the ability to load repositories with custom requirements. Only
485 # the ability to load repositories with custom requirements. Only
486 # functions defined in loaded extensions are called.
486 # functions defined in loaded extensions are called.
487 #
487 #
488 # The function receives a set of requirement strings that the repository
488 # The function receives a set of requirement strings that the repository
489 # is capable of opening. Functions will typically add elements to the
489 # is capable of opening. Functions will typically add elements to the
490 # set to reflect that the extension knows how to handle that requirements.
490 # set to reflect that the extension knows how to handle that requirements.
491 featuresetupfuncs = set()
491 featuresetupfuncs = set()
492
492
493
493
494 def _getsharedvfs(hgvfs, requirements):
494 def _getsharedvfs(hgvfs, requirements):
495 """returns the vfs object pointing to root of shared source
495 """returns the vfs object pointing to root of shared source
496 repo for a shared repository
496 repo for a shared repository
497
497
498 hgvfs is vfs pointing at .hg/ of current repo (shared one)
498 hgvfs is vfs pointing at .hg/ of current repo (shared one)
499 requirements is a set of requirements of current repo (shared one)
499 requirements is a set of requirements of current repo (shared one)
500 """
500 """
501 # The ``shared`` or ``relshared`` requirements indicate the
501 # The ``shared`` or ``relshared`` requirements indicate the
502 # store lives in the path contained in the ``.hg/sharedpath`` file.
502 # store lives in the path contained in the ``.hg/sharedpath`` file.
503 # This is an absolute path for ``shared`` and relative to
503 # This is an absolute path for ``shared`` and relative to
504 # ``.hg/`` for ``relshared``.
504 # ``.hg/`` for ``relshared``.
505 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
505 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
506 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
506 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
507 sharedpath = util.normpath(hgvfs.join(sharedpath))
507 sharedpath = util.normpath(hgvfs.join(sharedpath))
508
508
509 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
509 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
510
510
511 if not sharedvfs.exists():
511 if not sharedvfs.exists():
512 raise error.RepoError(
512 raise error.RepoError(
513 _(b'.hg/sharedpath points to nonexistent directory %s')
513 _(b'.hg/sharedpath points to nonexistent directory %s')
514 % sharedvfs.base
514 % sharedvfs.base
515 )
515 )
516 return sharedvfs
516 return sharedvfs
517
517
518
518
519 def _readrequires(vfs, allowmissing):
519 def _readrequires(vfs, allowmissing):
520 """reads the require file present at root of this vfs
520 """reads the require file present at root of this vfs
521 and return a set of requirements
521 and return a set of requirements
522
522
523 If allowmissing is True, we suppress FileNotFoundError if raised"""
523 If allowmissing is True, we suppress FileNotFoundError if raised"""
524 # requires file contains a newline-delimited list of
524 # requires file contains a newline-delimited list of
525 # features/capabilities the opener (us) must have in order to use
525 # features/capabilities the opener (us) must have in order to use
526 # the repository. This file was introduced in Mercurial 0.9.2,
526 # the repository. This file was introduced in Mercurial 0.9.2,
527 # which means very old repositories may not have one. We assume
527 # which means very old repositories may not have one. We assume
528 # a missing file translates to no requirements.
528 # a missing file translates to no requirements.
529 read = vfs.tryread if allowmissing else vfs.read
529 read = vfs.tryread if allowmissing else vfs.read
530 return set(read(b'requires').splitlines())
530 return set(read(b'requires').splitlines())
531
531
532
532
533 def makelocalrepository(baseui, path: bytes, intents=None):
533 def makelocalrepository(baseui, path: bytes, intents=None):
534 """Create a local repository object.
534 """Create a local repository object.
535
535
536 Given arguments needed to construct a local repository, this function
536 Given arguments needed to construct a local repository, this function
537 performs various early repository loading functionality (such as
537 performs various early repository loading functionality (such as
538 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
538 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
539 the repository can be opened, derives a type suitable for representing
539 the repository can be opened, derives a type suitable for representing
540 that repository, and returns an instance of it.
540 that repository, and returns an instance of it.
541
541
542 The returned object conforms to the ``repository.completelocalrepository``
542 The returned object conforms to the ``repository.completelocalrepository``
543 interface.
543 interface.
544
544
545 The repository type is derived by calling a series of factory functions
545 The repository type is derived by calling a series of factory functions
546 for each aspect/interface of the final repository. These are defined by
546 for each aspect/interface of the final repository. These are defined by
547 ``REPO_INTERFACES``.
547 ``REPO_INTERFACES``.
548
548
549 Each factory function is called to produce a type implementing a specific
549 Each factory function is called to produce a type implementing a specific
550 interface. The cumulative list of returned types will be combined into a
550 interface. The cumulative list of returned types will be combined into a
551 new type and that type will be instantiated to represent the local
551 new type and that type will be instantiated to represent the local
552 repository.
552 repository.
553
553
554 The factory functions each receive various state that may be consulted
554 The factory functions each receive various state that may be consulted
555 as part of deriving a type.
555 as part of deriving a type.
556
556
557 Extensions should wrap these factory functions to customize repository type
557 Extensions should wrap these factory functions to customize repository type
558 creation. Note that an extension's wrapped function may be called even if
558 creation. Note that an extension's wrapped function may be called even if
559 that extension is not loaded for the repo being constructed. Extensions
559 that extension is not loaded for the repo being constructed. Extensions
560 should check if their ``__name__`` appears in the
560 should check if their ``__name__`` appears in the
561 ``extensionmodulenames`` set passed to the factory function and no-op if
561 ``extensionmodulenames`` set passed to the factory function and no-op if
562 not.
562 not.
563 """
563 """
564 ui = baseui.copy()
564 ui = baseui.copy()
565 # Prevent copying repo configuration.
565 # Prevent copying repo configuration.
566 ui.copy = baseui.copy
566 ui.copy = baseui.copy
567
567
568 # Working directory VFS rooted at repository root.
568 # Working directory VFS rooted at repository root.
569 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
569 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
570
570
571 # Main VFS for .hg/ directory.
571 # Main VFS for .hg/ directory.
572 hgpath = wdirvfs.join(b'.hg')
572 hgpath = wdirvfs.join(b'.hg')
573 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
573 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
574 # Whether this repository is shared one or not
574 # Whether this repository is shared one or not
575 shared = False
575 shared = False
576 # If this repository is shared, vfs pointing to shared repo
576 # If this repository is shared, vfs pointing to shared repo
577 sharedvfs = None
577 sharedvfs = None
578
578
579 # The .hg/ path should exist and should be a directory. All other
579 # The .hg/ path should exist and should be a directory. All other
580 # cases are errors.
580 # cases are errors.
581 if not hgvfs.isdir():
581 if not hgvfs.isdir():
582 try:
582 try:
583 hgvfs.stat()
583 hgvfs.stat()
584 except FileNotFoundError:
584 except FileNotFoundError:
585 pass
585 pass
586 except ValueError as e:
586 except ValueError as e:
587 # Can be raised on Python 3.8 when path is invalid.
587 # Can be raised on Python 3.8 when path is invalid.
588 raise error.Abort(
588 raise error.Abort(
589 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
589 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
590 )
590 )
591
591
592 raise error.RepoError(_(b'repository %s not found') % path)
592 raise error.RepoError(_(b'repository %s not found') % path)
593
593
594 requirements = _readrequires(hgvfs, True)
594 requirements = _readrequires(hgvfs, True)
595 shared = (
595 shared = (
596 requirementsmod.SHARED_REQUIREMENT in requirements
596 requirementsmod.SHARED_REQUIREMENT in requirements
597 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
597 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
598 )
598 )
599 storevfs = None
599 storevfs = None
600 if shared:
600 if shared:
601 # This is a shared repo
601 # This is a shared repo
602 sharedvfs = _getsharedvfs(hgvfs, requirements)
602 sharedvfs = _getsharedvfs(hgvfs, requirements)
603 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
603 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
604 else:
604 else:
605 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
605 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
606
606
607 # if .hg/requires contains the sharesafe requirement, it means
607 # if .hg/requires contains the sharesafe requirement, it means
608 # there exists a `.hg/store/requires` too and we should read it
608 # there exists a `.hg/store/requires` too and we should read it
609 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
609 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
610 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
610 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
611 # is not present, refer checkrequirementscompat() for that
611 # is not present, refer checkrequirementscompat() for that
612 #
612 #
613 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
613 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
614 # repository was shared the old way. We check the share source .hg/requires
614 # repository was shared the old way. We check the share source .hg/requires
615 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
615 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
616 # to be reshared
616 # to be reshared
617 hint = _(b"see `hg help config.format.use-share-safe` for more information")
617 hint = _(b"see `hg help config.format.use-share-safe` for more information")
618 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
618 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
619
619
620 if (
620 if (
621 shared
621 shared
622 and requirementsmod.SHARESAFE_REQUIREMENT
622 and requirementsmod.SHARESAFE_REQUIREMENT
623 not in _readrequires(sharedvfs, True)
623 not in _readrequires(sharedvfs, True)
624 ):
624 ):
625 mismatch_warn = ui.configbool(
625 mismatch_warn = ui.configbool(
626 b'share', b'safe-mismatch.source-not-safe.warn'
626 b'share', b'safe-mismatch.source-not-safe.warn'
627 )
627 )
628 mismatch_config = ui.config(
628 mismatch_config = ui.config(
629 b'share', b'safe-mismatch.source-not-safe'
629 b'share', b'safe-mismatch.source-not-safe'
630 )
630 )
631 mismatch_verbose_upgrade = ui.configbool(
631 mismatch_verbose_upgrade = ui.configbool(
632 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
632 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
633 )
633 )
634 if mismatch_config in (
634 if mismatch_config in (
635 b'downgrade-allow',
635 b'downgrade-allow',
636 b'allow',
636 b'allow',
637 b'downgrade-abort',
637 b'downgrade-abort',
638 ):
638 ):
639 # prevent cyclic import localrepo -> upgrade -> localrepo
639 # prevent cyclic import localrepo -> upgrade -> localrepo
640 from . import upgrade
640 from . import upgrade
641
641
642 upgrade.downgrade_share_to_non_safe(
642 upgrade.downgrade_share_to_non_safe(
643 ui,
643 ui,
644 hgvfs,
644 hgvfs,
645 sharedvfs,
645 sharedvfs,
646 requirements,
646 requirements,
647 mismatch_config,
647 mismatch_config,
648 mismatch_warn,
648 mismatch_warn,
649 mismatch_verbose_upgrade,
649 mismatch_verbose_upgrade,
650 )
650 )
651 elif mismatch_config == b'abort':
651 elif mismatch_config == b'abort':
652 raise error.Abort(
652 raise error.Abort(
653 _(b"share source does not support share-safe requirement"),
653 _(b"share source does not support share-safe requirement"),
654 hint=hint,
654 hint=hint,
655 )
655 )
656 else:
656 else:
657 raise error.Abort(
657 raise error.Abort(
658 _(
658 _(
659 b"share-safe mismatch with source.\nUnrecognized"
659 b"share-safe mismatch with source.\nUnrecognized"
660 b" value '%s' of `share.safe-mismatch.source-not-safe`"
660 b" value '%s' of `share.safe-mismatch.source-not-safe`"
661 b" set."
661 b" set."
662 )
662 )
663 % mismatch_config,
663 % mismatch_config,
664 hint=hint,
664 hint=hint,
665 )
665 )
666 else:
666 else:
667 requirements |= _readrequires(storevfs, False)
667 requirements |= _readrequires(storevfs, False)
668 elif shared:
668 elif shared:
669 sourcerequires = _readrequires(sharedvfs, False)
669 sourcerequires = _readrequires(sharedvfs, False)
670 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
670 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
671 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
671 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
672 mismatch_warn = ui.configbool(
672 mismatch_warn = ui.configbool(
673 b'share', b'safe-mismatch.source-safe.warn'
673 b'share', b'safe-mismatch.source-safe.warn'
674 )
674 )
675 mismatch_verbose_upgrade = ui.configbool(
675 mismatch_verbose_upgrade = ui.configbool(
676 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
676 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
677 )
677 )
678 if mismatch_config in (
678 if mismatch_config in (
679 b'upgrade-allow',
679 b'upgrade-allow',
680 b'allow',
680 b'allow',
681 b'upgrade-abort',
681 b'upgrade-abort',
682 ):
682 ):
683 # prevent cyclic import localrepo -> upgrade -> localrepo
683 # prevent cyclic import localrepo -> upgrade -> localrepo
684 from . import upgrade
684 from . import upgrade
685
685
686 upgrade.upgrade_share_to_safe(
686 upgrade.upgrade_share_to_safe(
687 ui,
687 ui,
688 hgvfs,
688 hgvfs,
689 storevfs,
689 storevfs,
690 requirements,
690 requirements,
691 mismatch_config,
691 mismatch_config,
692 mismatch_warn,
692 mismatch_warn,
693 mismatch_verbose_upgrade,
693 mismatch_verbose_upgrade,
694 )
694 )
695 elif mismatch_config == b'abort':
695 elif mismatch_config == b'abort':
696 raise error.Abort(
696 raise error.Abort(
697 _(
697 _(
698 b'version mismatch: source uses share-safe'
698 b'version mismatch: source uses share-safe'
699 b' functionality while the current share does not'
699 b' functionality while the current share does not'
700 ),
700 ),
701 hint=hint,
701 hint=hint,
702 )
702 )
703 else:
703 else:
704 raise error.Abort(
704 raise error.Abort(
705 _(
705 _(
706 b"share-safe mismatch with source.\nUnrecognized"
706 b"share-safe mismatch with source.\nUnrecognized"
707 b" value '%s' of `share.safe-mismatch.source-safe` set."
707 b" value '%s' of `share.safe-mismatch.source-safe` set."
708 )
708 )
709 % mismatch_config,
709 % mismatch_config,
710 hint=hint,
710 hint=hint,
711 )
711 )
712
712
713 # The .hg/hgrc file may load extensions or contain config options
713 # The .hg/hgrc file may load extensions or contain config options
714 # that influence repository construction. Attempt to load it and
714 # that influence repository construction. Attempt to load it and
715 # process any new extensions that it may have pulled in.
715 # process any new extensions that it may have pulled in.
716 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
716 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
717 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
717 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
718 extensions.loadall(ui)
718 extensions.loadall(ui)
719 extensions.populateui(ui)
719 extensions.populateui(ui)
720
720
721 # Set of module names of extensions loaded for this repository.
721 # Set of module names of extensions loaded for this repository.
722 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
722 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
723
723
724 supportedrequirements = gathersupportedrequirements(ui)
724 supportedrequirements = gathersupportedrequirements(ui)
725
725
726 # We first validate the requirements are known.
726 # We first validate the requirements are known.
727 ensurerequirementsrecognized(requirements, supportedrequirements)
727 ensurerequirementsrecognized(requirements, supportedrequirements)
728
728
729 # Then we validate that the known set is reasonable to use together.
729 # Then we validate that the known set is reasonable to use together.
730 ensurerequirementscompatible(ui, requirements)
730 ensurerequirementscompatible(ui, requirements)
731
731
732 # TODO there are unhandled edge cases related to opening repositories with
732 # TODO there are unhandled edge cases related to opening repositories with
733 # shared storage. If storage is shared, we should also test for requirements
733 # shared storage. If storage is shared, we should also test for requirements
734 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
734 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
735 # that repo, as that repo may load extensions needed to open it. This is a
735 # that repo, as that repo may load extensions needed to open it. This is a
736 # bit complicated because we don't want the other hgrc to overwrite settings
736 # bit complicated because we don't want the other hgrc to overwrite settings
737 # in this hgrc.
737 # in this hgrc.
738 #
738 #
739 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
739 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
740 # file when sharing repos. But if a requirement is added after the share is
740 # file when sharing repos. But if a requirement is added after the share is
741 # performed, thereby introducing a new requirement for the opener, we may
741 # performed, thereby introducing a new requirement for the opener, we may
742 # will not see that and could encounter a run-time error interacting with
742 # will not see that and could encounter a run-time error interacting with
743 # that shared store since it has an unknown-to-us requirement.
743 # that shared store since it has an unknown-to-us requirement.
744
744
745 # At this point, we know we should be capable of opening the repository.
745 # At this point, we know we should be capable of opening the repository.
746 # Now get on with doing that.
746 # Now get on with doing that.
747
747
748 features = set()
748 features = set()
749
749
750 # The "store" part of the repository holds versioned data. How it is
750 # The "store" part of the repository holds versioned data. How it is
751 # accessed is determined by various requirements. If `shared` or
751 # accessed is determined by various requirements. If `shared` or
752 # `relshared` requirements are present, this indicates current repository
752 # `relshared` requirements are present, this indicates current repository
753 # is a share and store exists in path mentioned in `.hg/sharedpath`
753 # is a share and store exists in path mentioned in `.hg/sharedpath`
754 if shared:
754 if shared:
755 storebasepath = sharedvfs.base
755 storebasepath = sharedvfs.base
756 cachepath = sharedvfs.join(b'cache')
756 cachepath = sharedvfs.join(b'cache')
757 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
757 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
758 else:
758 else:
759 storebasepath = hgvfs.base
759 storebasepath = hgvfs.base
760 cachepath = hgvfs.join(b'cache')
760 cachepath = hgvfs.join(b'cache')
761 wcachepath = hgvfs.join(b'wcache')
761 wcachepath = hgvfs.join(b'wcache')
762
762
763 # The store has changed over time and the exact layout is dictated by
763 # The store has changed over time and the exact layout is dictated by
764 # requirements. The store interface abstracts differences across all
764 # requirements. The store interface abstracts differences across all
765 # of them.
765 # of them.
766 store = makestore(
766 store = makestore(
767 requirements,
767 requirements,
768 storebasepath,
768 storebasepath,
769 lambda base: vfsmod.vfs(base, cacheaudited=True),
769 lambda base: vfsmod.vfs(base, cacheaudited=True),
770 )
770 )
771 hgvfs.createmode = store.createmode
771 hgvfs.createmode = store.createmode
772
772
773 storevfs = store.vfs
773 storevfs = store.vfs
774 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
774 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
775
775
776 if (
776 if (
777 requirementsmod.REVLOGV2_REQUIREMENT in requirements
777 requirementsmod.REVLOGV2_REQUIREMENT in requirements
778 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
778 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
779 ):
779 ):
780 features.add(repository.REPO_FEATURE_SIDE_DATA)
780 features.add(repository.REPO_FEATURE_SIDE_DATA)
781 # the revlogv2 docket introduced race condition that we need to fix
781 # the revlogv2 docket introduced race condition that we need to fix
782 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
782 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
783
783
784 # The cache vfs is used to manage cache files.
784 # The cache vfs is used to manage cache files.
785 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
785 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
786 cachevfs.createmode = store.createmode
786 cachevfs.createmode = store.createmode
787 # The cache vfs is used to manage cache files related to the working copy
787 # The cache vfs is used to manage cache files related to the working copy
788 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
788 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
789 wcachevfs.createmode = store.createmode
789 wcachevfs.createmode = store.createmode
790
790
791 # Now resolve the type for the repository object. We do this by repeatedly
791 # Now resolve the type for the repository object. We do this by repeatedly
792 # calling a factory function to produces types for specific aspects of the
792 # calling a factory function to produces types for specific aspects of the
793 # repo's operation. The aggregate returned types are used as base classes
793 # repo's operation. The aggregate returned types are used as base classes
794 # for a dynamically-derived type, which will represent our new repository.
794 # for a dynamically-derived type, which will represent our new repository.
795
795
796 bases = []
796 bases = []
797 extrastate = {}
797 extrastate = {}
798
798
799 for iface, fn in REPO_INTERFACES:
799 for iface, fn in REPO_INTERFACES:
800 # We pass all potentially useful state to give extensions tons of
800 # We pass all potentially useful state to give extensions tons of
801 # flexibility.
801 # flexibility.
802 typ = fn()(
802 typ = fn()(
803 ui=ui,
803 ui=ui,
804 intents=intents,
804 intents=intents,
805 requirements=requirements,
805 requirements=requirements,
806 features=features,
806 features=features,
807 wdirvfs=wdirvfs,
807 wdirvfs=wdirvfs,
808 hgvfs=hgvfs,
808 hgvfs=hgvfs,
809 store=store,
809 store=store,
810 storevfs=storevfs,
810 storevfs=storevfs,
811 storeoptions=storevfs.options,
811 storeoptions=storevfs.options,
812 cachevfs=cachevfs,
812 cachevfs=cachevfs,
813 wcachevfs=wcachevfs,
813 wcachevfs=wcachevfs,
814 extensionmodulenames=extensionmodulenames,
814 extensionmodulenames=extensionmodulenames,
815 extrastate=extrastate,
815 extrastate=extrastate,
816 baseclasses=bases,
816 baseclasses=bases,
817 )
817 )
818
818
819 if not isinstance(typ, type):
819 if not isinstance(typ, type):
820 raise error.ProgrammingError(
820 raise error.ProgrammingError(
821 b'unable to construct type for %s' % iface
821 b'unable to construct type for %s' % iface
822 )
822 )
823
823
824 bases.append(typ)
824 bases.append(typ)
825
825
826 # type() allows you to use characters in type names that wouldn't be
826 # type() allows you to use characters in type names that wouldn't be
827 # recognized as Python symbols in source code. We abuse that to add
827 # recognized as Python symbols in source code. We abuse that to add
828 # rich information about our constructed repo.
828 # rich information about our constructed repo.
829 name = pycompat.sysstr(
829 name = pycompat.sysstr(
830 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
830 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
831 )
831 )
832
832
833 cls = type(name, tuple(bases), {})
833 cls = type(name, tuple(bases), {})
834
834
835 return cls(
835 return cls(
836 baseui=baseui,
836 baseui=baseui,
837 ui=ui,
837 ui=ui,
838 origroot=path,
838 origroot=path,
839 wdirvfs=wdirvfs,
839 wdirvfs=wdirvfs,
840 hgvfs=hgvfs,
840 hgvfs=hgvfs,
841 requirements=requirements,
841 requirements=requirements,
842 supportedrequirements=supportedrequirements,
842 supportedrequirements=supportedrequirements,
843 sharedpath=storebasepath,
843 sharedpath=storebasepath,
844 store=store,
844 store=store,
845 cachevfs=cachevfs,
845 cachevfs=cachevfs,
846 wcachevfs=wcachevfs,
846 wcachevfs=wcachevfs,
847 features=features,
847 features=features,
848 intents=intents,
848 intents=intents,
849 )
849 )
850
850
851
851
def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    Called while a repository is being opened, to pull in any additional
    config files or settings relevant to that repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs points at the share source repository when the current one
    is a share; otherwise it is None.
    """
    if not rcutil.use_repo_hgrc():
        return False

    # Candidate (path, root) pairs, read in order. The share source's hgrc
    # (when share-safe is active) comes first so the local repository's own
    # config can override it.
    candidates = []
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        candidates.append((sharedvfs.join(b'hgrc'), sharedvfs.base))
    candidates.append((hgvfs.join(b'hgrc'), wdirvfs.base))
    candidates.append((hgvfs.join(b'hgrc-not-shared'), wdirvfs.base))

    loaded = False
    for rcfile, root in candidates:
        # A missing file is not an error; simply move on to the next one.
        try:
            ui.readconfig(rcfile, root=root)
            loaded = True
        except IOError:
            pass

    return loaded
898
898
899
899
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Extensions to enable automatically when the matching requirement is
    # present in the repository.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement in requirements:
            for name in names:
                # Respect any explicit user configuration for the extension.
                if not ui.hasconfig(b'extensions', name):
                    ui.setconfig(b'extensions', name, b'', source=b'autoload')
925
925
926
926
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Seed with every requirement this module supports natively.
    supported = set(localrepository._basesupported)

    # Run ``featuresetupfuncs`` entries, but only those registered by an
    # extension that is actually loaded for this ui instance.
    loadednames = {m.__name__ for n, m in extensions.extensions(ui)}
    for fn in featuresetupfuncs:
        if fn.__module__ in loadednames:
            fn(ui, supported)

    # Derive additional requirements from the registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported
949
949
950
950
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError``
    if any requirement in that set is not recognized by the currently
    loaded code, or if the requirements file appears corrupt (an entry
    that is empty or does not start with an alphanumeric character).

    Returns ``None`` on success.
    """
    unknown = set()

    for requirement in requirements:
        if requirement not in supported:
            # A requirement that is empty or starts with a non-alphanumeric
            # character indicates a corrupt requires file rather than a
            # merely unsupported feature.
            if not requirement or not requirement[0:1].isalnum():
                raise error.RequirementError(_(b'.hg/requires file is corrupt'))
            unknown.add(requirement)

    if unknown:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(unknown)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
980
980
981
981
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    # The sparse requirement is only usable when the sparse extension is
    # actually enabled.
    wantssparse = requirementsmod.SPARSE_REQUIREMENT in requirements
    if wantssparse and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )
1006
1006
1007
1007
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    # No "store" requirement: the oldest layout, files stored directly.
    if requirementsmod.STORE_REQUIREMENT not in requirements:
        return storemod.basicstore(path, vfstype)

    # Store without fncache: filenames are encoded but not cached.
    if requirementsmod.FNCACHE_REQUIREMENT not in requirements:
        return storemod.encodedstore(path, vfstype)

    # Modern layout: fncache, optionally with dotencode filename encoding.
    dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
    return storemod.fncachestore(path, vfstype, dotencode)
1018
1018
1019
1019
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    opts = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        opts[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        opts[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    hasrevlog = (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    )
    if hasrevlog:
        opts.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:
        # explicitly mark repo as using revlogv0
        opts[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        opts[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        if writecopiesto in (b'changeset-only', b'compatibility'):
            opts[b'copies-storage'] = b'extra'

    return opts
1057
1057
1058
1058
1059 def resolverevlogstorevfsoptions(ui, requirements, features):
1059 def resolverevlogstorevfsoptions(ui, requirements, features):
1060 """Resolve opener options specific to revlogs."""
1060 """Resolve opener options specific to revlogs."""
1061
1061
1062 options = {}
1062 options = {}
1063 options[b'flagprocessors'] = {}
1063 options[b'flagprocessors'] = {}
1064
1064
1065 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1065 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1066 options[b'revlogv1'] = True
1066 options[b'revlogv1'] = True
1067 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1067 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1068 options[b'revlogv2'] = True
1068 options[b'revlogv2'] = True
1069 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1069 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1070 options[b'changelogv2'] = True
1070 options[b'changelogv2'] = True
1071
1071
1072 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1072 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1073 options[b'generaldelta'] = True
1073 options[b'generaldelta'] = True
1074
1074
1075 # experimental config: format.chunkcachesize
1075 # experimental config: format.chunkcachesize
1076 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1076 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1077 if chunkcachesize is not None:
1077 if chunkcachesize is not None:
1078 options[b'chunkcachesize'] = chunkcachesize
1078 options[b'chunkcachesize'] = chunkcachesize
1079
1079
1080 deltabothparents = ui.configbool(
1080 deltabothparents = ui.configbool(
1081 b'storage', b'revlog.optimize-delta-parent-choice'
1081 b'storage', b'revlog.optimize-delta-parent-choice'
1082 )
1082 )
1083 options[b'deltabothparents'] = deltabothparents
1083 options[b'deltabothparents'] = deltabothparents
1084 dps_cgds = ui.configint(
1085 b'storage',
1086 b'revlog.delta-parent-search.candidate-group-chunk-size',
1087 )
1088 options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
1084 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1089 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1085
1090
1086 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1091 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1087 options[b'issue6528.fix-incoming'] = issue6528
1092 options[b'issue6528.fix-incoming'] = issue6528
1088
1093
1089 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1094 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1090 lazydeltabase = False
1095 lazydeltabase = False
1091 if lazydelta:
1096 if lazydelta:
1092 lazydeltabase = ui.configbool(
1097 lazydeltabase = ui.configbool(
1093 b'storage', b'revlog.reuse-external-delta-parent'
1098 b'storage', b'revlog.reuse-external-delta-parent'
1094 )
1099 )
1095 if lazydeltabase is None:
1100 if lazydeltabase is None:
1096 lazydeltabase = not scmutil.gddeltaconfig(ui)
1101 lazydeltabase = not scmutil.gddeltaconfig(ui)
1097 options[b'lazydelta'] = lazydelta
1102 options[b'lazydelta'] = lazydelta
1098 options[b'lazydeltabase'] = lazydeltabase
1103 options[b'lazydeltabase'] = lazydeltabase
1099
1104
1100 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1105 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1101 if 0 <= chainspan:
1106 if 0 <= chainspan:
1102 options[b'maxdeltachainspan'] = chainspan
1107 options[b'maxdeltachainspan'] = chainspan
1103
1108
1104 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1109 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1105 if mmapindexthreshold is not None:
1110 if mmapindexthreshold is not None:
1106 options[b'mmapindexthreshold'] = mmapindexthreshold
1111 options[b'mmapindexthreshold'] = mmapindexthreshold
1107
1112
1108 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1113 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1109 srdensitythres = float(
1114 srdensitythres = float(
1110 ui.config(b'experimental', b'sparse-read.density-threshold')
1115 ui.config(b'experimental', b'sparse-read.density-threshold')
1111 )
1116 )
1112 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1117 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1113 options[b'with-sparse-read'] = withsparseread
1118 options[b'with-sparse-read'] = withsparseread
1114 options[b'sparse-read-density-threshold'] = srdensitythres
1119 options[b'sparse-read-density-threshold'] = srdensitythres
1115 options[b'sparse-read-min-gap-size'] = srmingapsize
1120 options[b'sparse-read-min-gap-size'] = srmingapsize
1116
1121
1117 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1122 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1118 options[b'sparse-revlog'] = sparserevlog
1123 options[b'sparse-revlog'] = sparserevlog
1119 if sparserevlog:
1124 if sparserevlog:
1120 options[b'generaldelta'] = True
1125 options[b'generaldelta'] = True
1121
1126
1122 maxchainlen = None
1127 maxchainlen = None
1123 if sparserevlog:
1128 if sparserevlog:
1124 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1129 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1125 # experimental config: format.maxchainlen
1130 # experimental config: format.maxchainlen
1126 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1131 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1127 if maxchainlen is not None:
1132 if maxchainlen is not None:
1128 options[b'maxchainlen'] = maxchainlen
1133 options[b'maxchainlen'] = maxchainlen
1129
1134
1130 for r in requirements:
1135 for r in requirements:
1131 # we allow multiple compression engine requirement to co-exist because
1136 # we allow multiple compression engine requirement to co-exist because
1132 # strickly speaking, revlog seems to support mixed compression style.
1137 # strickly speaking, revlog seems to support mixed compression style.
1133 #
1138 #
1134 # The compression used for new entries will be "the last one"
1139 # The compression used for new entries will be "the last one"
1135 prefix = r.startswith
1140 prefix = r.startswith
1136 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1141 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1137 options[b'compengine'] = r.split(b'-', 2)[2]
1142 options[b'compengine'] = r.split(b'-', 2)[2]
1138
1143
1139 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1144 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1140 if options[b'zlib.level'] is not None:
1145 if options[b'zlib.level'] is not None:
1141 if not (0 <= options[b'zlib.level'] <= 9):
1146 if not (0 <= options[b'zlib.level'] <= 9):
1142 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1147 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1143 raise error.Abort(msg % options[b'zlib.level'])
1148 raise error.Abort(msg % options[b'zlib.level'])
1144 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1149 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1145 if options[b'zstd.level'] is not None:
1150 if options[b'zstd.level'] is not None:
1146 if not (0 <= options[b'zstd.level'] <= 22):
1151 if not (0 <= options[b'zstd.level'] <= 22):
1147 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1152 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1148 raise error.Abort(msg % options[b'zstd.level'])
1153 raise error.Abort(msg % options[b'zstd.level'])
1149
1154
1150 if requirementsmod.NARROW_REQUIREMENT in requirements:
1155 if requirementsmod.NARROW_REQUIREMENT in requirements:
1151 options[b'enableellipsis'] = True
1156 options[b'enableellipsis'] = True
1152
1157
1153 if ui.configbool(b'experimental', b'rust.index'):
1158 if ui.configbool(b'experimental', b'rust.index'):
1154 options[b'rust.index'] = True
1159 options[b'rust.index'] = True
1155 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1160 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1156 slow_path = ui.config(
1161 slow_path = ui.config(
1157 b'storage', b'revlog.persistent-nodemap.slow-path'
1162 b'storage', b'revlog.persistent-nodemap.slow-path'
1158 )
1163 )
1159 if slow_path not in (b'allow', b'warn', b'abort'):
1164 if slow_path not in (b'allow', b'warn', b'abort'):
1160 default = ui.config_default(
1165 default = ui.config_default(
1161 b'storage', b'revlog.persistent-nodemap.slow-path'
1166 b'storage', b'revlog.persistent-nodemap.slow-path'
1162 )
1167 )
1163 msg = _(
1168 msg = _(
1164 b'unknown value for config '
1169 b'unknown value for config '
1165 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1170 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1166 )
1171 )
1167 ui.warn(msg % slow_path)
1172 ui.warn(msg % slow_path)
1168 if not ui.quiet:
1173 if not ui.quiet:
1169 ui.warn(_(b'falling back to default value: %s\n') % default)
1174 ui.warn(_(b'falling back to default value: %s\n') % default)
1170 slow_path = default
1175 slow_path = default
1171
1176
1172 msg = _(
1177 msg = _(
1173 b"accessing `persistent-nodemap` repository without associated "
1178 b"accessing `persistent-nodemap` repository without associated "
1174 b"fast implementation."
1179 b"fast implementation."
1175 )
1180 )
1176 hint = _(
1181 hint = _(
1177 b"check `hg help config.format.use-persistent-nodemap` "
1182 b"check `hg help config.format.use-persistent-nodemap` "
1178 b"for details"
1183 b"for details"
1179 )
1184 )
1180 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1185 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1181 if slow_path == b'warn':
1186 if slow_path == b'warn':
1182 msg = b"warning: " + msg + b'\n'
1187 msg = b"warning: " + msg + b'\n'
1183 ui.warn(msg)
1188 ui.warn(msg)
1184 if not ui.quiet:
1189 if not ui.quiet:
1185 hint = b'(' + hint + b')\n'
1190 hint = b'(' + hint + b')\n'
1186 ui.warn(hint)
1191 ui.warn(hint)
1187 if slow_path == b'abort':
1192 if slow_path == b'abort':
1188 raise error.Abort(msg, hint=hint)
1193 raise error.Abort(msg, hint=hint)
1189 options[b'persistent-nodemap'] = True
1194 options[b'persistent-nodemap'] = True
1190 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1195 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1191 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1196 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1192 if slow_path not in (b'allow', b'warn', b'abort'):
1197 if slow_path not in (b'allow', b'warn', b'abort'):
1193 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1198 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1194 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1199 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1195 ui.warn(msg % slow_path)
1200 ui.warn(msg % slow_path)
1196 if not ui.quiet:
1201 if not ui.quiet:
1197 ui.warn(_(b'falling back to default value: %s\n') % default)
1202 ui.warn(_(b'falling back to default value: %s\n') % default)
1198 slow_path = default
1203 slow_path = default
1199
1204
1200 msg = _(
1205 msg = _(
1201 b"accessing `dirstate-v2` repository without associated "
1206 b"accessing `dirstate-v2` repository without associated "
1202 b"fast implementation."
1207 b"fast implementation."
1203 )
1208 )
1204 hint = _(
1209 hint = _(
1205 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1210 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1206 )
1211 )
1207 if not dirstate.HAS_FAST_DIRSTATE_V2:
1212 if not dirstate.HAS_FAST_DIRSTATE_V2:
1208 if slow_path == b'warn':
1213 if slow_path == b'warn':
1209 msg = b"warning: " + msg + b'\n'
1214 msg = b"warning: " + msg + b'\n'
1210 ui.warn(msg)
1215 ui.warn(msg)
1211 if not ui.quiet:
1216 if not ui.quiet:
1212 hint = b'(' + hint + b')\n'
1217 hint = b'(' + hint + b')\n'
1213 ui.warn(hint)
1218 ui.warn(hint)
1214 if slow_path == b'abort':
1219 if slow_path == b'abort':
1215 raise error.Abort(msg, hint=hint)
1220 raise error.Abort(msg, hint=hint)
1216 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1221 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1217 options[b'persistent-nodemap.mmap'] = True
1222 options[b'persistent-nodemap.mmap'] = True
1218 if ui.configbool(b'devel', b'persistent-nodemap'):
1223 if ui.configbool(b'devel', b'persistent-nodemap'):
1219 options[b'devel-force-nodemap'] = True
1224 options[b'devel-force-nodemap'] = True
1220
1225
1221 return options
1226 return options
1222
1227
1223
1228
1224 def makemain(**kwargs):
1229 def makemain(**kwargs):
1225 """Produce a type conforming to ``ilocalrepositorymain``."""
1230 """Produce a type conforming to ``ilocalrepositorymain``."""
1226 return localrepository
1231 return localrepository
1227
1232
1228
1233
1229 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1234 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1230 class revlogfilestorage:
1235 class revlogfilestorage:
1231 """File storage when using revlogs."""
1236 """File storage when using revlogs."""
1232
1237
1233 def file(self, path):
1238 def file(self, path):
1234 if path.startswith(b'/'):
1239 if path.startswith(b'/'):
1235 path = path[1:]
1240 path = path[1:]
1236
1241
1237 return filelog.filelog(self.svfs, path)
1242 return filelog.filelog(self.svfs, path)
1238
1243
1239
1244
1240 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1245 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1241 class revlognarrowfilestorage:
1246 class revlognarrowfilestorage:
1242 """File storage when using revlogs and narrow files."""
1247 """File storage when using revlogs and narrow files."""
1243
1248
1244 def file(self, path):
1249 def file(self, path):
1245 if path.startswith(b'/'):
1250 if path.startswith(b'/'):
1246 path = path[1:]
1251 path = path[1:]
1247
1252
1248 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1253 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1249
1254
1250
1255
1251 def makefilestorage(requirements, features, **kwargs):
1256 def makefilestorage(requirements, features, **kwargs):
1252 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1257 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1253 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1258 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1254 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1259 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1255
1260
1256 if requirementsmod.NARROW_REQUIREMENT in requirements:
1261 if requirementsmod.NARROW_REQUIREMENT in requirements:
1257 return revlognarrowfilestorage
1262 return revlognarrowfilestorage
1258 else:
1263 else:
1259 return revlogfilestorage
1264 return revlogfilestorage
1260
1265
1261
1266
1262 # List of repository interfaces and factory functions for them. Each
1267 # List of repository interfaces and factory functions for them. Each
1263 # will be called in order during ``makelocalrepository()`` to iteratively
1268 # will be called in order during ``makelocalrepository()`` to iteratively
1264 # derive the final type for a local repository instance. We capture the
1269 # derive the final type for a local repository instance. We capture the
1265 # function as a lambda so we don't hold a reference and the module-level
1270 # function as a lambda so we don't hold a reference and the module-level
1266 # functions can be wrapped.
1271 # functions can be wrapped.
1267 REPO_INTERFACES = [
1272 REPO_INTERFACES = [
1268 (repository.ilocalrepositorymain, lambda: makemain),
1273 (repository.ilocalrepositorymain, lambda: makemain),
1269 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1274 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1270 ]
1275 ]
1271
1276
1272
1277
1273 @interfaceutil.implementer(repository.ilocalrepositorymain)
1278 @interfaceutil.implementer(repository.ilocalrepositorymain)
1274 class localrepository:
1279 class localrepository:
1275 """Main class for representing local repositories.
1280 """Main class for representing local repositories.
1276
1281
1277 All local repositories are instances of this class.
1282 All local repositories are instances of this class.
1278
1283
1279 Constructed on its own, instances of this class are not usable as
1284 Constructed on its own, instances of this class are not usable as
1280 repository objects. To obtain a usable repository object, call
1285 repository objects. To obtain a usable repository object, call
1281 ``hg.repository()``, ``localrepo.instance()``, or
1286 ``hg.repository()``, ``localrepo.instance()``, or
1282 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1287 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1283 ``instance()`` adds support for creating new repositories.
1288 ``instance()`` adds support for creating new repositories.
1284 ``hg.repository()`` adds more extension integration, including calling
1289 ``hg.repository()`` adds more extension integration, including calling
1285 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1290 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1286 used.
1291 used.
1287 """
1292 """
1288
1293
1289 _basesupported = {
1294 _basesupported = {
1290 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1295 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1291 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1296 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1292 requirementsmod.CHANGELOGV2_REQUIREMENT,
1297 requirementsmod.CHANGELOGV2_REQUIREMENT,
1293 requirementsmod.COPIESSDC_REQUIREMENT,
1298 requirementsmod.COPIESSDC_REQUIREMENT,
1294 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1299 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1295 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1300 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1296 requirementsmod.DOTENCODE_REQUIREMENT,
1301 requirementsmod.DOTENCODE_REQUIREMENT,
1297 requirementsmod.FNCACHE_REQUIREMENT,
1302 requirementsmod.FNCACHE_REQUIREMENT,
1298 requirementsmod.GENERALDELTA_REQUIREMENT,
1303 requirementsmod.GENERALDELTA_REQUIREMENT,
1299 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1304 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1300 requirementsmod.NODEMAP_REQUIREMENT,
1305 requirementsmod.NODEMAP_REQUIREMENT,
1301 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1306 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1302 requirementsmod.REVLOGV1_REQUIREMENT,
1307 requirementsmod.REVLOGV1_REQUIREMENT,
1303 requirementsmod.REVLOGV2_REQUIREMENT,
1308 requirementsmod.REVLOGV2_REQUIREMENT,
1304 requirementsmod.SHARED_REQUIREMENT,
1309 requirementsmod.SHARED_REQUIREMENT,
1305 requirementsmod.SHARESAFE_REQUIREMENT,
1310 requirementsmod.SHARESAFE_REQUIREMENT,
1306 requirementsmod.SPARSE_REQUIREMENT,
1311 requirementsmod.SPARSE_REQUIREMENT,
1307 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1312 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1308 requirementsmod.STORE_REQUIREMENT,
1313 requirementsmod.STORE_REQUIREMENT,
1309 requirementsmod.TREEMANIFEST_REQUIREMENT,
1314 requirementsmod.TREEMANIFEST_REQUIREMENT,
1310 }
1315 }
1311
1316
1312 # list of prefix for file which can be written without 'wlock'
1317 # list of prefix for file which can be written without 'wlock'
1313 # Extensions should extend this list when needed
1318 # Extensions should extend this list when needed
1314 _wlockfreeprefix = {
1319 _wlockfreeprefix = {
1315 # We migh consider requiring 'wlock' for the next
1320 # We migh consider requiring 'wlock' for the next
1316 # two, but pretty much all the existing code assume
1321 # two, but pretty much all the existing code assume
1317 # wlock is not needed so we keep them excluded for
1322 # wlock is not needed so we keep them excluded for
1318 # now.
1323 # now.
1319 b'hgrc',
1324 b'hgrc',
1320 b'requires',
1325 b'requires',
1321 # XXX cache is a complicatged business someone
1326 # XXX cache is a complicatged business someone
1322 # should investigate this in depth at some point
1327 # should investigate this in depth at some point
1323 b'cache/',
1328 b'cache/',
1324 # XXX shouldn't be dirstate covered by the wlock?
1329 # XXX shouldn't be dirstate covered by the wlock?
1325 b'dirstate',
1330 b'dirstate',
1326 # XXX bisect was still a bit too messy at the time
1331 # XXX bisect was still a bit too messy at the time
1327 # this changeset was introduced. Someone should fix
1332 # this changeset was introduced. Someone should fix
1328 # the remainig bit and drop this line
1333 # the remainig bit and drop this line
1329 b'bisect.state',
1334 b'bisect.state',
1330 }
1335 }
1331
1336
1332 def __init__(
1337 def __init__(
1333 self,
1338 self,
1334 baseui,
1339 baseui,
1335 ui,
1340 ui,
1336 origroot: bytes,
1341 origroot: bytes,
1337 wdirvfs: vfsmod.vfs,
1342 wdirvfs: vfsmod.vfs,
1338 hgvfs: vfsmod.vfs,
1343 hgvfs: vfsmod.vfs,
1339 requirements,
1344 requirements,
1340 supportedrequirements,
1345 supportedrequirements,
1341 sharedpath: bytes,
1346 sharedpath: bytes,
1342 store,
1347 store,
1343 cachevfs: vfsmod.vfs,
1348 cachevfs: vfsmod.vfs,
1344 wcachevfs: vfsmod.vfs,
1349 wcachevfs: vfsmod.vfs,
1345 features,
1350 features,
1346 intents=None,
1351 intents=None,
1347 ):
1352 ):
1348 """Create a new local repository instance.
1353 """Create a new local repository instance.
1349
1354
1350 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1355 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1351 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1356 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1352 object.
1357 object.
1353
1358
1354 Arguments:
1359 Arguments:
1355
1360
1356 baseui
1361 baseui
1357 ``ui.ui`` instance that ``ui`` argument was based off of.
1362 ``ui.ui`` instance that ``ui`` argument was based off of.
1358
1363
1359 ui
1364 ui
1360 ``ui.ui`` instance for use by the repository.
1365 ``ui.ui`` instance for use by the repository.
1361
1366
1362 origroot
1367 origroot
1363 ``bytes`` path to working directory root of this repository.
1368 ``bytes`` path to working directory root of this repository.
1364
1369
1365 wdirvfs
1370 wdirvfs
1366 ``vfs.vfs`` rooted at the working directory.
1371 ``vfs.vfs`` rooted at the working directory.
1367
1372
1368 hgvfs
1373 hgvfs
1369 ``vfs.vfs`` rooted at .hg/
1374 ``vfs.vfs`` rooted at .hg/
1370
1375
1371 requirements
1376 requirements
1372 ``set`` of bytestrings representing repository opening requirements.
1377 ``set`` of bytestrings representing repository opening requirements.
1373
1378
1374 supportedrequirements
1379 supportedrequirements
1375 ``set`` of bytestrings representing repository requirements that we
1380 ``set`` of bytestrings representing repository requirements that we
1376 know how to open. May be a supetset of ``requirements``.
1381 know how to open. May be a supetset of ``requirements``.
1377
1382
1378 sharedpath
1383 sharedpath
1379 ``bytes`` Defining path to storage base directory. Points to a
1384 ``bytes`` Defining path to storage base directory. Points to a
1380 ``.hg/`` directory somewhere.
1385 ``.hg/`` directory somewhere.
1381
1386
1382 store
1387 store
1383 ``store.basicstore`` (or derived) instance providing access to
1388 ``store.basicstore`` (or derived) instance providing access to
1384 versioned storage.
1389 versioned storage.
1385
1390
1386 cachevfs
1391 cachevfs
1387 ``vfs.vfs`` used for cache files.
1392 ``vfs.vfs`` used for cache files.
1388
1393
1389 wcachevfs
1394 wcachevfs
1390 ``vfs.vfs`` used for cache files related to the working copy.
1395 ``vfs.vfs`` used for cache files related to the working copy.
1391
1396
1392 features
1397 features
1393 ``set`` of bytestrings defining features/capabilities of this
1398 ``set`` of bytestrings defining features/capabilities of this
1394 instance.
1399 instance.
1395
1400
1396 intents
1401 intents
1397 ``set`` of system strings indicating what this repo will be used
1402 ``set`` of system strings indicating what this repo will be used
1398 for.
1403 for.
1399 """
1404 """
1400 self.baseui = baseui
1405 self.baseui = baseui
1401 self.ui = ui
1406 self.ui = ui
1402 self.origroot = origroot
1407 self.origroot = origroot
1403 # vfs rooted at working directory.
1408 # vfs rooted at working directory.
1404 self.wvfs = wdirvfs
1409 self.wvfs = wdirvfs
1405 self.root = wdirvfs.base
1410 self.root = wdirvfs.base
1406 # vfs rooted at .hg/. Used to access most non-store paths.
1411 # vfs rooted at .hg/. Used to access most non-store paths.
1407 self.vfs = hgvfs
1412 self.vfs = hgvfs
1408 self.path = hgvfs.base
1413 self.path = hgvfs.base
1409 self.requirements = requirements
1414 self.requirements = requirements
1410 self.nodeconstants = sha1nodeconstants
1415 self.nodeconstants = sha1nodeconstants
1411 self.nullid = self.nodeconstants.nullid
1416 self.nullid = self.nodeconstants.nullid
1412 self.supported = supportedrequirements
1417 self.supported = supportedrequirements
1413 self.sharedpath = sharedpath
1418 self.sharedpath = sharedpath
1414 self.store = store
1419 self.store = store
1415 self.cachevfs = cachevfs
1420 self.cachevfs = cachevfs
1416 self.wcachevfs = wcachevfs
1421 self.wcachevfs = wcachevfs
1417 self.features = features
1422 self.features = features
1418
1423
1419 self.filtername = None
1424 self.filtername = None
1420
1425
1421 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1426 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1422 b'devel', b'check-locks'
1427 b'devel', b'check-locks'
1423 ):
1428 ):
1424 self.vfs.audit = self._getvfsward(self.vfs.audit)
1429 self.vfs.audit = self._getvfsward(self.vfs.audit)
1425 # A list of callback to shape the phase if no data were found.
1430 # A list of callback to shape the phase if no data were found.
1426 # Callback are in the form: func(repo, roots) --> processed root.
1431 # Callback are in the form: func(repo, roots) --> processed root.
1427 # This list it to be filled by extension during repo setup
1432 # This list it to be filled by extension during repo setup
1428 self._phasedefaults = []
1433 self._phasedefaults = []
1429
1434
1430 color.setup(self.ui)
1435 color.setup(self.ui)
1431
1436
1432 self.spath = self.store.path
1437 self.spath = self.store.path
1433 self.svfs = self.store.vfs
1438 self.svfs = self.store.vfs
1434 self.sjoin = self.store.join
1439 self.sjoin = self.store.join
1435 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1440 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1436 b'devel', b'check-locks'
1441 b'devel', b'check-locks'
1437 ):
1442 ):
1438 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1443 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1439 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1444 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1440 else: # standard vfs
1445 else: # standard vfs
1441 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1446 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1442
1447
1443 self._dirstatevalidatewarned = False
1448 self._dirstatevalidatewarned = False
1444
1449
1445 self._branchcaches = branchmap.BranchMapCache()
1450 self._branchcaches = branchmap.BranchMapCache()
1446 self._revbranchcache = None
1451 self._revbranchcache = None
1447 self._filterpats = {}
1452 self._filterpats = {}
1448 self._datafilters = {}
1453 self._datafilters = {}
1449 self._transref = self._lockref = self._wlockref = None
1454 self._transref = self._lockref = self._wlockref = None
1450
1455
1451 # A cache for various files under .hg/ that tracks file changes,
1456 # A cache for various files under .hg/ that tracks file changes,
1452 # (used by the filecache decorator)
1457 # (used by the filecache decorator)
1453 #
1458 #
1454 # Maps a property name to its util.filecacheentry
1459 # Maps a property name to its util.filecacheentry
1455 self._filecache = {}
1460 self._filecache = {}
1456
1461
1457 # hold sets of revision to be filtered
1462 # hold sets of revision to be filtered
1458 # should be cleared when something might have changed the filter value:
1463 # should be cleared when something might have changed the filter value:
1459 # - new changesets,
1464 # - new changesets,
1460 # - phase change,
1465 # - phase change,
1461 # - new obsolescence marker,
1466 # - new obsolescence marker,
1462 # - working directory parent change,
1467 # - working directory parent change,
1463 # - bookmark changes
1468 # - bookmark changes
1464 self.filteredrevcache = {}
1469 self.filteredrevcache = {}
1465
1470
1466 # post-dirstate-status hooks
1471 # post-dirstate-status hooks
1467 self._postdsstatus = []
1472 self._postdsstatus = []
1468
1473
1469 # generic mapping between names and nodes
1474 # generic mapping between names and nodes
1470 self.names = namespaces.namespaces()
1475 self.names = namespaces.namespaces()
1471
1476
1472 # Key to signature value.
1477 # Key to signature value.
1473 self._sparsesignaturecache = {}
1478 self._sparsesignaturecache = {}
1474 # Signature to cached matcher instance.
1479 # Signature to cached matcher instance.
1475 self._sparsematchercache = {}
1480 self._sparsematchercache = {}
1476
1481
1477 self._extrafilterid = repoview.extrafilter(ui)
1482 self._extrafilterid = repoview.extrafilter(ui)
1478
1483
1479 self.filecopiesmode = None
1484 self.filecopiesmode = None
1480 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1485 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1481 self.filecopiesmode = b'changeset-sidedata'
1486 self.filecopiesmode = b'changeset-sidedata'
1482
1487
1483 self._wanted_sidedata = set()
1488 self._wanted_sidedata = set()
1484 self._sidedata_computers = {}
1489 self._sidedata_computers = {}
1485 sidedatamod.set_sidedata_spec_for_repo(self)
1490 sidedatamod.set_sidedata_spec_for_repo(self)
1486
1491
1487 def _getvfsward(self, origfunc):
1492 def _getvfsward(self, origfunc):
1488 """build a ward for self.vfs"""
1493 """build a ward for self.vfs"""
1489 rref = weakref.ref(self)
1494 rref = weakref.ref(self)
1490
1495
1491 def checkvfs(path, mode=None):
1496 def checkvfs(path, mode=None):
1492 ret = origfunc(path, mode=mode)
1497 ret = origfunc(path, mode=mode)
1493 repo = rref()
1498 repo = rref()
1494 if (
1499 if (
1495 repo is None
1500 repo is None
1496 or not util.safehasattr(repo, b'_wlockref')
1501 or not util.safehasattr(repo, b'_wlockref')
1497 or not util.safehasattr(repo, b'_lockref')
1502 or not util.safehasattr(repo, b'_lockref')
1498 ):
1503 ):
1499 return
1504 return
1500 if mode in (None, b'r', b'rb'):
1505 if mode in (None, b'r', b'rb'):
1501 return
1506 return
1502 if path.startswith(repo.path):
1507 if path.startswith(repo.path):
1503 # truncate name relative to the repository (.hg)
1508 # truncate name relative to the repository (.hg)
1504 path = path[len(repo.path) + 1 :]
1509 path = path[len(repo.path) + 1 :]
1505 if path.startswith(b'cache/'):
1510 if path.startswith(b'cache/'):
1506 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1511 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1507 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1512 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1508 # path prefixes covered by 'lock'
1513 # path prefixes covered by 'lock'
1509 vfs_path_prefixes = (
1514 vfs_path_prefixes = (
1510 b'journal.',
1515 b'journal.',
1511 b'undo.',
1516 b'undo.',
1512 b'strip-backup/',
1517 b'strip-backup/',
1513 b'cache/',
1518 b'cache/',
1514 )
1519 )
1515 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1520 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1516 if repo._currentlock(repo._lockref) is None:
1521 if repo._currentlock(repo._lockref) is None:
1517 repo.ui.develwarn(
1522 repo.ui.develwarn(
1518 b'write with no lock: "%s"' % path,
1523 b'write with no lock: "%s"' % path,
1519 stacklevel=3,
1524 stacklevel=3,
1520 config=b'check-locks',
1525 config=b'check-locks',
1521 )
1526 )
1522 elif repo._currentlock(repo._wlockref) is None:
1527 elif repo._currentlock(repo._wlockref) is None:
1523 # rest of vfs files are covered by 'wlock'
1528 # rest of vfs files are covered by 'wlock'
1524 #
1529 #
1525 # exclude special files
1530 # exclude special files
1526 for prefix in self._wlockfreeprefix:
1531 for prefix in self._wlockfreeprefix:
1527 if path.startswith(prefix):
1532 if path.startswith(prefix):
1528 return
1533 return
1529 repo.ui.develwarn(
1534 repo.ui.develwarn(
1530 b'write with no wlock: "%s"' % path,
1535 b'write with no wlock: "%s"' % path,
1531 stacklevel=3,
1536 stacklevel=3,
1532 config=b'check-locks',
1537 config=b'check-locks',
1533 )
1538 )
1534 return ret
1539 return ret
1535
1540
1536 return checkvfs
1541 return checkvfs
1537
1542
1538 def _getsvfsward(self, origfunc):
1543 def _getsvfsward(self, origfunc):
1539 """build a ward for self.svfs"""
1544 """build a ward for self.svfs"""
1540 rref = weakref.ref(self)
1545 rref = weakref.ref(self)
1541
1546
1542 def checksvfs(path, mode=None):
1547 def checksvfs(path, mode=None):
1543 ret = origfunc(path, mode=mode)
1548 ret = origfunc(path, mode=mode)
1544 repo = rref()
1549 repo = rref()
1545 if repo is None or not util.safehasattr(repo, b'_lockref'):
1550 if repo is None or not util.safehasattr(repo, b'_lockref'):
1546 return
1551 return
1547 if mode in (None, b'r', b'rb'):
1552 if mode in (None, b'r', b'rb'):
1548 return
1553 return
1549 if path.startswith(repo.sharedpath):
1554 if path.startswith(repo.sharedpath):
1550 # truncate name relative to the repository (.hg)
1555 # truncate name relative to the repository (.hg)
1551 path = path[len(repo.sharedpath) + 1 :]
1556 path = path[len(repo.sharedpath) + 1 :]
1552 if repo._currentlock(repo._lockref) is None:
1557 if repo._currentlock(repo._lockref) is None:
1553 repo.ui.develwarn(
1558 repo.ui.develwarn(
1554 b'write with no lock: "%s"' % path, stacklevel=4
1559 b'write with no lock: "%s"' % path, stacklevel=4
1555 )
1560 )
1556 return ret
1561 return ret
1557
1562
1558 return checksvfs
1563 return checksvfs
1559
1564
1560 def close(self):
1565 def close(self):
1561 self._writecaches()
1566 self._writecaches()
1562
1567
1563 def _writecaches(self):
1568 def _writecaches(self):
1564 if self._revbranchcache:
1569 if self._revbranchcache:
1565 self._revbranchcache.write()
1570 self._revbranchcache.write()
1566
1571
1567 def _restrictcapabilities(self, caps):
1572 def _restrictcapabilities(self, caps):
1568 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1573 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1569 caps = set(caps)
1574 caps = set(caps)
1570 capsblob = bundle2.encodecaps(
1575 capsblob = bundle2.encodecaps(
1571 bundle2.getrepocaps(self, role=b'client')
1576 bundle2.getrepocaps(self, role=b'client')
1572 )
1577 )
1573 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1578 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1574 if self.ui.configbool(b'experimental', b'narrow'):
1579 if self.ui.configbool(b'experimental', b'narrow'):
1575 caps.add(wireprototypes.NARROWCAP)
1580 caps.add(wireprototypes.NARROWCAP)
1576 return caps
1581 return caps
1577
1582
1578 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1583 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1579 # self -> auditor -> self._checknested -> self
1584 # self -> auditor -> self._checknested -> self
1580
1585
1581 @property
1586 @property
1582 def auditor(self):
1587 def auditor(self):
1583 # This is only used by context.workingctx.match in order to
1588 # This is only used by context.workingctx.match in order to
1584 # detect files in subrepos.
1589 # detect files in subrepos.
1585 return pathutil.pathauditor(self.root, callback=self._checknested)
1590 return pathutil.pathauditor(self.root, callback=self._checknested)
1586
1591
1587 @property
1592 @property
1588 def nofsauditor(self):
1593 def nofsauditor(self):
1589 # This is only used by context.basectx.match in order to detect
1594 # This is only used by context.basectx.match in order to detect
1590 # files in subrepos.
1595 # files in subrepos.
1591 return pathutil.pathauditor(
1596 return pathutil.pathauditor(
1592 self.root, callback=self._checknested, realfs=False, cached=True
1597 self.root, callback=self._checknested, realfs=False, cached=True
1593 )
1598 )
1594
1599
1595 def _checknested(self, path):
1600 def _checknested(self, path):
1596 """Determine if path is a legal nested repository."""
1601 """Determine if path is a legal nested repository."""
1597 if not path.startswith(self.root):
1602 if not path.startswith(self.root):
1598 return False
1603 return False
1599 subpath = path[len(self.root) + 1 :]
1604 subpath = path[len(self.root) + 1 :]
1600 normsubpath = util.pconvert(subpath)
1605 normsubpath = util.pconvert(subpath)
1601
1606
1602 # XXX: Checking against the current working copy is wrong in
1607 # XXX: Checking against the current working copy is wrong in
1603 # the sense that it can reject things like
1608 # the sense that it can reject things like
1604 #
1609 #
1605 # $ hg cat -r 10 sub/x.txt
1610 # $ hg cat -r 10 sub/x.txt
1606 #
1611 #
1607 # if sub/ is no longer a subrepository in the working copy
1612 # if sub/ is no longer a subrepository in the working copy
1608 # parent revision.
1613 # parent revision.
1609 #
1614 #
1610 # However, it can of course also allow things that would have
1615 # However, it can of course also allow things that would have
1611 # been rejected before, such as the above cat command if sub/
1616 # been rejected before, such as the above cat command if sub/
1612 # is a subrepository now, but was a normal directory before.
1617 # is a subrepository now, but was a normal directory before.
1613 # The old path auditor would have rejected by mistake since it
1618 # The old path auditor would have rejected by mistake since it
1614 # panics when it sees sub/.hg/.
1619 # panics when it sees sub/.hg/.
1615 #
1620 #
1616 # All in all, checking against the working copy seems sensible
1621 # All in all, checking against the working copy seems sensible
1617 # since we want to prevent access to nested repositories on
1622 # since we want to prevent access to nested repositories on
1618 # the filesystem *now*.
1623 # the filesystem *now*.
1619 ctx = self[None]
1624 ctx = self[None]
1620 parts = util.splitpath(subpath)
1625 parts = util.splitpath(subpath)
1621 while parts:
1626 while parts:
1622 prefix = b'/'.join(parts)
1627 prefix = b'/'.join(parts)
1623 if prefix in ctx.substate:
1628 if prefix in ctx.substate:
1624 if prefix == normsubpath:
1629 if prefix == normsubpath:
1625 return True
1630 return True
1626 else:
1631 else:
1627 sub = ctx.sub(prefix)
1632 sub = ctx.sub(prefix)
1628 return sub.checknested(subpath[len(prefix) + 1 :])
1633 return sub.checknested(subpath[len(prefix) + 1 :])
1629 else:
1634 else:
1630 parts.pop()
1635 parts.pop()
1631 return False
1636 return False
1632
1637
1633 def peer(self):
1638 def peer(self):
1634 return localpeer(self) # not cached to avoid reference cycle
1639 return localpeer(self) # not cached to avoid reference cycle
1635
1640
1636 def unfiltered(self):
1641 def unfiltered(self):
1637 """Return unfiltered version of the repository
1642 """Return unfiltered version of the repository
1638
1643
1639 Intended to be overwritten by filtered repo."""
1644 Intended to be overwritten by filtered repo."""
1640 return self
1645 return self
1641
1646
1642 def filtered(self, name, visibilityexceptions=None):
1647 def filtered(self, name, visibilityexceptions=None):
1643 """Return a filtered version of a repository
1648 """Return a filtered version of a repository
1644
1649
1645 The `name` parameter is the identifier of the requested view. This
1650 The `name` parameter is the identifier of the requested view. This
1646 will return a repoview object set "exactly" to the specified view.
1651 will return a repoview object set "exactly" to the specified view.
1647
1652
1648 This function does not apply recursive filtering to a repository. For
1653 This function does not apply recursive filtering to a repository. For
1649 example calling `repo.filtered("served")` will return a repoview using
1654 example calling `repo.filtered("served")` will return a repoview using
1650 the "served" view, regardless of the initial view used by `repo`.
1655 the "served" view, regardless of the initial view used by `repo`.
1651
1656
1652 In other word, there is always only one level of `repoview` "filtering".
1657 In other word, there is always only one level of `repoview` "filtering".
1653 """
1658 """
1654 if self._extrafilterid is not None and b'%' not in name:
1659 if self._extrafilterid is not None and b'%' not in name:
1655 name = name + b'%' + self._extrafilterid
1660 name = name + b'%' + self._extrafilterid
1656
1661
1657 cls = repoview.newtype(self.unfiltered().__class__)
1662 cls = repoview.newtype(self.unfiltered().__class__)
1658 return cls(self, name, visibilityexceptions)
1663 return cls(self, name, visibilityexceptions)
1659
1664
1660 @mixedrepostorecache(
1665 @mixedrepostorecache(
1661 (b'bookmarks', b'plain'),
1666 (b'bookmarks', b'plain'),
1662 (b'bookmarks.current', b'plain'),
1667 (b'bookmarks.current', b'plain'),
1663 (b'bookmarks', b''),
1668 (b'bookmarks', b''),
1664 (b'00changelog.i', b''),
1669 (b'00changelog.i', b''),
1665 )
1670 )
1666 def _bookmarks(self):
1671 def _bookmarks(self):
1667 # Since the multiple files involved in the transaction cannot be
1672 # Since the multiple files involved in the transaction cannot be
1668 # written atomically (with current repository format), there is a race
1673 # written atomically (with current repository format), there is a race
1669 # condition here.
1674 # condition here.
1670 #
1675 #
1671 # 1) changelog content A is read
1676 # 1) changelog content A is read
1672 # 2) outside transaction update changelog to content B
1677 # 2) outside transaction update changelog to content B
1673 # 3) outside transaction update bookmark file referring to content B
1678 # 3) outside transaction update bookmark file referring to content B
1674 # 4) bookmarks file content is read and filtered against changelog-A
1679 # 4) bookmarks file content is read and filtered against changelog-A
1675 #
1680 #
1676 # When this happens, bookmarks against nodes missing from A are dropped.
1681 # When this happens, bookmarks against nodes missing from A are dropped.
1677 #
1682 #
1678 # Having this happening during read is not great, but it become worse
1683 # Having this happening during read is not great, but it become worse
1679 # when this happen during write because the bookmarks to the "unknown"
1684 # when this happen during write because the bookmarks to the "unknown"
1680 # nodes will be dropped for good. However, writes happen within locks.
1685 # nodes will be dropped for good. However, writes happen within locks.
1681 # This locking makes it possible to have a race free consistent read.
1686 # This locking makes it possible to have a race free consistent read.
1682 # For this purpose data read from disc before locking are
1687 # For this purpose data read from disc before locking are
1683 # "invalidated" right after the locks are taken. This invalidations are
1688 # "invalidated" right after the locks are taken. This invalidations are
1684 # "light", the `filecache` mechanism keep the data in memory and will
1689 # "light", the `filecache` mechanism keep the data in memory and will
1685 # reuse them if the underlying files did not changed. Not parsing the
1690 # reuse them if the underlying files did not changed. Not parsing the
1686 # same data multiple times helps performances.
1691 # same data multiple times helps performances.
1687 #
1692 #
1688 # Unfortunately in the case describe above, the files tracked by the
1693 # Unfortunately in the case describe above, the files tracked by the
1689 # bookmarks file cache might not have changed, but the in-memory
1694 # bookmarks file cache might not have changed, but the in-memory
1690 # content is still "wrong" because we used an older changelog content
1695 # content is still "wrong" because we used an older changelog content
1691 # to process the on-disk data. So after locking, the changelog would be
1696 # to process the on-disk data. So after locking, the changelog would be
1692 # refreshed but `_bookmarks` would be preserved.
1697 # refreshed but `_bookmarks` would be preserved.
1693 # Adding `00changelog.i` to the list of tracked file is not
1698 # Adding `00changelog.i` to the list of tracked file is not
1694 # enough, because at the time we build the content for `_bookmarks` in
1699 # enough, because at the time we build the content for `_bookmarks` in
1695 # (4), the changelog file has already diverged from the content used
1700 # (4), the changelog file has already diverged from the content used
1696 # for loading `changelog` in (1)
1701 # for loading `changelog` in (1)
1697 #
1702 #
1698 # To prevent the issue, we force the changelog to be explicitly
1703 # To prevent the issue, we force the changelog to be explicitly
1699 # reloaded while computing `_bookmarks`. The data race can still happen
1704 # reloaded while computing `_bookmarks`. The data race can still happen
1700 # without the lock (with a narrower window), but it would no longer go
1705 # without the lock (with a narrower window), but it would no longer go
1701 # undetected during the lock time refresh.
1706 # undetected during the lock time refresh.
1702 #
1707 #
1703 # The new schedule is as follow
1708 # The new schedule is as follow
1704 #
1709 #
1705 # 1) filecache logic detect that `_bookmarks` needs to be computed
1710 # 1) filecache logic detect that `_bookmarks` needs to be computed
1706 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1711 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1707 # 3) We force `changelog` filecache to be tested
1712 # 3) We force `changelog` filecache to be tested
1708 # 4) cachestat for `changelog` are captured (for changelog)
1713 # 4) cachestat for `changelog` are captured (for changelog)
1709 # 5) `_bookmarks` is computed and cached
1714 # 5) `_bookmarks` is computed and cached
1710 #
1715 #
1711 # The step in (3) ensure we have a changelog at least as recent as the
1716 # The step in (3) ensure we have a changelog at least as recent as the
1712 # cache stat computed in (1). As a result at locking time:
1717 # cache stat computed in (1). As a result at locking time:
1713 # * if the changelog did not changed since (1) -> we can reuse the data
1718 # * if the changelog did not changed since (1) -> we can reuse the data
1714 # * otherwise -> the bookmarks get refreshed.
1719 # * otherwise -> the bookmarks get refreshed.
1715 self._refreshchangelog()
1720 self._refreshchangelog()
1716 return bookmarks.bmstore(self)
1721 return bookmarks.bmstore(self)
1717
1722
1718 def _refreshchangelog(self):
1723 def _refreshchangelog(self):
1719 """make sure the in memory changelog match the on-disk one"""
1724 """make sure the in memory changelog match the on-disk one"""
1720 if 'changelog' in vars(self) and self.currenttransaction() is None:
1725 if 'changelog' in vars(self) and self.currenttransaction() is None:
1721 del self.changelog
1726 del self.changelog
1722
1727
1723 @property
1728 @property
1724 def _activebookmark(self):
1729 def _activebookmark(self):
1725 return self._bookmarks.active
1730 return self._bookmarks.active
1726
1731
1727 # _phasesets depend on changelog. what we need is to call
1732 # _phasesets depend on changelog. what we need is to call
1728 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1733 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1729 # can't be easily expressed in filecache mechanism.
1734 # can't be easily expressed in filecache mechanism.
1730 @storecache(b'phaseroots', b'00changelog.i')
1735 @storecache(b'phaseroots', b'00changelog.i')
1731 def _phasecache(self):
1736 def _phasecache(self):
1732 return phases.phasecache(self, self._phasedefaults)
1737 return phases.phasecache(self, self._phasedefaults)
1733
1738
1734 @storecache(b'obsstore')
1739 @storecache(b'obsstore')
1735 def obsstore(self):
1740 def obsstore(self):
1736 return obsolete.makestore(self.ui, self)
1741 return obsolete.makestore(self.ui, self)
1737
1742
1738 @changelogcache()
1743 @changelogcache()
1739 def changelog(repo):
1744 def changelog(repo):
1740 # load dirstate before changelog to avoid race see issue6303
1745 # load dirstate before changelog to avoid race see issue6303
1741 repo.dirstate.prefetch_parents()
1746 repo.dirstate.prefetch_parents()
1742 return repo.store.changelog(
1747 return repo.store.changelog(
1743 txnutil.mayhavepending(repo.root),
1748 txnutil.mayhavepending(repo.root),
1744 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1749 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1745 )
1750 )
1746
1751
1747 @manifestlogcache()
1752 @manifestlogcache()
1748 def manifestlog(self):
1753 def manifestlog(self):
1749 return self.store.manifestlog(self, self._storenarrowmatch)
1754 return self.store.manifestlog(self, self._storenarrowmatch)
1750
1755
1751 @repofilecache(b'dirstate')
1756 @repofilecache(b'dirstate')
1752 def dirstate(self):
1757 def dirstate(self):
1753 return self._makedirstate()
1758 return self._makedirstate()
1754
1759
1755 def _makedirstate(self):
1760 def _makedirstate(self):
1756 """Extension point for wrapping the dirstate per-repo."""
1761 """Extension point for wrapping the dirstate per-repo."""
1757 sparsematchfn = None
1762 sparsematchfn = None
1758 if sparse.use_sparse(self):
1763 if sparse.use_sparse(self):
1759 sparsematchfn = lambda: sparse.matcher(self)
1764 sparsematchfn = lambda: sparse.matcher(self)
1760 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1765 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1761 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1766 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1762 use_dirstate_v2 = v2_req in self.requirements
1767 use_dirstate_v2 = v2_req in self.requirements
1763 use_tracked_hint = th in self.requirements
1768 use_tracked_hint = th in self.requirements
1764
1769
1765 return dirstate.dirstate(
1770 return dirstate.dirstate(
1766 self.vfs,
1771 self.vfs,
1767 self.ui,
1772 self.ui,
1768 self.root,
1773 self.root,
1769 self._dirstatevalidate,
1774 self._dirstatevalidate,
1770 sparsematchfn,
1775 sparsematchfn,
1771 self.nodeconstants,
1776 self.nodeconstants,
1772 use_dirstate_v2,
1777 use_dirstate_v2,
1773 use_tracked_hint=use_tracked_hint,
1778 use_tracked_hint=use_tracked_hint,
1774 )
1779 )
1775
1780
1776 def _dirstatevalidate(self, node):
1781 def _dirstatevalidate(self, node):
1777 try:
1782 try:
1778 self.changelog.rev(node)
1783 self.changelog.rev(node)
1779 return node
1784 return node
1780 except error.LookupError:
1785 except error.LookupError:
1781 if not self._dirstatevalidatewarned:
1786 if not self._dirstatevalidatewarned:
1782 self._dirstatevalidatewarned = True
1787 self._dirstatevalidatewarned = True
1783 self.ui.warn(
1788 self.ui.warn(
1784 _(b"warning: ignoring unknown working parent %s!\n")
1789 _(b"warning: ignoring unknown working parent %s!\n")
1785 % short(node)
1790 % short(node)
1786 )
1791 )
1787 return self.nullid
1792 return self.nullid
1788
1793
1789 @storecache(narrowspec.FILENAME)
1794 @storecache(narrowspec.FILENAME)
1790 def narrowpats(self):
1795 def narrowpats(self):
1791 """matcher patterns for this repository's narrowspec
1796 """matcher patterns for this repository's narrowspec
1792
1797
1793 A tuple of (includes, excludes).
1798 A tuple of (includes, excludes).
1794 """
1799 """
1795 return narrowspec.load(self)
1800 return narrowspec.load(self)
1796
1801
1797 @storecache(narrowspec.FILENAME)
1802 @storecache(narrowspec.FILENAME)
1798 def _storenarrowmatch(self):
1803 def _storenarrowmatch(self):
1799 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1804 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1800 return matchmod.always()
1805 return matchmod.always()
1801 include, exclude = self.narrowpats
1806 include, exclude = self.narrowpats
1802 return narrowspec.match(self.root, include=include, exclude=exclude)
1807 return narrowspec.match(self.root, include=include, exclude=exclude)
1803
1808
1804 @storecache(narrowspec.FILENAME)
1809 @storecache(narrowspec.FILENAME)
1805 def _narrowmatch(self):
1810 def _narrowmatch(self):
1806 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1811 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1807 return matchmod.always()
1812 return matchmod.always()
1808 narrowspec.checkworkingcopynarrowspec(self)
1813 narrowspec.checkworkingcopynarrowspec(self)
1809 include, exclude = self.narrowpats
1814 include, exclude = self.narrowpats
1810 return narrowspec.match(self.root, include=include, exclude=exclude)
1815 return narrowspec.match(self.root, include=include, exclude=exclude)
1811
1816
1812 def narrowmatch(self, match=None, includeexact=False):
1817 def narrowmatch(self, match=None, includeexact=False):
1813 """matcher corresponding the the repo's narrowspec
1818 """matcher corresponding the the repo's narrowspec
1814
1819
1815 If `match` is given, then that will be intersected with the narrow
1820 If `match` is given, then that will be intersected with the narrow
1816 matcher.
1821 matcher.
1817
1822
1818 If `includeexact` is True, then any exact matches from `match` will
1823 If `includeexact` is True, then any exact matches from `match` will
1819 be included even if they're outside the narrowspec.
1824 be included even if they're outside the narrowspec.
1820 """
1825 """
1821 if match:
1826 if match:
1822 if includeexact and not self._narrowmatch.always():
1827 if includeexact and not self._narrowmatch.always():
1823 # do not exclude explicitly-specified paths so that they can
1828 # do not exclude explicitly-specified paths so that they can
1824 # be warned later on
1829 # be warned later on
1825 em = matchmod.exact(match.files())
1830 em = matchmod.exact(match.files())
1826 nm = matchmod.unionmatcher([self._narrowmatch, em])
1831 nm = matchmod.unionmatcher([self._narrowmatch, em])
1827 return matchmod.intersectmatchers(match, nm)
1832 return matchmod.intersectmatchers(match, nm)
1828 return matchmod.intersectmatchers(match, self._narrowmatch)
1833 return matchmod.intersectmatchers(match, self._narrowmatch)
1829 return self._narrowmatch
1834 return self._narrowmatch
1830
1835
1831 def setnarrowpats(self, newincludes, newexcludes):
1836 def setnarrowpats(self, newincludes, newexcludes):
1832 narrowspec.save(self, newincludes, newexcludes)
1837 narrowspec.save(self, newincludes, newexcludes)
1833 self.invalidate(clearfilecache=True)
1838 self.invalidate(clearfilecache=True)
1834
1839
1835 @unfilteredpropertycache
1840 @unfilteredpropertycache
1836 def _quick_access_changeid_null(self):
1841 def _quick_access_changeid_null(self):
1837 return {
1842 return {
1838 b'null': (nullrev, self.nodeconstants.nullid),
1843 b'null': (nullrev, self.nodeconstants.nullid),
1839 nullrev: (nullrev, self.nodeconstants.nullid),
1844 nullrev: (nullrev, self.nodeconstants.nullid),
1840 self.nullid: (nullrev, self.nullid),
1845 self.nullid: (nullrev, self.nullid),
1841 }
1846 }
1842
1847
1843 @unfilteredpropertycache
1848 @unfilteredpropertycache
1844 def _quick_access_changeid_wc(self):
1849 def _quick_access_changeid_wc(self):
1845 # also fast path access to the working copy parents
1850 # also fast path access to the working copy parents
1846 # however, only do it for filter that ensure wc is visible.
1851 # however, only do it for filter that ensure wc is visible.
1847 quick = self._quick_access_changeid_null.copy()
1852 quick = self._quick_access_changeid_null.copy()
1848 cl = self.unfiltered().changelog
1853 cl = self.unfiltered().changelog
1849 for node in self.dirstate.parents():
1854 for node in self.dirstate.parents():
1850 if node == self.nullid:
1855 if node == self.nullid:
1851 continue
1856 continue
1852 rev = cl.index.get_rev(node)
1857 rev = cl.index.get_rev(node)
1853 if rev is None:
1858 if rev is None:
1854 # unknown working copy parent case:
1859 # unknown working copy parent case:
1855 #
1860 #
1856 # skip the fast path and let higher code deal with it
1861 # skip the fast path and let higher code deal with it
1857 continue
1862 continue
1858 pair = (rev, node)
1863 pair = (rev, node)
1859 quick[rev] = pair
1864 quick[rev] = pair
1860 quick[node] = pair
1865 quick[node] = pair
1861 # also add the parents of the parents
1866 # also add the parents of the parents
1862 for r in cl.parentrevs(rev):
1867 for r in cl.parentrevs(rev):
1863 if r == nullrev:
1868 if r == nullrev:
1864 continue
1869 continue
1865 n = cl.node(r)
1870 n = cl.node(r)
1866 pair = (r, n)
1871 pair = (r, n)
1867 quick[r] = pair
1872 quick[r] = pair
1868 quick[n] = pair
1873 quick[n] = pair
1869 p1node = self.dirstate.p1()
1874 p1node = self.dirstate.p1()
1870 if p1node != self.nullid:
1875 if p1node != self.nullid:
1871 quick[b'.'] = quick[p1node]
1876 quick[b'.'] = quick[p1node]
1872 return quick
1877 return quick
1873
1878
1874 @unfilteredmethod
1879 @unfilteredmethod
1875 def _quick_access_changeid_invalidate(self):
1880 def _quick_access_changeid_invalidate(self):
1876 if '_quick_access_changeid_wc' in vars(self):
1881 if '_quick_access_changeid_wc' in vars(self):
1877 del self.__dict__['_quick_access_changeid_wc']
1882 del self.__dict__['_quick_access_changeid_wc']
1878
1883
1879 @property
1884 @property
1880 def _quick_access_changeid(self):
1885 def _quick_access_changeid(self):
1881 """an helper dictionnary for __getitem__ calls
1886 """an helper dictionnary for __getitem__ calls
1882
1887
1883 This contains a list of symbol we can recognise right away without
1888 This contains a list of symbol we can recognise right away without
1884 further processing.
1889 further processing.
1885 """
1890 """
1886 if self.filtername in repoview.filter_has_wc:
1891 if self.filtername in repoview.filter_has_wc:
1887 return self._quick_access_changeid_wc
1892 return self._quick_access_changeid_wc
1888 return self._quick_access_changeid_null
1893 return self._quick_access_changeid_null
1889
1894
1890 def __getitem__(self, changeid):
1895 def __getitem__(self, changeid):
1891 # dealing with special cases
1896 # dealing with special cases
1892 if changeid is None:
1897 if changeid is None:
1893 return context.workingctx(self)
1898 return context.workingctx(self)
1894 if isinstance(changeid, context.basectx):
1899 if isinstance(changeid, context.basectx):
1895 return changeid
1900 return changeid
1896
1901
1897 # dealing with multiple revisions
1902 # dealing with multiple revisions
1898 if isinstance(changeid, slice):
1903 if isinstance(changeid, slice):
1899 # wdirrev isn't contiguous so the slice shouldn't include it
1904 # wdirrev isn't contiguous so the slice shouldn't include it
1900 return [
1905 return [
1901 self[i]
1906 self[i]
1902 for i in range(*changeid.indices(len(self)))
1907 for i in range(*changeid.indices(len(self)))
1903 if i not in self.changelog.filteredrevs
1908 if i not in self.changelog.filteredrevs
1904 ]
1909 ]
1905
1910
1906 # dealing with some special values
1911 # dealing with some special values
1907 quick_access = self._quick_access_changeid.get(changeid)
1912 quick_access = self._quick_access_changeid.get(changeid)
1908 if quick_access is not None:
1913 if quick_access is not None:
1909 rev, node = quick_access
1914 rev, node = quick_access
1910 return context.changectx(self, rev, node, maybe_filtered=False)
1915 return context.changectx(self, rev, node, maybe_filtered=False)
1911 if changeid == b'tip':
1916 if changeid == b'tip':
1912 node = self.changelog.tip()
1917 node = self.changelog.tip()
1913 rev = self.changelog.rev(node)
1918 rev = self.changelog.rev(node)
1914 return context.changectx(self, rev, node)
1919 return context.changectx(self, rev, node)
1915
1920
1916 # dealing with arbitrary values
1921 # dealing with arbitrary values
1917 try:
1922 try:
1918 if isinstance(changeid, int):
1923 if isinstance(changeid, int):
1919 node = self.changelog.node(changeid)
1924 node = self.changelog.node(changeid)
1920 rev = changeid
1925 rev = changeid
1921 elif changeid == b'.':
1926 elif changeid == b'.':
1922 # this is a hack to delay/avoid loading obsmarkers
1927 # this is a hack to delay/avoid loading obsmarkers
1923 # when we know that '.' won't be hidden
1928 # when we know that '.' won't be hidden
1924 node = self.dirstate.p1()
1929 node = self.dirstate.p1()
1925 rev = self.unfiltered().changelog.rev(node)
1930 rev = self.unfiltered().changelog.rev(node)
1926 elif len(changeid) == self.nodeconstants.nodelen:
1931 elif len(changeid) == self.nodeconstants.nodelen:
1927 try:
1932 try:
1928 node = changeid
1933 node = changeid
1929 rev = self.changelog.rev(changeid)
1934 rev = self.changelog.rev(changeid)
1930 except error.FilteredLookupError:
1935 except error.FilteredLookupError:
1931 changeid = hex(changeid) # for the error message
1936 changeid = hex(changeid) # for the error message
1932 raise
1937 raise
1933 except LookupError:
1938 except LookupError:
1934 # check if it might have come from damaged dirstate
1939 # check if it might have come from damaged dirstate
1935 #
1940 #
1936 # XXX we could avoid the unfiltered if we had a recognizable
1941 # XXX we could avoid the unfiltered if we had a recognizable
1937 # exception for filtered changeset access
1942 # exception for filtered changeset access
1938 if (
1943 if (
1939 self.local()
1944 self.local()
1940 and changeid in self.unfiltered().dirstate.parents()
1945 and changeid in self.unfiltered().dirstate.parents()
1941 ):
1946 ):
1942 msg = _(b"working directory has unknown parent '%s'!")
1947 msg = _(b"working directory has unknown parent '%s'!")
1943 raise error.Abort(msg % short(changeid))
1948 raise error.Abort(msg % short(changeid))
1944 changeid = hex(changeid) # for the error message
1949 changeid = hex(changeid) # for the error message
1945 raise
1950 raise
1946
1951
1947 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1952 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1948 node = bin(changeid)
1953 node = bin(changeid)
1949 rev = self.changelog.rev(node)
1954 rev = self.changelog.rev(node)
1950 else:
1955 else:
1951 raise error.ProgrammingError(
1956 raise error.ProgrammingError(
1952 b"unsupported changeid '%s' of type %s"
1957 b"unsupported changeid '%s' of type %s"
1953 % (changeid, pycompat.bytestr(type(changeid)))
1958 % (changeid, pycompat.bytestr(type(changeid)))
1954 )
1959 )
1955
1960
1956 return context.changectx(self, rev, node)
1961 return context.changectx(self, rev, node)
1957
1962
1958 except (error.FilteredIndexError, error.FilteredLookupError):
1963 except (error.FilteredIndexError, error.FilteredLookupError):
1959 raise error.FilteredRepoLookupError(
1964 raise error.FilteredRepoLookupError(
1960 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1965 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1961 )
1966 )
1962 except (IndexError, LookupError):
1967 except (IndexError, LookupError):
1963 raise error.RepoLookupError(
1968 raise error.RepoLookupError(
1964 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1969 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1965 )
1970 )
1966 except error.WdirUnsupported:
1971 except error.WdirUnsupported:
1967 return context.workingctx(self)
1972 return context.workingctx(self)
1968
1973
1969 def __contains__(self, changeid):
1974 def __contains__(self, changeid):
1970 """True if the given changeid exists"""
1975 """True if the given changeid exists"""
1971 try:
1976 try:
1972 self[changeid]
1977 self[changeid]
1973 return True
1978 return True
1974 except error.RepoLookupError:
1979 except error.RepoLookupError:
1975 return False
1980 return False
1976
1981
1977 def __nonzero__(self):
1982 def __nonzero__(self):
1978 return True
1983 return True
1979
1984
1980 __bool__ = __nonzero__
1985 __bool__ = __nonzero__
1981
1986
1982 def __len__(self):
1987 def __len__(self):
1983 # no need to pay the cost of repoview.changelog
1988 # no need to pay the cost of repoview.changelog
1984 unfi = self.unfiltered()
1989 unfi = self.unfiltered()
1985 return len(unfi.changelog)
1990 return len(unfi.changelog)
1986
1991
1987 def __iter__(self):
1992 def __iter__(self):
1988 return iter(self.changelog)
1993 return iter(self.changelog)
1989
1994
1990 def revs(self, expr: bytes, *args):
1995 def revs(self, expr: bytes, *args):
1991 """Find revisions matching a revset.
1996 """Find revisions matching a revset.
1992
1997
1993 The revset is specified as a string ``expr`` that may contain
1998 The revset is specified as a string ``expr`` that may contain
1994 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1999 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1995
2000
1996 Revset aliases from the configuration are not expanded. To expand
2001 Revset aliases from the configuration are not expanded. To expand
1997 user aliases, consider calling ``scmutil.revrange()`` or
2002 user aliases, consider calling ``scmutil.revrange()`` or
1998 ``repo.anyrevs([expr], user=True)``.
2003 ``repo.anyrevs([expr], user=True)``.
1999
2004
2000 Returns a smartset.abstractsmartset, which is a list-like interface
2005 Returns a smartset.abstractsmartset, which is a list-like interface
2001 that contains integer revisions.
2006 that contains integer revisions.
2002 """
2007 """
2003 tree = revsetlang.spectree(expr, *args)
2008 tree = revsetlang.spectree(expr, *args)
2004 return revset.makematcher(tree)(self)
2009 return revset.makematcher(tree)(self)
2005
2010
2006 def set(self, expr: bytes, *args):
2011 def set(self, expr: bytes, *args):
2007 """Find revisions matching a revset and emit changectx instances.
2012 """Find revisions matching a revset and emit changectx instances.
2008
2013
2009 This is a convenience wrapper around ``revs()`` that iterates the
2014 This is a convenience wrapper around ``revs()`` that iterates the
2010 result and is a generator of changectx instances.
2015 result and is a generator of changectx instances.
2011
2016
2012 Revset aliases from the configuration are not expanded. To expand
2017 Revset aliases from the configuration are not expanded. To expand
2013 user aliases, consider calling ``scmutil.revrange()``.
2018 user aliases, consider calling ``scmutil.revrange()``.
2014 """
2019 """
2015 for r in self.revs(expr, *args):
2020 for r in self.revs(expr, *args):
2016 yield self[r]
2021 yield self[r]
2017
2022
2018 def anyrevs(self, specs: bytes, user=False, localalias=None):
2023 def anyrevs(self, specs: bytes, user=False, localalias=None):
2019 """Find revisions matching one of the given revsets.
2024 """Find revisions matching one of the given revsets.
2020
2025
2021 Revset aliases from the configuration are not expanded by default. To
2026 Revset aliases from the configuration are not expanded by default. To
2022 expand user aliases, specify ``user=True``. To provide some local
2027 expand user aliases, specify ``user=True``. To provide some local
2023 definitions overriding user aliases, set ``localalias`` to
2028 definitions overriding user aliases, set ``localalias`` to
2024 ``{name: definitionstring}``.
2029 ``{name: definitionstring}``.
2025 """
2030 """
2026 if specs == [b'null']:
2031 if specs == [b'null']:
2027 return revset.baseset([nullrev])
2032 return revset.baseset([nullrev])
2028 if specs == [b'.']:
2033 if specs == [b'.']:
2029 quick_data = self._quick_access_changeid.get(b'.')
2034 quick_data = self._quick_access_changeid.get(b'.')
2030 if quick_data is not None:
2035 if quick_data is not None:
2031 return revset.baseset([quick_data[0]])
2036 return revset.baseset([quick_data[0]])
2032 if user:
2037 if user:
2033 m = revset.matchany(
2038 m = revset.matchany(
2034 self.ui,
2039 self.ui,
2035 specs,
2040 specs,
2036 lookup=revset.lookupfn(self),
2041 lookup=revset.lookupfn(self),
2037 localalias=localalias,
2042 localalias=localalias,
2038 )
2043 )
2039 else:
2044 else:
2040 m = revset.matchany(None, specs, localalias=localalias)
2045 m = revset.matchany(None, specs, localalias=localalias)
2041 return m(self)
2046 return m(self)
2042
2047
2043 def url(self) -> bytes:
2048 def url(self) -> bytes:
2044 return b'file:' + self.root
2049 return b'file:' + self.root
2045
2050
2046 def hook(self, name, throw=False, **args):
2051 def hook(self, name, throw=False, **args):
2047 """Call a hook, passing this repo instance.
2052 """Call a hook, passing this repo instance.
2048
2053
2049 This a convenience method to aid invoking hooks. Extensions likely
2054 This a convenience method to aid invoking hooks. Extensions likely
2050 won't call this unless they have registered a custom hook or are
2055 won't call this unless they have registered a custom hook or are
2051 replacing code that is expected to call a hook.
2056 replacing code that is expected to call a hook.
2052 """
2057 """
2053 return hook.hook(self.ui, self, name, throw, **args)
2058 return hook.hook(self.ui, self, name, throw, **args)
2054
2059
2055 @filteredpropertycache
2060 @filteredpropertycache
2056 def _tagscache(self):
2061 def _tagscache(self):
2057 """Returns a tagscache object that contains various tags related
2062 """Returns a tagscache object that contains various tags related
2058 caches."""
2063 caches."""
2059
2064
2060 # This simplifies its cache management by having one decorated
2065 # This simplifies its cache management by having one decorated
2061 # function (this one) and the rest simply fetch things from it.
2066 # function (this one) and the rest simply fetch things from it.
2062 class tagscache:
2067 class tagscache:
2063 def __init__(self):
2068 def __init__(self):
2064 # These two define the set of tags for this repository. tags
2069 # These two define the set of tags for this repository. tags
2065 # maps tag name to node; tagtypes maps tag name to 'global' or
2070 # maps tag name to node; tagtypes maps tag name to 'global' or
2066 # 'local'. (Global tags are defined by .hgtags across all
2071 # 'local'. (Global tags are defined by .hgtags across all
2067 # heads, and local tags are defined in .hg/localtags.)
2072 # heads, and local tags are defined in .hg/localtags.)
2068 # They constitute the in-memory cache of tags.
2073 # They constitute the in-memory cache of tags.
2069 self.tags = self.tagtypes = None
2074 self.tags = self.tagtypes = None
2070
2075
2071 self.nodetagscache = self.tagslist = None
2076 self.nodetagscache = self.tagslist = None
2072
2077
2073 cache = tagscache()
2078 cache = tagscache()
2074 cache.tags, cache.tagtypes = self._findtags()
2079 cache.tags, cache.tagtypes = self._findtags()
2075
2080
2076 return cache
2081 return cache
2077
2082
2078 def tags(self):
2083 def tags(self):
2079 '''return a mapping of tag to node'''
2084 '''return a mapping of tag to node'''
2080 t = {}
2085 t = {}
2081 if self.changelog.filteredrevs:
2086 if self.changelog.filteredrevs:
2082 tags, tt = self._findtags()
2087 tags, tt = self._findtags()
2083 else:
2088 else:
2084 tags = self._tagscache.tags
2089 tags = self._tagscache.tags
2085 rev = self.changelog.rev
2090 rev = self.changelog.rev
2086 for k, v in tags.items():
2091 for k, v in tags.items():
2087 try:
2092 try:
2088 # ignore tags to unknown nodes
2093 # ignore tags to unknown nodes
2089 rev(v)
2094 rev(v)
2090 t[k] = v
2095 t[k] = v
2091 except (error.LookupError, ValueError):
2096 except (error.LookupError, ValueError):
2092 pass
2097 pass
2093 return t
2098 return t
2094
2099
2095 def _findtags(self):
2100 def _findtags(self):
2096 """Do the hard work of finding tags. Return a pair of dicts
2101 """Do the hard work of finding tags. Return a pair of dicts
2097 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2102 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2098 maps tag name to a string like \'global\' or \'local\'.
2103 maps tag name to a string like \'global\' or \'local\'.
2099 Subclasses or extensions are free to add their own tags, but
2104 Subclasses or extensions are free to add their own tags, but
2100 should be aware that the returned dicts will be retained for the
2105 should be aware that the returned dicts will be retained for the
2101 duration of the localrepo object."""
2106 duration of the localrepo object."""
2102
2107
2103 # XXX what tagtype should subclasses/extensions use? Currently
2108 # XXX what tagtype should subclasses/extensions use? Currently
2104 # mq and bookmarks add tags, but do not set the tagtype at all.
2109 # mq and bookmarks add tags, but do not set the tagtype at all.
2105 # Should each extension invent its own tag type? Should there
2110 # Should each extension invent its own tag type? Should there
2106 # be one tagtype for all such "virtual" tags? Or is the status
2111 # be one tagtype for all such "virtual" tags? Or is the status
2107 # quo fine?
2112 # quo fine?
2108
2113
2109 # map tag name to (node, hist)
2114 # map tag name to (node, hist)
2110 alltags = tagsmod.findglobaltags(self.ui, self)
2115 alltags = tagsmod.findglobaltags(self.ui, self)
2111 # map tag name to tag type
2116 # map tag name to tag type
2112 tagtypes = {tag: b'global' for tag in alltags}
2117 tagtypes = {tag: b'global' for tag in alltags}
2113
2118
2114 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2119 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2115
2120
2116 # Build the return dicts. Have to re-encode tag names because
2121 # Build the return dicts. Have to re-encode tag names because
2117 # the tags module always uses UTF-8 (in order not to lose info
2122 # the tags module always uses UTF-8 (in order not to lose info
2118 # writing to the cache), but the rest of Mercurial wants them in
2123 # writing to the cache), but the rest of Mercurial wants them in
2119 # local encoding.
2124 # local encoding.
2120 tags = {}
2125 tags = {}
2121 for (name, (node, hist)) in alltags.items():
2126 for (name, (node, hist)) in alltags.items():
2122 if node != self.nullid:
2127 if node != self.nullid:
2123 tags[encoding.tolocal(name)] = node
2128 tags[encoding.tolocal(name)] = node
2124 tags[b'tip'] = self.changelog.tip()
2129 tags[b'tip'] = self.changelog.tip()
2125 tagtypes = {
2130 tagtypes = {
2126 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2131 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2127 }
2132 }
2128 return (tags, tagtypes)
2133 return (tags, tagtypes)
2129
2134
2130 def tagtype(self, tagname):
2135 def tagtype(self, tagname):
2131 """
2136 """
2132 return the type of the given tag. result can be:
2137 return the type of the given tag. result can be:
2133
2138
2134 'local' : a local tag
2139 'local' : a local tag
2135 'global' : a global tag
2140 'global' : a global tag
2136 None : tag does not exist
2141 None : tag does not exist
2137 """
2142 """
2138
2143
2139 return self._tagscache.tagtypes.get(tagname)
2144 return self._tagscache.tagtypes.get(tagname)
2140
2145
2141 def tagslist(self):
2146 def tagslist(self):
2142 '''return a list of tags ordered by revision'''
2147 '''return a list of tags ordered by revision'''
2143 if not self._tagscache.tagslist:
2148 if not self._tagscache.tagslist:
2144 l = []
2149 l = []
2145 for t, n in self.tags().items():
2150 for t, n in self.tags().items():
2146 l.append((self.changelog.rev(n), t, n))
2151 l.append((self.changelog.rev(n), t, n))
2147 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2152 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2148
2153
2149 return self._tagscache.tagslist
2154 return self._tagscache.tagslist
2150
2155
2151 def nodetags(self, node):
2156 def nodetags(self, node):
2152 '''return the tags associated with a node'''
2157 '''return the tags associated with a node'''
2153 if not self._tagscache.nodetagscache:
2158 if not self._tagscache.nodetagscache:
2154 nodetagscache = {}
2159 nodetagscache = {}
2155 for t, n in self._tagscache.tags.items():
2160 for t, n in self._tagscache.tags.items():
2156 nodetagscache.setdefault(n, []).append(t)
2161 nodetagscache.setdefault(n, []).append(t)
2157 for tags in nodetagscache.values():
2162 for tags in nodetagscache.values():
2158 tags.sort()
2163 tags.sort()
2159 self._tagscache.nodetagscache = nodetagscache
2164 self._tagscache.nodetagscache = nodetagscache
2160 return self._tagscache.nodetagscache.get(node, [])
2165 return self._tagscache.nodetagscache.get(node, [])
2161
2166
2162 def nodebookmarks(self, node):
2167 def nodebookmarks(self, node):
2163 """return the list of bookmarks pointing to the specified node"""
2168 """return the list of bookmarks pointing to the specified node"""
2164 return self._bookmarks.names(node)
2169 return self._bookmarks.names(node)
2165
2170
2166 def branchmap(self):
2171 def branchmap(self):
2167 """returns a dictionary {branch: [branchheads]} with branchheads
2172 """returns a dictionary {branch: [branchheads]} with branchheads
2168 ordered by increasing revision number"""
2173 ordered by increasing revision number"""
2169 return self._branchcaches[self]
2174 return self._branchcaches[self]
2170
2175
2171 @unfilteredmethod
2176 @unfilteredmethod
2172 def revbranchcache(self):
2177 def revbranchcache(self):
2173 if not self._revbranchcache:
2178 if not self._revbranchcache:
2174 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2179 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2175 return self._revbranchcache
2180 return self._revbranchcache
2176
2181
2177 def register_changeset(self, rev, changelogrevision):
2182 def register_changeset(self, rev, changelogrevision):
2178 self.revbranchcache().setdata(rev, changelogrevision)
2183 self.revbranchcache().setdata(rev, changelogrevision)
2179
2184
2180 def branchtip(self, branch, ignoremissing=False):
2185 def branchtip(self, branch, ignoremissing=False):
2181 """return the tip node for a given branch
2186 """return the tip node for a given branch
2182
2187
2183 If ignoremissing is True, then this method will not raise an error.
2188 If ignoremissing is True, then this method will not raise an error.
2184 This is helpful for callers that only expect None for a missing branch
2189 This is helpful for callers that only expect None for a missing branch
2185 (e.g. namespace).
2190 (e.g. namespace).
2186
2191
2187 """
2192 """
2188 try:
2193 try:
2189 return self.branchmap().branchtip(branch)
2194 return self.branchmap().branchtip(branch)
2190 except KeyError:
2195 except KeyError:
2191 if not ignoremissing:
2196 if not ignoremissing:
2192 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2197 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2193 else:
2198 else:
2194 pass
2199 pass
2195
2200
2196 def lookup(self, key):
2201 def lookup(self, key):
2197 node = scmutil.revsymbol(self, key).node()
2202 node = scmutil.revsymbol(self, key).node()
2198 if node is None:
2203 if node is None:
2199 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2204 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2200 return node
2205 return node
2201
2206
2202 def lookupbranch(self, key):
2207 def lookupbranch(self, key):
2203 if self.branchmap().hasbranch(key):
2208 if self.branchmap().hasbranch(key):
2204 return key
2209 return key
2205
2210
2206 return scmutil.revsymbol(self, key).branch()
2211 return scmutil.revsymbol(self, key).branch()
2207
2212
2208 def known(self, nodes):
2213 def known(self, nodes):
2209 cl = self.changelog
2214 cl = self.changelog
2210 get_rev = cl.index.get_rev
2215 get_rev = cl.index.get_rev
2211 filtered = cl.filteredrevs
2216 filtered = cl.filteredrevs
2212 result = []
2217 result = []
2213 for n in nodes:
2218 for n in nodes:
2214 r = get_rev(n)
2219 r = get_rev(n)
2215 resp = not (r is None or r in filtered)
2220 resp = not (r is None or r in filtered)
2216 result.append(resp)
2221 result.append(resp)
2217 return result
2222 return result
2218
2223
2219 def local(self):
2224 def local(self):
2220 return self
2225 return self
2221
2226
2222 def publishing(self):
2227 def publishing(self):
2223 # it's safe (and desirable) to trust the publish flag unconditionally
2228 # it's safe (and desirable) to trust the publish flag unconditionally
2224 # so that we don't finalize changes shared between users via ssh or nfs
2229 # so that we don't finalize changes shared between users via ssh or nfs
2225 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2230 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2226
2231
2227 def cancopy(self):
2232 def cancopy(self):
2228 # so statichttprepo's override of local() works
2233 # so statichttprepo's override of local() works
2229 if not self.local():
2234 if not self.local():
2230 return False
2235 return False
2231 if not self.publishing():
2236 if not self.publishing():
2232 return True
2237 return True
2233 # if publishing we can't copy if there is filtered content
2238 # if publishing we can't copy if there is filtered content
2234 return not self.filtered(b'visible').changelog.filteredrevs
2239 return not self.filtered(b'visible').changelog.filteredrevs
2235
2240
2236 def shared(self):
2241 def shared(self):
2237 '''the type of shared repository (None if not shared)'''
2242 '''the type of shared repository (None if not shared)'''
2238 if self.sharedpath != self.path:
2243 if self.sharedpath != self.path:
2239 return b'store'
2244 return b'store'
2240 return None
2245 return None
2241
2246
2242 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2247 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2243 return self.vfs.reljoin(self.root, f, *insidef)
2248 return self.vfs.reljoin(self.root, f, *insidef)
2244
2249
2245 def setparents(self, p1, p2=None):
2250 def setparents(self, p1, p2=None):
2246 if p2 is None:
2251 if p2 is None:
2247 p2 = self.nullid
2252 p2 = self.nullid
2248 self[None].setparents(p1, p2)
2253 self[None].setparents(p1, p2)
2249 self._quick_access_changeid_invalidate()
2254 self._quick_access_changeid_invalidate()
2250
2255
2251 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2256 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2252 """changeid must be a changeset revision, if specified.
2257 """changeid must be a changeset revision, if specified.
2253 fileid can be a file revision or node."""
2258 fileid can be a file revision or node."""
2254 return context.filectx(
2259 return context.filectx(
2255 self, path, changeid, fileid, changectx=changectx
2260 self, path, changeid, fileid, changectx=changectx
2256 )
2261 )
2257
2262
2258 def getcwd(self) -> bytes:
2263 def getcwd(self) -> bytes:
2259 return self.dirstate.getcwd()
2264 return self.dirstate.getcwd()
2260
2265
2261 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2266 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2262 return self.dirstate.pathto(f, cwd)
2267 return self.dirstate.pathto(f, cwd)
2263
2268
2264 def _loadfilter(self, filter):
2269 def _loadfilter(self, filter):
2265 if filter not in self._filterpats:
2270 if filter not in self._filterpats:
2266 l = []
2271 l = []
2267 for pat, cmd in self.ui.configitems(filter):
2272 for pat, cmd in self.ui.configitems(filter):
2268 if cmd == b'!':
2273 if cmd == b'!':
2269 continue
2274 continue
2270 mf = matchmod.match(self.root, b'', [pat])
2275 mf = matchmod.match(self.root, b'', [pat])
2271 fn = None
2276 fn = None
2272 params = cmd
2277 params = cmd
2273 for name, filterfn in self._datafilters.items():
2278 for name, filterfn in self._datafilters.items():
2274 if cmd.startswith(name):
2279 if cmd.startswith(name):
2275 fn = filterfn
2280 fn = filterfn
2276 params = cmd[len(name) :].lstrip()
2281 params = cmd[len(name) :].lstrip()
2277 break
2282 break
2278 if not fn:
2283 if not fn:
2279 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2284 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2280 fn.__name__ = 'commandfilter'
2285 fn.__name__ = 'commandfilter'
2281 # Wrap old filters not supporting keyword arguments
2286 # Wrap old filters not supporting keyword arguments
2282 if not pycompat.getargspec(fn)[2]:
2287 if not pycompat.getargspec(fn)[2]:
2283 oldfn = fn
2288 oldfn = fn
2284 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2289 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2285 fn.__name__ = 'compat-' + oldfn.__name__
2290 fn.__name__ = 'compat-' + oldfn.__name__
2286 l.append((mf, fn, params))
2291 l.append((mf, fn, params))
2287 self._filterpats[filter] = l
2292 self._filterpats[filter] = l
2288 return self._filterpats[filter]
2293 return self._filterpats[filter]
2289
2294
2290 def _filter(self, filterpats, filename, data):
2295 def _filter(self, filterpats, filename, data):
2291 for mf, fn, cmd in filterpats:
2296 for mf, fn, cmd in filterpats:
2292 if mf(filename):
2297 if mf(filename):
2293 self.ui.debug(
2298 self.ui.debug(
2294 b"filtering %s through %s\n"
2299 b"filtering %s through %s\n"
2295 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2300 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2296 )
2301 )
2297 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2302 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2298 break
2303 break
2299
2304
2300 return data
2305 return data
2301
2306
2302 @unfilteredpropertycache
2307 @unfilteredpropertycache
2303 def _encodefilterpats(self):
2308 def _encodefilterpats(self):
2304 return self._loadfilter(b'encode')
2309 return self._loadfilter(b'encode')
2305
2310
2306 @unfilteredpropertycache
2311 @unfilteredpropertycache
2307 def _decodefilterpats(self):
2312 def _decodefilterpats(self):
2308 return self._loadfilter(b'decode')
2313 return self._loadfilter(b'decode')
2309
2314
2310 def adddatafilter(self, name, filter):
2315 def adddatafilter(self, name, filter):
2311 self._datafilters[name] = filter
2316 self._datafilters[name] = filter
2312
2317
2313 def wread(self, filename: bytes) -> bytes:
2318 def wread(self, filename: bytes) -> bytes:
2314 if self.wvfs.islink(filename):
2319 if self.wvfs.islink(filename):
2315 data = self.wvfs.readlink(filename)
2320 data = self.wvfs.readlink(filename)
2316 else:
2321 else:
2317 data = self.wvfs.read(filename)
2322 data = self.wvfs.read(filename)
2318 return self._filter(self._encodefilterpats, filename, data)
2323 return self._filter(self._encodefilterpats, filename, data)
2319
2324
2320 def wwrite(
2325 def wwrite(
2321 self,
2326 self,
2322 filename: bytes,
2327 filename: bytes,
2323 data: bytes,
2328 data: bytes,
2324 flags: bytes,
2329 flags: bytes,
2325 backgroundclose=False,
2330 backgroundclose=False,
2326 **kwargs
2331 **kwargs
2327 ) -> int:
2332 ) -> int:
2328 """write ``data`` into ``filename`` in the working directory
2333 """write ``data`` into ``filename`` in the working directory
2329
2334
2330 This returns length of written (maybe decoded) data.
2335 This returns length of written (maybe decoded) data.
2331 """
2336 """
2332 data = self._filter(self._decodefilterpats, filename, data)
2337 data = self._filter(self._decodefilterpats, filename, data)
2333 if b'l' in flags:
2338 if b'l' in flags:
2334 self.wvfs.symlink(data, filename)
2339 self.wvfs.symlink(data, filename)
2335 else:
2340 else:
2336 self.wvfs.write(
2341 self.wvfs.write(
2337 filename, data, backgroundclose=backgroundclose, **kwargs
2342 filename, data, backgroundclose=backgroundclose, **kwargs
2338 )
2343 )
2339 if b'x' in flags:
2344 if b'x' in flags:
2340 self.wvfs.setflags(filename, False, True)
2345 self.wvfs.setflags(filename, False, True)
2341 else:
2346 else:
2342 self.wvfs.setflags(filename, False, False)
2347 self.wvfs.setflags(filename, False, False)
2343 return len(data)
2348 return len(data)
2344
2349
2345 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2350 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2346 return self._filter(self._decodefilterpats, filename, data)
2351 return self._filter(self._decodefilterpats, filename, data)
2347
2352
2348 def currenttransaction(self):
2353 def currenttransaction(self):
2349 """return the current transaction or None if non exists"""
2354 """return the current transaction or None if non exists"""
2350 if self._transref:
2355 if self._transref:
2351 tr = self._transref()
2356 tr = self._transref()
2352 else:
2357 else:
2353 tr = None
2358 tr = None
2354
2359
2355 if tr and tr.running():
2360 if tr and tr.running():
2356 return tr
2361 return tr
2357 return None
2362 return None
2358
2363
2359 def transaction(self, desc, report=None):
2364 def transaction(self, desc, report=None):
2360 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2365 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2361 b'devel', b'check-locks'
2366 b'devel', b'check-locks'
2362 ):
2367 ):
2363 if self._currentlock(self._lockref) is None:
2368 if self._currentlock(self._lockref) is None:
2364 raise error.ProgrammingError(b'transaction requires locking')
2369 raise error.ProgrammingError(b'transaction requires locking')
2365 tr = self.currenttransaction()
2370 tr = self.currenttransaction()
2366 if tr is not None:
2371 if tr is not None:
2367 return tr.nest(name=desc)
2372 return tr.nest(name=desc)
2368
2373
2369 # abort here if the journal already exists
2374 # abort here if the journal already exists
2370 if self.svfs.exists(b"journal"):
2375 if self.svfs.exists(b"journal"):
2371 raise error.RepoError(
2376 raise error.RepoError(
2372 _(b"abandoned transaction found"),
2377 _(b"abandoned transaction found"),
2373 hint=_(b"run 'hg recover' to clean up transaction"),
2378 hint=_(b"run 'hg recover' to clean up transaction"),
2374 )
2379 )
2375
2380
2376 idbase = b"%.40f#%f" % (random.random(), time.time())
2381 idbase = b"%.40f#%f" % (random.random(), time.time())
2377 ha = hex(hashutil.sha1(idbase).digest())
2382 ha = hex(hashutil.sha1(idbase).digest())
2378 txnid = b'TXN:' + ha
2383 txnid = b'TXN:' + ha
2379 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2384 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2380
2385
2381 self._writejournal(desc)
2386 self._writejournal(desc)
2382 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2387 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2383 if report:
2388 if report:
2384 rp = report
2389 rp = report
2385 else:
2390 else:
2386 rp = self.ui.warn
2391 rp = self.ui.warn
2387 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2392 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2388 # we must avoid cyclic reference between repo and transaction.
2393 # we must avoid cyclic reference between repo and transaction.
2389 reporef = weakref.ref(self)
2394 reporef = weakref.ref(self)
2390 # Code to track tag movement
2395 # Code to track tag movement
2391 #
2396 #
2392 # Since tags are all handled as file content, it is actually quite hard
2397 # Since tags are all handled as file content, it is actually quite hard
2393 # to track these movement from a code perspective. So we fallback to a
2398 # to track these movement from a code perspective. So we fallback to a
2394 # tracking at the repository level. One could envision to track changes
2399 # tracking at the repository level. One could envision to track changes
2395 # to the '.hgtags' file through changegroup apply but that fails to
2400 # to the '.hgtags' file through changegroup apply but that fails to
2396 # cope with case where transaction expose new heads without changegroup
2401 # cope with case where transaction expose new heads without changegroup
2397 # being involved (eg: phase movement).
2402 # being involved (eg: phase movement).
2398 #
2403 #
2399 # For now, We gate the feature behind a flag since this likely comes
2404 # For now, We gate the feature behind a flag since this likely comes
2400 # with performance impacts. The current code run more often than needed
2405 # with performance impacts. The current code run more often than needed
2401 # and do not use caches as much as it could. The current focus is on
2406 # and do not use caches as much as it could. The current focus is on
2402 # the behavior of the feature so we disable it by default. The flag
2407 # the behavior of the feature so we disable it by default. The flag
2403 # will be removed when we are happy with the performance impact.
2408 # will be removed when we are happy with the performance impact.
2404 #
2409 #
2405 # Once this feature is no longer experimental move the following
2410 # Once this feature is no longer experimental move the following
2406 # documentation to the appropriate help section:
2411 # documentation to the appropriate help section:
2407 #
2412 #
2408 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2413 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2409 # tags (new or changed or deleted tags). In addition the details of
2414 # tags (new or changed or deleted tags). In addition the details of
2410 # these changes are made available in a file at:
2415 # these changes are made available in a file at:
2411 # ``REPOROOT/.hg/changes/tags.changes``.
2416 # ``REPOROOT/.hg/changes/tags.changes``.
2412 # Make sure you check for HG_TAG_MOVED before reading that file as it
2417 # Make sure you check for HG_TAG_MOVED before reading that file as it
2413 # might exist from a previous transaction even if no tag were touched
2418 # might exist from a previous transaction even if no tag were touched
2414 # in this one. Changes are recorded in a line base format::
2419 # in this one. Changes are recorded in a line base format::
2415 #
2420 #
2416 # <action> <hex-node> <tag-name>\n
2421 # <action> <hex-node> <tag-name>\n
2417 #
2422 #
2418 # Actions are defined as follow:
2423 # Actions are defined as follow:
2419 # "-R": tag is removed,
2424 # "-R": tag is removed,
2420 # "+A": tag is added,
2425 # "+A": tag is added,
2421 # "-M": tag is moved (old value),
2426 # "-M": tag is moved (old value),
2422 # "+M": tag is moved (new value),
2427 # "+M": tag is moved (new value),
2423 tracktags = lambda x: None
2428 tracktags = lambda x: None
2424 # experimental config: experimental.hook-track-tags
2429 # experimental config: experimental.hook-track-tags
2425 shouldtracktags = self.ui.configbool(
2430 shouldtracktags = self.ui.configbool(
2426 b'experimental', b'hook-track-tags'
2431 b'experimental', b'hook-track-tags'
2427 )
2432 )
2428 if desc != b'strip' and shouldtracktags:
2433 if desc != b'strip' and shouldtracktags:
2429 oldheads = self.changelog.headrevs()
2434 oldheads = self.changelog.headrevs()
2430
2435
2431 def tracktags(tr2):
2436 def tracktags(tr2):
2432 repo = reporef()
2437 repo = reporef()
2433 assert repo is not None # help pytype
2438 assert repo is not None # help pytype
2434 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2439 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2435 newheads = repo.changelog.headrevs()
2440 newheads = repo.changelog.headrevs()
2436 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2441 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2437 # notes: we compare lists here.
2442 # notes: we compare lists here.
2438 # As we do it only once buiding set would not be cheaper
2443 # As we do it only once buiding set would not be cheaper
2439 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2444 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2440 if changes:
2445 if changes:
2441 tr2.hookargs[b'tag_moved'] = b'1'
2446 tr2.hookargs[b'tag_moved'] = b'1'
2442 with repo.vfs(
2447 with repo.vfs(
2443 b'changes/tags.changes', b'w', atomictemp=True
2448 b'changes/tags.changes', b'w', atomictemp=True
2444 ) as changesfile:
2449 ) as changesfile:
2445 # note: we do not register the file to the transaction
2450 # note: we do not register the file to the transaction
2446 # because we needs it to still exist on the transaction
2451 # because we needs it to still exist on the transaction
2447 # is close (for txnclose hooks)
2452 # is close (for txnclose hooks)
2448 tagsmod.writediff(changesfile, changes)
2453 tagsmod.writediff(changesfile, changes)
2449
2454
2450 def validate(tr2):
2455 def validate(tr2):
2451 """will run pre-closing hooks"""
2456 """will run pre-closing hooks"""
2452 # XXX the transaction API is a bit lacking here so we take a hacky
2457 # XXX the transaction API is a bit lacking here so we take a hacky
2453 # path for now
2458 # path for now
2454 #
2459 #
2455 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2460 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2456 # dict is copied before these run. In addition we needs the data
2461 # dict is copied before these run. In addition we needs the data
2457 # available to in memory hooks too.
2462 # available to in memory hooks too.
2458 #
2463 #
2459 # Moreover, we also need to make sure this runs before txnclose
2464 # Moreover, we also need to make sure this runs before txnclose
2460 # hooks and there is no "pending" mechanism that would execute
2465 # hooks and there is no "pending" mechanism that would execute
2461 # logic only if hooks are about to run.
2466 # logic only if hooks are about to run.
2462 #
2467 #
2463 # Fixing this limitation of the transaction is also needed to track
2468 # Fixing this limitation of the transaction is also needed to track
2464 # other families of changes (bookmarks, phases, obsolescence).
2469 # other families of changes (bookmarks, phases, obsolescence).
2465 #
2470 #
2466 # This will have to be fixed before we remove the experimental
2471 # This will have to be fixed before we remove the experimental
2467 # gating.
2472 # gating.
2468 tracktags(tr2)
2473 tracktags(tr2)
2469 repo = reporef()
2474 repo = reporef()
2470 assert repo is not None # help pytype
2475 assert repo is not None # help pytype
2471
2476
2472 singleheadopt = (b'experimental', b'single-head-per-branch')
2477 singleheadopt = (b'experimental', b'single-head-per-branch')
2473 singlehead = repo.ui.configbool(*singleheadopt)
2478 singlehead = repo.ui.configbool(*singleheadopt)
2474 if singlehead:
2479 if singlehead:
2475 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2480 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2476 accountclosed = singleheadsub.get(
2481 accountclosed = singleheadsub.get(
2477 b"account-closed-heads", False
2482 b"account-closed-heads", False
2478 )
2483 )
2479 if singleheadsub.get(b"public-changes-only", False):
2484 if singleheadsub.get(b"public-changes-only", False):
2480 filtername = b"immutable"
2485 filtername = b"immutable"
2481 else:
2486 else:
2482 filtername = b"visible"
2487 filtername = b"visible"
2483 scmutil.enforcesinglehead(
2488 scmutil.enforcesinglehead(
2484 repo, tr2, desc, accountclosed, filtername
2489 repo, tr2, desc, accountclosed, filtername
2485 )
2490 )
2486 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2491 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2487 for name, (old, new) in sorted(
2492 for name, (old, new) in sorted(
2488 tr.changes[b'bookmarks'].items()
2493 tr.changes[b'bookmarks'].items()
2489 ):
2494 ):
2490 args = tr.hookargs.copy()
2495 args = tr.hookargs.copy()
2491 args.update(bookmarks.preparehookargs(name, old, new))
2496 args.update(bookmarks.preparehookargs(name, old, new))
2492 repo.hook(
2497 repo.hook(
2493 b'pretxnclose-bookmark',
2498 b'pretxnclose-bookmark',
2494 throw=True,
2499 throw=True,
2495 **pycompat.strkwargs(args)
2500 **pycompat.strkwargs(args)
2496 )
2501 )
2497 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2502 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2498 cl = repo.unfiltered().changelog
2503 cl = repo.unfiltered().changelog
2499 for revs, (old, new) in tr.changes[b'phases']:
2504 for revs, (old, new) in tr.changes[b'phases']:
2500 for rev in revs:
2505 for rev in revs:
2501 args = tr.hookargs.copy()
2506 args = tr.hookargs.copy()
2502 node = hex(cl.node(rev))
2507 node = hex(cl.node(rev))
2503 args.update(phases.preparehookargs(node, old, new))
2508 args.update(phases.preparehookargs(node, old, new))
2504 repo.hook(
2509 repo.hook(
2505 b'pretxnclose-phase',
2510 b'pretxnclose-phase',
2506 throw=True,
2511 throw=True,
2507 **pycompat.strkwargs(args)
2512 **pycompat.strkwargs(args)
2508 )
2513 )
2509
2514
2510 repo.hook(
2515 repo.hook(
2511 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2516 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2512 )
2517 )
2513
2518
2514 def releasefn(tr, success):
2519 def releasefn(tr, success):
2515 repo = reporef()
2520 repo = reporef()
2516 if repo is None:
2521 if repo is None:
2517 # If the repo has been GC'd (and this release function is being
2522 # If the repo has been GC'd (and this release function is being
2518 # called from transaction.__del__), there's not much we can do,
2523 # called from transaction.__del__), there's not much we can do,
2519 # so just leave the unfinished transaction there and let the
2524 # so just leave the unfinished transaction there and let the
2520 # user run `hg recover`.
2525 # user run `hg recover`.
2521 return
2526 return
2522 if success:
2527 if success:
2523 # this should be explicitly invoked here, because
2528 # this should be explicitly invoked here, because
2524 # in-memory changes aren't written out at closing
2529 # in-memory changes aren't written out at closing
2525 # transaction, if tr.addfilegenerator (via
2530 # transaction, if tr.addfilegenerator (via
2526 # dirstate.write or so) isn't invoked while
2531 # dirstate.write or so) isn't invoked while
2527 # transaction running
2532 # transaction running
2528 repo.dirstate.write(None)
2533 repo.dirstate.write(None)
2529 else:
2534 else:
2530 # discard all changes (including ones already written
2535 # discard all changes (including ones already written
2531 # out) in this transaction
2536 # out) in this transaction
2532 narrowspec.restorebackup(self, b'journal.narrowspec')
2537 narrowspec.restorebackup(self, b'journal.narrowspec')
2533 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2538 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2534 repo.dirstate.restorebackup(None, b'journal.dirstate')
2539 repo.dirstate.restorebackup(None, b'journal.dirstate')
2535
2540
2536 repo.invalidate(clearfilecache=True)
2541 repo.invalidate(clearfilecache=True)
2537
2542
2538 tr = transaction.transaction(
2543 tr = transaction.transaction(
2539 rp,
2544 rp,
2540 self.svfs,
2545 self.svfs,
2541 vfsmap,
2546 vfsmap,
2542 b"journal",
2547 b"journal",
2543 b"undo",
2548 b"undo",
2544 aftertrans(renames),
2549 aftertrans(renames),
2545 self.store.createmode,
2550 self.store.createmode,
2546 validator=validate,
2551 validator=validate,
2547 releasefn=releasefn,
2552 releasefn=releasefn,
2548 checkambigfiles=_cachedfiles,
2553 checkambigfiles=_cachedfiles,
2549 name=desc,
2554 name=desc,
2550 )
2555 )
2551 tr.changes[b'origrepolen'] = len(self)
2556 tr.changes[b'origrepolen'] = len(self)
2552 tr.changes[b'obsmarkers'] = set()
2557 tr.changes[b'obsmarkers'] = set()
2553 tr.changes[b'phases'] = []
2558 tr.changes[b'phases'] = []
2554 tr.changes[b'bookmarks'] = {}
2559 tr.changes[b'bookmarks'] = {}
2555
2560
2556 tr.hookargs[b'txnid'] = txnid
2561 tr.hookargs[b'txnid'] = txnid
2557 tr.hookargs[b'txnname'] = desc
2562 tr.hookargs[b'txnname'] = desc
2558 tr.hookargs[b'changes'] = tr.changes
2563 tr.hookargs[b'changes'] = tr.changes
2559 # note: writing the fncache only during finalize mean that the file is
2564 # note: writing the fncache only during finalize mean that the file is
2560 # outdated when running hooks. As fncache is used for streaming clone,
2565 # outdated when running hooks. As fncache is used for streaming clone,
2561 # this is not expected to break anything that happen during the hooks.
2566 # this is not expected to break anything that happen during the hooks.
2562 tr.addfinalize(b'flush-fncache', self.store.write)
2567 tr.addfinalize(b'flush-fncache', self.store.write)
2563
2568
2564 def txnclosehook(tr2):
2569 def txnclosehook(tr2):
2565 """To be run if transaction is successful, will schedule a hook run"""
2570 """To be run if transaction is successful, will schedule a hook run"""
2566 # Don't reference tr2 in hook() so we don't hold a reference.
2571 # Don't reference tr2 in hook() so we don't hold a reference.
2567 # This reduces memory consumption when there are multiple
2572 # This reduces memory consumption when there are multiple
2568 # transactions per lock. This can likely go away if issue5045
2573 # transactions per lock. This can likely go away if issue5045
2569 # fixes the function accumulation.
2574 # fixes the function accumulation.
2570 hookargs = tr2.hookargs
2575 hookargs = tr2.hookargs
2571
2576
2572 def hookfunc(unused_success):
2577 def hookfunc(unused_success):
2573 repo = reporef()
2578 repo = reporef()
2574 assert repo is not None # help pytype
2579 assert repo is not None # help pytype
2575
2580
2576 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2581 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2577 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2582 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2578 for name, (old, new) in bmchanges:
2583 for name, (old, new) in bmchanges:
2579 args = tr.hookargs.copy()
2584 args = tr.hookargs.copy()
2580 args.update(bookmarks.preparehookargs(name, old, new))
2585 args.update(bookmarks.preparehookargs(name, old, new))
2581 repo.hook(
2586 repo.hook(
2582 b'txnclose-bookmark',
2587 b'txnclose-bookmark',
2583 throw=False,
2588 throw=False,
2584 **pycompat.strkwargs(args)
2589 **pycompat.strkwargs(args)
2585 )
2590 )
2586
2591
2587 if hook.hashook(repo.ui, b'txnclose-phase'):
2592 if hook.hashook(repo.ui, b'txnclose-phase'):
2588 cl = repo.unfiltered().changelog
2593 cl = repo.unfiltered().changelog
2589 phasemv = sorted(
2594 phasemv = sorted(
2590 tr.changes[b'phases'], key=lambda r: r[0][0]
2595 tr.changes[b'phases'], key=lambda r: r[0][0]
2591 )
2596 )
2592 for revs, (old, new) in phasemv:
2597 for revs, (old, new) in phasemv:
2593 for rev in revs:
2598 for rev in revs:
2594 args = tr.hookargs.copy()
2599 args = tr.hookargs.copy()
2595 node = hex(cl.node(rev))
2600 node = hex(cl.node(rev))
2596 args.update(phases.preparehookargs(node, old, new))
2601 args.update(phases.preparehookargs(node, old, new))
2597 repo.hook(
2602 repo.hook(
2598 b'txnclose-phase',
2603 b'txnclose-phase',
2599 throw=False,
2604 throw=False,
2600 **pycompat.strkwargs(args)
2605 **pycompat.strkwargs(args)
2601 )
2606 )
2602
2607
2603 repo.hook(
2608 repo.hook(
2604 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2609 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2605 )
2610 )
2606
2611
2607 repo = reporef()
2612 repo = reporef()
2608 assert repo is not None # help pytype
2613 assert repo is not None # help pytype
2609 repo._afterlock(hookfunc)
2614 repo._afterlock(hookfunc)
2610
2615
2611 tr.addfinalize(b'txnclose-hook', txnclosehook)
2616 tr.addfinalize(b'txnclose-hook', txnclosehook)
2612 # Include a leading "-" to make it happen before the transaction summary
2617 # Include a leading "-" to make it happen before the transaction summary
2613 # reports registered via scmutil.registersummarycallback() whose names
2618 # reports registered via scmutil.registersummarycallback() whose names
2614 # are 00-txnreport etc. That way, the caches will be warm when the
2619 # are 00-txnreport etc. That way, the caches will be warm when the
2615 # callbacks run.
2620 # callbacks run.
2616 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2621 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2617
2622
2618 def txnaborthook(tr2):
2623 def txnaborthook(tr2):
2619 """To be run if transaction is aborted"""
2624 """To be run if transaction is aborted"""
2620 repo = reporef()
2625 repo = reporef()
2621 assert repo is not None # help pytype
2626 assert repo is not None # help pytype
2622 repo.hook(
2627 repo.hook(
2623 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2628 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2624 )
2629 )
2625
2630
2626 tr.addabort(b'txnabort-hook', txnaborthook)
2631 tr.addabort(b'txnabort-hook', txnaborthook)
2627 # avoid eager cache invalidation. in-memory data should be identical
2632 # avoid eager cache invalidation. in-memory data should be identical
2628 # to stored data if transaction has no error.
2633 # to stored data if transaction has no error.
2629 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2634 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2630 self._transref = weakref.ref(tr)
2635 self._transref = weakref.ref(tr)
2631 scmutil.registersummarycallback(self, tr, desc)
2636 scmutil.registersummarycallback(self, tr, desc)
2632 return tr
2637 return tr
2633
2638
2634 def _journalfiles(self):
2639 def _journalfiles(self):
2635 first = (
2640 first = (
2636 (self.svfs, b'journal'),
2641 (self.svfs, b'journal'),
2637 (self.svfs, b'journal.narrowspec'),
2642 (self.svfs, b'journal.narrowspec'),
2638 (self.vfs, b'journal.narrowspec.dirstate'),
2643 (self.vfs, b'journal.narrowspec.dirstate'),
2639 (self.vfs, b'journal.dirstate'),
2644 (self.vfs, b'journal.dirstate'),
2640 )
2645 )
2641 middle = []
2646 middle = []
2642 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2647 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2643 if dirstate_data is not None:
2648 if dirstate_data is not None:
2644 middle.append((self.vfs, dirstate_data))
2649 middle.append((self.vfs, dirstate_data))
2645 end = (
2650 end = (
2646 (self.vfs, b'journal.branch'),
2651 (self.vfs, b'journal.branch'),
2647 (self.vfs, b'journal.desc'),
2652 (self.vfs, b'journal.desc'),
2648 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2653 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2649 (self.svfs, b'journal.phaseroots'),
2654 (self.svfs, b'journal.phaseroots'),
2650 )
2655 )
2651 return first + tuple(middle) + end
2656 return first + tuple(middle) + end
2652
2657
2653 def undofiles(self):
2658 def undofiles(self):
2654 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2659 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2655
2660
2656 @unfilteredmethod
2661 @unfilteredmethod
2657 def _writejournal(self, desc):
2662 def _writejournal(self, desc):
2658 self.dirstate.savebackup(None, b'journal.dirstate')
2663 self.dirstate.savebackup(None, b'journal.dirstate')
2659 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2664 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2660 narrowspec.savebackup(self, b'journal.narrowspec')
2665 narrowspec.savebackup(self, b'journal.narrowspec')
2661 self.vfs.write(
2666 self.vfs.write(
2662 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2667 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2663 )
2668 )
2664 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2669 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2665 bookmarksvfs = bookmarks.bookmarksvfs(self)
2670 bookmarksvfs = bookmarks.bookmarksvfs(self)
2666 bookmarksvfs.write(
2671 bookmarksvfs.write(
2667 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2672 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2668 )
2673 )
2669 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2674 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2670
2675
2671 def recover(self):
2676 def recover(self):
2672 with self.lock():
2677 with self.lock():
2673 if self.svfs.exists(b"journal"):
2678 if self.svfs.exists(b"journal"):
2674 self.ui.status(_(b"rolling back interrupted transaction\n"))
2679 self.ui.status(_(b"rolling back interrupted transaction\n"))
2675 vfsmap = {
2680 vfsmap = {
2676 b'': self.svfs,
2681 b'': self.svfs,
2677 b'plain': self.vfs,
2682 b'plain': self.vfs,
2678 }
2683 }
2679 transaction.rollback(
2684 transaction.rollback(
2680 self.svfs,
2685 self.svfs,
2681 vfsmap,
2686 vfsmap,
2682 b"journal",
2687 b"journal",
2683 self.ui.warn,
2688 self.ui.warn,
2684 checkambigfiles=_cachedfiles,
2689 checkambigfiles=_cachedfiles,
2685 )
2690 )
2686 self.invalidate()
2691 self.invalidate()
2687 return True
2692 return True
2688 else:
2693 else:
2689 self.ui.warn(_(b"no interrupted transaction available\n"))
2694 self.ui.warn(_(b"no interrupted transaction available\n"))
2690 return False
2695 return False
2691
2696
2692 def rollback(self, dryrun=False, force=False):
2697 def rollback(self, dryrun=False, force=False):
2693 wlock = lock = dsguard = None
2698 wlock = lock = dsguard = None
2694 try:
2699 try:
2695 wlock = self.wlock()
2700 wlock = self.wlock()
2696 lock = self.lock()
2701 lock = self.lock()
2697 if self.svfs.exists(b"undo"):
2702 if self.svfs.exists(b"undo"):
2698 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2703 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2699
2704
2700 return self._rollback(dryrun, force, dsguard)
2705 return self._rollback(dryrun, force, dsguard)
2701 else:
2706 else:
2702 self.ui.warn(_(b"no rollback information available\n"))
2707 self.ui.warn(_(b"no rollback information available\n"))
2703 return 1
2708 return 1
2704 finally:
2709 finally:
2705 release(dsguard, lock, wlock)
2710 release(dsguard, lock, wlock)
2706
2711
2707 @unfilteredmethod # Until we get smarter cache management
2712 @unfilteredmethod # Until we get smarter cache management
2708 def _rollback(self, dryrun, force, dsguard):
2713 def _rollback(self, dryrun, force, dsguard):
2709 ui = self.ui
2714 ui = self.ui
2710 try:
2715 try:
2711 args = self.vfs.read(b'undo.desc').splitlines()
2716 args = self.vfs.read(b'undo.desc').splitlines()
2712 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2717 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2713 if len(args) >= 3:
2718 if len(args) >= 3:
2714 detail = args[2]
2719 detail = args[2]
2715 oldtip = oldlen - 1
2720 oldtip = oldlen - 1
2716
2721
2717 if detail and ui.verbose:
2722 if detail and ui.verbose:
2718 msg = _(
2723 msg = _(
2719 b'repository tip rolled back to revision %d'
2724 b'repository tip rolled back to revision %d'
2720 b' (undo %s: %s)\n'
2725 b' (undo %s: %s)\n'
2721 ) % (oldtip, desc, detail)
2726 ) % (oldtip, desc, detail)
2722 else:
2727 else:
2723 msg = _(
2728 msg = _(
2724 b'repository tip rolled back to revision %d (undo %s)\n'
2729 b'repository tip rolled back to revision %d (undo %s)\n'
2725 ) % (oldtip, desc)
2730 ) % (oldtip, desc)
2726 except IOError:
2731 except IOError:
2727 msg = _(b'rolling back unknown transaction\n')
2732 msg = _(b'rolling back unknown transaction\n')
2728 desc = None
2733 desc = None
2729
2734
2730 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2735 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2731 raise error.Abort(
2736 raise error.Abort(
2732 _(
2737 _(
2733 b'rollback of last commit while not checked out '
2738 b'rollback of last commit while not checked out '
2734 b'may lose data'
2739 b'may lose data'
2735 ),
2740 ),
2736 hint=_(b'use -f to force'),
2741 hint=_(b'use -f to force'),
2737 )
2742 )
2738
2743
2739 ui.status(msg)
2744 ui.status(msg)
2740 if dryrun:
2745 if dryrun:
2741 return 0
2746 return 0
2742
2747
2743 parents = self.dirstate.parents()
2748 parents = self.dirstate.parents()
2744 self.destroying()
2749 self.destroying()
2745 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2750 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2746 transaction.rollback(
2751 transaction.rollback(
2747 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2752 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2748 )
2753 )
2749 bookmarksvfs = bookmarks.bookmarksvfs(self)
2754 bookmarksvfs = bookmarks.bookmarksvfs(self)
2750 if bookmarksvfs.exists(b'undo.bookmarks'):
2755 if bookmarksvfs.exists(b'undo.bookmarks'):
2751 bookmarksvfs.rename(
2756 bookmarksvfs.rename(
2752 b'undo.bookmarks', b'bookmarks', checkambig=True
2757 b'undo.bookmarks', b'bookmarks', checkambig=True
2753 )
2758 )
2754 if self.svfs.exists(b'undo.phaseroots'):
2759 if self.svfs.exists(b'undo.phaseroots'):
2755 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2760 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2756 self.invalidate()
2761 self.invalidate()
2757
2762
2758 has_node = self.changelog.index.has_node
2763 has_node = self.changelog.index.has_node
2759 parentgone = any(not has_node(p) for p in parents)
2764 parentgone = any(not has_node(p) for p in parents)
2760 if parentgone:
2765 if parentgone:
2761 # prevent dirstateguard from overwriting already restored one
2766 # prevent dirstateguard from overwriting already restored one
2762 dsguard.close()
2767 dsguard.close()
2763
2768
2764 narrowspec.restorebackup(self, b'undo.narrowspec')
2769 narrowspec.restorebackup(self, b'undo.narrowspec')
2765 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2770 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2766 self.dirstate.restorebackup(None, b'undo.dirstate')
2771 self.dirstate.restorebackup(None, b'undo.dirstate')
2767 try:
2772 try:
2768 branch = self.vfs.read(b'undo.branch')
2773 branch = self.vfs.read(b'undo.branch')
2769 self.dirstate.setbranch(encoding.tolocal(branch))
2774 self.dirstate.setbranch(encoding.tolocal(branch))
2770 except IOError:
2775 except IOError:
2771 ui.warn(
2776 ui.warn(
2772 _(
2777 _(
2773 b'named branch could not be reset: '
2778 b'named branch could not be reset: '
2774 b'current branch is still \'%s\'\n'
2779 b'current branch is still \'%s\'\n'
2775 )
2780 )
2776 % self.dirstate.branch()
2781 % self.dirstate.branch()
2777 )
2782 )
2778
2783
2779 parents = tuple([p.rev() for p in self[None].parents()])
2784 parents = tuple([p.rev() for p in self[None].parents()])
2780 if len(parents) > 1:
2785 if len(parents) > 1:
2781 ui.status(
2786 ui.status(
2782 _(
2787 _(
2783 b'working directory now based on '
2788 b'working directory now based on '
2784 b'revisions %d and %d\n'
2789 b'revisions %d and %d\n'
2785 )
2790 )
2786 % parents
2791 % parents
2787 )
2792 )
2788 else:
2793 else:
2789 ui.status(
2794 ui.status(
2790 _(b'working directory now based on revision %d\n') % parents
2795 _(b'working directory now based on revision %d\n') % parents
2791 )
2796 )
2792 mergestatemod.mergestate.clean(self)
2797 mergestatemod.mergestate.clean(self)
2793
2798
2794 # TODO: if we know which new heads may result from this rollback, pass
2799 # TODO: if we know which new heads may result from this rollback, pass
2795 # them to destroy(), which will prevent the branchhead cache from being
2800 # them to destroy(), which will prevent the branchhead cache from being
2796 # invalidated.
2801 # invalidated.
2797 self.destroyed()
2802 self.destroyed()
2798 return 0
2803 return 0
2799
2804
2800 def _buildcacheupdater(self, newtransaction):
2805 def _buildcacheupdater(self, newtransaction):
2801 """called during transaction to build the callback updating cache
2806 """called during transaction to build the callback updating cache
2802
2807
2803 Lives on the repository to help extension who might want to augment
2808 Lives on the repository to help extension who might want to augment
2804 this logic. For this purpose, the created transaction is passed to the
2809 this logic. For this purpose, the created transaction is passed to the
2805 method.
2810 method.
2806 """
2811 """
2807 # we must avoid cyclic reference between repo and transaction.
2812 # we must avoid cyclic reference between repo and transaction.
2808 reporef = weakref.ref(self)
2813 reporef = weakref.ref(self)
2809
2814
2810 def updater(tr):
2815 def updater(tr):
2811 repo = reporef()
2816 repo = reporef()
2812 assert repo is not None # help pytype
2817 assert repo is not None # help pytype
2813 repo.updatecaches(tr)
2818 repo.updatecaches(tr)
2814
2819
2815 return updater
2820 return updater
2816
2821
2817 @unfilteredmethod
2822 @unfilteredmethod
2818 def updatecaches(self, tr=None, full=False, caches=None):
2823 def updatecaches(self, tr=None, full=False, caches=None):
2819 """warm appropriate caches
2824 """warm appropriate caches
2820
2825
2821 If this function is called after a transaction closed. The transaction
2826 If this function is called after a transaction closed. The transaction
2822 will be available in the 'tr' argument. This can be used to selectively
2827 will be available in the 'tr' argument. This can be used to selectively
2823 update caches relevant to the changes in that transaction.
2828 update caches relevant to the changes in that transaction.
2824
2829
2825 If 'full' is set, make sure all caches the function knows about have
2830 If 'full' is set, make sure all caches the function knows about have
2826 up-to-date data. Even the ones usually loaded more lazily.
2831 up-to-date data. Even the ones usually loaded more lazily.
2827
2832
2828 The `full` argument can take a special "post-clone" value. In this case
2833 The `full` argument can take a special "post-clone" value. In this case
2829 the cache warming is made after a clone and of the slower cache might
2834 the cache warming is made after a clone and of the slower cache might
2830 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2835 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2831 as we plan for a cleaner way to deal with this for 5.9.
2836 as we plan for a cleaner way to deal with this for 5.9.
2832 """
2837 """
2833 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2838 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2834 # During strip, many caches are invalid but
2839 # During strip, many caches are invalid but
2835 # later call to `destroyed` will refresh them.
2840 # later call to `destroyed` will refresh them.
2836 return
2841 return
2837
2842
2838 unfi = self.unfiltered()
2843 unfi = self.unfiltered()
2839
2844
2840 if full:
2845 if full:
2841 msg = (
2846 msg = (
2842 "`full` argument for `repo.updatecaches` is deprecated\n"
2847 "`full` argument for `repo.updatecaches` is deprecated\n"
2843 "(use `caches=repository.CACHE_ALL` instead)"
2848 "(use `caches=repository.CACHE_ALL` instead)"
2844 )
2849 )
2845 self.ui.deprecwarn(msg, b"5.9")
2850 self.ui.deprecwarn(msg, b"5.9")
2846 caches = repository.CACHES_ALL
2851 caches = repository.CACHES_ALL
2847 if full == b"post-clone":
2852 if full == b"post-clone":
2848 caches = repository.CACHES_POST_CLONE
2853 caches = repository.CACHES_POST_CLONE
2849 caches = repository.CACHES_ALL
2854 caches = repository.CACHES_ALL
2850 elif caches is None:
2855 elif caches is None:
2851 caches = repository.CACHES_DEFAULT
2856 caches = repository.CACHES_DEFAULT
2852
2857
2853 if repository.CACHE_BRANCHMAP_SERVED in caches:
2858 if repository.CACHE_BRANCHMAP_SERVED in caches:
2854 if tr is None or tr.changes[b'origrepolen'] < len(self):
2859 if tr is None or tr.changes[b'origrepolen'] < len(self):
2855 # accessing the 'served' branchmap should refresh all the others,
2860 # accessing the 'served' branchmap should refresh all the others,
2856 self.ui.debug(b'updating the branch cache\n')
2861 self.ui.debug(b'updating the branch cache\n')
2857 self.filtered(b'served').branchmap()
2862 self.filtered(b'served').branchmap()
2858 self.filtered(b'served.hidden').branchmap()
2863 self.filtered(b'served.hidden').branchmap()
2859 # flush all possibly delayed write.
2864 # flush all possibly delayed write.
2860 self._branchcaches.write_delayed(self)
2865 self._branchcaches.write_delayed(self)
2861
2866
2862 if repository.CACHE_CHANGELOG_CACHE in caches:
2867 if repository.CACHE_CHANGELOG_CACHE in caches:
2863 self.changelog.update_caches(transaction=tr)
2868 self.changelog.update_caches(transaction=tr)
2864
2869
2865 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2870 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2866 self.manifestlog.update_caches(transaction=tr)
2871 self.manifestlog.update_caches(transaction=tr)
2867
2872
2868 if repository.CACHE_REV_BRANCH in caches:
2873 if repository.CACHE_REV_BRANCH in caches:
2869 rbc = unfi.revbranchcache()
2874 rbc = unfi.revbranchcache()
2870 for r in unfi.changelog:
2875 for r in unfi.changelog:
2871 rbc.branchinfo(r)
2876 rbc.branchinfo(r)
2872 rbc.write()
2877 rbc.write()
2873
2878
2874 if repository.CACHE_FULL_MANIFEST in caches:
2879 if repository.CACHE_FULL_MANIFEST in caches:
2875 # ensure the working copy parents are in the manifestfulltextcache
2880 # ensure the working copy parents are in the manifestfulltextcache
2876 for ctx in self[b'.'].parents():
2881 for ctx in self[b'.'].parents():
2877 ctx.manifest() # accessing the manifest is enough
2882 ctx.manifest() # accessing the manifest is enough
2878
2883
2879 if repository.CACHE_FILE_NODE_TAGS in caches:
2884 if repository.CACHE_FILE_NODE_TAGS in caches:
2880 # accessing fnode cache warms the cache
2885 # accessing fnode cache warms the cache
2881 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2886 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2882
2887
2883 if repository.CACHE_TAGS_DEFAULT in caches:
2888 if repository.CACHE_TAGS_DEFAULT in caches:
2884 # accessing tags warm the cache
2889 # accessing tags warm the cache
2885 self.tags()
2890 self.tags()
2886 if repository.CACHE_TAGS_SERVED in caches:
2891 if repository.CACHE_TAGS_SERVED in caches:
2887 self.filtered(b'served').tags()
2892 self.filtered(b'served').tags()
2888
2893
2889 if repository.CACHE_BRANCHMAP_ALL in caches:
2894 if repository.CACHE_BRANCHMAP_ALL in caches:
2890 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2895 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2891 # so we're forcing a write to cause these caches to be warmed up
2896 # so we're forcing a write to cause these caches to be warmed up
2892 # even if they haven't explicitly been requested yet (if they've
2897 # even if they haven't explicitly been requested yet (if they've
2893 # never been used by hg, they won't ever have been written, even if
2898 # never been used by hg, they won't ever have been written, even if
2894 # they're a subset of another kind of cache that *has* been used).
2899 # they're a subset of another kind of cache that *has* been used).
2895 for filt in repoview.filtertable.keys():
2900 for filt in repoview.filtertable.keys():
2896 filtered = self.filtered(filt)
2901 filtered = self.filtered(filt)
2897 filtered.branchmap().write(filtered)
2902 filtered.branchmap().write(filtered)
2898
2903
2899 def invalidatecaches(self):
2904 def invalidatecaches(self):
2900
2905
2901 if '_tagscache' in vars(self):
2906 if '_tagscache' in vars(self):
2902 # can't use delattr on proxy
2907 # can't use delattr on proxy
2903 del self.__dict__['_tagscache']
2908 del self.__dict__['_tagscache']
2904
2909
2905 self._branchcaches.clear()
2910 self._branchcaches.clear()
2906 self.invalidatevolatilesets()
2911 self.invalidatevolatilesets()
2907 self._sparsesignaturecache.clear()
2912 self._sparsesignaturecache.clear()
2908
2913
2909 def invalidatevolatilesets(self):
2914 def invalidatevolatilesets(self):
2910 self.filteredrevcache.clear()
2915 self.filteredrevcache.clear()
2911 obsolete.clearobscaches(self)
2916 obsolete.clearobscaches(self)
2912 self._quick_access_changeid_invalidate()
2917 self._quick_access_changeid_invalidate()
2913
2918
2914 def invalidatedirstate(self):
2919 def invalidatedirstate(self):
2915 """Invalidates the dirstate, causing the next call to dirstate
2920 """Invalidates the dirstate, causing the next call to dirstate
2916 to check if it was modified since the last time it was read,
2921 to check if it was modified since the last time it was read,
2917 rereading it if it has.
2922 rereading it if it has.
2918
2923
2919 This is different to dirstate.invalidate() that it doesn't always
2924 This is different to dirstate.invalidate() that it doesn't always
2920 rereads the dirstate. Use dirstate.invalidate() if you want to
2925 rereads the dirstate. Use dirstate.invalidate() if you want to
2921 explicitly read the dirstate again (i.e. restoring it to a previous
2926 explicitly read the dirstate again (i.e. restoring it to a previous
2922 known good state)."""
2927 known good state)."""
2923 if hasunfilteredcache(self, 'dirstate'):
2928 if hasunfilteredcache(self, 'dirstate'):
2924 for k in self.dirstate._filecache:
2929 for k in self.dirstate._filecache:
2925 try:
2930 try:
2926 delattr(self.dirstate, k)
2931 delattr(self.dirstate, k)
2927 except AttributeError:
2932 except AttributeError:
2928 pass
2933 pass
2929 delattr(self.unfiltered(), 'dirstate')
2934 delattr(self.unfiltered(), 'dirstate')
2930
2935
2931 def invalidate(self, clearfilecache=False):
2936 def invalidate(self, clearfilecache=False):
2932 """Invalidates both store and non-store parts other than dirstate
2937 """Invalidates both store and non-store parts other than dirstate
2933
2938
2934 If a transaction is running, invalidation of store is omitted,
2939 If a transaction is running, invalidation of store is omitted,
2935 because discarding in-memory changes might cause inconsistency
2940 because discarding in-memory changes might cause inconsistency
2936 (e.g. incomplete fncache causes unintentional failure, but
2941 (e.g. incomplete fncache causes unintentional failure, but
2937 redundant one doesn't).
2942 redundant one doesn't).
2938 """
2943 """
2939 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2944 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2940 for k in list(self._filecache.keys()):
2945 for k in list(self._filecache.keys()):
2941 # dirstate is invalidated separately in invalidatedirstate()
2946 # dirstate is invalidated separately in invalidatedirstate()
2942 if k == b'dirstate':
2947 if k == b'dirstate':
2943 continue
2948 continue
2944 if (
2949 if (
2945 k == b'changelog'
2950 k == b'changelog'
2946 and self.currenttransaction()
2951 and self.currenttransaction()
2947 and self.changelog._delayed
2952 and self.changelog._delayed
2948 ):
2953 ):
2949 # The changelog object may store unwritten revisions. We don't
2954 # The changelog object may store unwritten revisions. We don't
2950 # want to lose them.
2955 # want to lose them.
2951 # TODO: Solve the problem instead of working around it.
2956 # TODO: Solve the problem instead of working around it.
2952 continue
2957 continue
2953
2958
2954 if clearfilecache:
2959 if clearfilecache:
2955 del self._filecache[k]
2960 del self._filecache[k]
2956 try:
2961 try:
2957 delattr(unfiltered, k)
2962 delattr(unfiltered, k)
2958 except AttributeError:
2963 except AttributeError:
2959 pass
2964 pass
2960 self.invalidatecaches()
2965 self.invalidatecaches()
2961 if not self.currenttransaction():
2966 if not self.currenttransaction():
2962 # TODO: Changing contents of store outside transaction
2967 # TODO: Changing contents of store outside transaction
2963 # causes inconsistency. We should make in-memory store
2968 # causes inconsistency. We should make in-memory store
2964 # changes detectable, and abort if changed.
2969 # changes detectable, and abort if changed.
2965 self.store.invalidatecaches()
2970 self.store.invalidatecaches()
2966
2971
2967 def invalidateall(self):
2972 def invalidateall(self):
2968 """Fully invalidates both store and non-store parts, causing the
2973 """Fully invalidates both store and non-store parts, causing the
2969 subsequent operation to reread any outside changes."""
2974 subsequent operation to reread any outside changes."""
2970 # extension should hook this to invalidate its caches
2975 # extension should hook this to invalidate its caches
2971 self.invalidate()
2976 self.invalidate()
2972 self.invalidatedirstate()
2977 self.invalidatedirstate()
2973
2978
2974 @unfilteredmethod
2979 @unfilteredmethod
2975 def _refreshfilecachestats(self, tr):
2980 def _refreshfilecachestats(self, tr):
2976 """Reload stats of cached files so that they are flagged as valid"""
2981 """Reload stats of cached files so that they are flagged as valid"""
2977 for k, ce in self._filecache.items():
2982 for k, ce in self._filecache.items():
2978 k = pycompat.sysstr(k)
2983 k = pycompat.sysstr(k)
2979 if k == 'dirstate' or k not in self.__dict__:
2984 if k == 'dirstate' or k not in self.__dict__:
2980 continue
2985 continue
2981 ce.refresh()
2986 ce.refresh()
2982
2987
2983 def _lock(
2988 def _lock(
2984 self,
2989 self,
2985 vfs,
2990 vfs,
2986 lockname,
2991 lockname,
2987 wait,
2992 wait,
2988 releasefn,
2993 releasefn,
2989 acquirefn,
2994 acquirefn,
2990 desc,
2995 desc,
2991 ):
2996 ):
2992 timeout = 0
2997 timeout = 0
2993 warntimeout = 0
2998 warntimeout = 0
2994 if wait:
2999 if wait:
2995 timeout = self.ui.configint(b"ui", b"timeout")
3000 timeout = self.ui.configint(b"ui", b"timeout")
2996 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3001 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2997 # internal config: ui.signal-safe-lock
3002 # internal config: ui.signal-safe-lock
2998 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3003 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2999
3004
3000 l = lockmod.trylock(
3005 l = lockmod.trylock(
3001 self.ui,
3006 self.ui,
3002 vfs,
3007 vfs,
3003 lockname,
3008 lockname,
3004 timeout,
3009 timeout,
3005 warntimeout,
3010 warntimeout,
3006 releasefn=releasefn,
3011 releasefn=releasefn,
3007 acquirefn=acquirefn,
3012 acquirefn=acquirefn,
3008 desc=desc,
3013 desc=desc,
3009 signalsafe=signalsafe,
3014 signalsafe=signalsafe,
3010 )
3015 )
3011 return l
3016 return l
3012
3017
3013 def _afterlock(self, callback):
3018 def _afterlock(self, callback):
3014 """add a callback to be run when the repository is fully unlocked
3019 """add a callback to be run when the repository is fully unlocked
3015
3020
3016 The callback will be executed when the outermost lock is released
3021 The callback will be executed when the outermost lock is released
3017 (with wlock being higher level than 'lock')."""
3022 (with wlock being higher level than 'lock')."""
3018 for ref in (self._wlockref, self._lockref):
3023 for ref in (self._wlockref, self._lockref):
3019 l = ref and ref()
3024 l = ref and ref()
3020 if l and l.held:
3025 if l and l.held:
3021 l.postrelease.append(callback)
3026 l.postrelease.append(callback)
3022 break
3027 break
3023 else: # no lock have been found.
3028 else: # no lock have been found.
3024 callback(True)
3029 callback(True)
3025
3030
3026 def lock(self, wait=True):
3031 def lock(self, wait=True):
3027 """Lock the repository store (.hg/store) and return a weak reference
3032 """Lock the repository store (.hg/store) and return a weak reference
3028 to the lock. Use this before modifying the store (e.g. committing or
3033 to the lock. Use this before modifying the store (e.g. committing or
3029 stripping). If you are opening a transaction, get a lock as well.)
3034 stripping). If you are opening a transaction, get a lock as well.)
3030
3035
3031 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3036 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3032 'wlock' first to avoid a dead-lock hazard."""
3037 'wlock' first to avoid a dead-lock hazard."""
3033 l = self._currentlock(self._lockref)
3038 l = self._currentlock(self._lockref)
3034 if l is not None:
3039 if l is not None:
3035 l.lock()
3040 l.lock()
3036 return l
3041 return l
3037
3042
3038 l = self._lock(
3043 l = self._lock(
3039 vfs=self.svfs,
3044 vfs=self.svfs,
3040 lockname=b"lock",
3045 lockname=b"lock",
3041 wait=wait,
3046 wait=wait,
3042 releasefn=None,
3047 releasefn=None,
3043 acquirefn=self.invalidate,
3048 acquirefn=self.invalidate,
3044 desc=_(b'repository %s') % self.origroot,
3049 desc=_(b'repository %s') % self.origroot,
3045 )
3050 )
3046 self._lockref = weakref.ref(l)
3051 self._lockref = weakref.ref(l)
3047 return l
3052 return l
3048
3053
3049 def wlock(self, wait=True):
3054 def wlock(self, wait=True):
3050 """Lock the non-store parts of the repository (everything under
3055 """Lock the non-store parts of the repository (everything under
3051 .hg except .hg/store) and return a weak reference to the lock.
3056 .hg except .hg/store) and return a weak reference to the lock.
3052
3057
3053 Use this before modifying files in .hg.
3058 Use this before modifying files in .hg.
3054
3059
3055 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3060 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3056 'wlock' first to avoid a dead-lock hazard."""
3061 'wlock' first to avoid a dead-lock hazard."""
3057 l = self._wlockref() if self._wlockref else None
3062 l = self._wlockref() if self._wlockref else None
3058 if l is not None and l.held:
3063 if l is not None and l.held:
3059 l.lock()
3064 l.lock()
3060 return l
3065 return l
3061
3066
3062 # We do not need to check for non-waiting lock acquisition. Such
3067 # We do not need to check for non-waiting lock acquisition. Such
3063 # acquisition would not cause dead-lock as they would just fail.
3068 # acquisition would not cause dead-lock as they would just fail.
3064 if wait and (
3069 if wait and (
3065 self.ui.configbool(b'devel', b'all-warnings')
3070 self.ui.configbool(b'devel', b'all-warnings')
3066 or self.ui.configbool(b'devel', b'check-locks')
3071 or self.ui.configbool(b'devel', b'check-locks')
3067 ):
3072 ):
3068 if self._currentlock(self._lockref) is not None:
3073 if self._currentlock(self._lockref) is not None:
3069 self.ui.develwarn(b'"wlock" acquired after "lock"')
3074 self.ui.develwarn(b'"wlock" acquired after "lock"')
3070
3075
3071 def unlock():
3076 def unlock():
3072 if self.dirstate.pendingparentchange():
3077 if self.dirstate.pendingparentchange():
3073 self.dirstate.invalidate()
3078 self.dirstate.invalidate()
3074 else:
3079 else:
3075 self.dirstate.write(None)
3080 self.dirstate.write(None)
3076
3081
3077 self._filecache[b'dirstate'].refresh()
3082 self._filecache[b'dirstate'].refresh()
3078
3083
3079 l = self._lock(
3084 l = self._lock(
3080 self.vfs,
3085 self.vfs,
3081 b"wlock",
3086 b"wlock",
3082 wait,
3087 wait,
3083 unlock,
3088 unlock,
3084 self.invalidatedirstate,
3089 self.invalidatedirstate,
3085 _(b'working directory of %s') % self.origroot,
3090 _(b'working directory of %s') % self.origroot,
3086 )
3091 )
3087 self._wlockref = weakref.ref(l)
3092 self._wlockref = weakref.ref(l)
3088 return l
3093 return l
3089
3094
3090 def _currentlock(self, lockref):
3095 def _currentlock(self, lockref):
3091 """Returns the lock if it's held, or None if it's not."""
3096 """Returns the lock if it's held, or None if it's not."""
3092 if lockref is None:
3097 if lockref is None:
3093 return None
3098 return None
3094 l = lockref()
3099 l = lockref()
3095 if l is None or not l.held:
3100 if l is None or not l.held:
3096 return None
3101 return None
3097 return l
3102 return l
3098
3103
3099 def currentwlock(self):
3104 def currentwlock(self):
3100 """Returns the wlock if it's held, or None if it's not."""
3105 """Returns the wlock if it's held, or None if it's not."""
3101 return self._currentlock(self._wlockref)
3106 return self._currentlock(self._wlockref)
3102
3107
3103 def checkcommitpatterns(self, wctx, match, status, fail):
3108 def checkcommitpatterns(self, wctx, match, status, fail):
3104 """check for commit arguments that aren't committable"""
3109 """check for commit arguments that aren't committable"""
3105 if match.isexact() or match.prefix():
3110 if match.isexact() or match.prefix():
3106 matched = set(status.modified + status.added + status.removed)
3111 matched = set(status.modified + status.added + status.removed)
3107
3112
3108 for f in match.files():
3113 for f in match.files():
3109 f = self.dirstate.normalize(f)
3114 f = self.dirstate.normalize(f)
3110 if f == b'.' or f in matched or f in wctx.substate:
3115 if f == b'.' or f in matched or f in wctx.substate:
3111 continue
3116 continue
3112 if f in status.deleted:
3117 if f in status.deleted:
3113 fail(f, _(b'file not found!'))
3118 fail(f, _(b'file not found!'))
3114 # Is it a directory that exists or used to exist?
3119 # Is it a directory that exists or used to exist?
3115 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3120 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3116 d = f + b'/'
3121 d = f + b'/'
3117 for mf in matched:
3122 for mf in matched:
3118 if mf.startswith(d):
3123 if mf.startswith(d):
3119 break
3124 break
3120 else:
3125 else:
3121 fail(f, _(b"no match under directory!"))
3126 fail(f, _(b"no match under directory!"))
3122 elif f not in self.dirstate:
3127 elif f not in self.dirstate:
3123 fail(f, _(b"file not tracked!"))
3128 fail(f, _(b"file not tracked!"))
3124
3129
3125 @unfilteredmethod
3130 @unfilteredmethod
3126 def commit(
3131 def commit(
3127 self,
3132 self,
3128 text=b"",
3133 text=b"",
3129 user=None,
3134 user=None,
3130 date=None,
3135 date=None,
3131 match=None,
3136 match=None,
3132 force=False,
3137 force=False,
3133 editor=None,
3138 editor=None,
3134 extra=None,
3139 extra=None,
3135 ):
3140 ):
3136 """Add a new revision to current repository.
3141 """Add a new revision to current repository.
3137
3142
3138 Revision information is gathered from the working directory,
3143 Revision information is gathered from the working directory,
3139 match can be used to filter the committed files. If editor is
3144 match can be used to filter the committed files. If editor is
3140 supplied, it is called to get a commit message.
3145 supplied, it is called to get a commit message.
3141 """
3146 """
3142 if extra is None:
3147 if extra is None:
3143 extra = {}
3148 extra = {}
3144
3149
3145 def fail(f, msg):
3150 def fail(f, msg):
3146 raise error.InputError(b'%s: %s' % (f, msg))
3151 raise error.InputError(b'%s: %s' % (f, msg))
3147
3152
3148 if not match:
3153 if not match:
3149 match = matchmod.always()
3154 match = matchmod.always()
3150
3155
3151 if not force:
3156 if not force:
3152 match.bad = fail
3157 match.bad = fail
3153
3158
3154 # lock() for recent changelog (see issue4368)
3159 # lock() for recent changelog (see issue4368)
3155 with self.wlock(), self.lock():
3160 with self.wlock(), self.lock():
3156 wctx = self[None]
3161 wctx = self[None]
3157 merge = len(wctx.parents()) > 1
3162 merge = len(wctx.parents()) > 1
3158
3163
3159 if not force and merge and not match.always():
3164 if not force and merge and not match.always():
3160 raise error.Abort(
3165 raise error.Abort(
3161 _(
3166 _(
3162 b'cannot partially commit a merge '
3167 b'cannot partially commit a merge '
3163 b'(do not specify files or patterns)'
3168 b'(do not specify files or patterns)'
3164 )
3169 )
3165 )
3170 )
3166
3171
3167 status = self.status(match=match, clean=force)
3172 status = self.status(match=match, clean=force)
3168 if force:
3173 if force:
3169 status.modified.extend(
3174 status.modified.extend(
3170 status.clean
3175 status.clean
3171 ) # mq may commit clean files
3176 ) # mq may commit clean files
3172
3177
3173 # check subrepos
3178 # check subrepos
3174 subs, commitsubs, newstate = subrepoutil.precommit(
3179 subs, commitsubs, newstate = subrepoutil.precommit(
3175 self.ui, wctx, status, match, force=force
3180 self.ui, wctx, status, match, force=force
3176 )
3181 )
3177
3182
3178 # make sure all explicit patterns are matched
3183 # make sure all explicit patterns are matched
3179 if not force:
3184 if not force:
3180 self.checkcommitpatterns(wctx, match, status, fail)
3185 self.checkcommitpatterns(wctx, match, status, fail)
3181
3186
3182 cctx = context.workingcommitctx(
3187 cctx = context.workingcommitctx(
3183 self, status, text, user, date, extra
3188 self, status, text, user, date, extra
3184 )
3189 )
3185
3190
3186 ms = mergestatemod.mergestate.read(self)
3191 ms = mergestatemod.mergestate.read(self)
3187 mergeutil.checkunresolved(ms)
3192 mergeutil.checkunresolved(ms)
3188
3193
3189 # internal config: ui.allowemptycommit
3194 # internal config: ui.allowemptycommit
3190 if cctx.isempty() and not self.ui.configbool(
3195 if cctx.isempty() and not self.ui.configbool(
3191 b'ui', b'allowemptycommit'
3196 b'ui', b'allowemptycommit'
3192 ):
3197 ):
3193 self.ui.debug(b'nothing to commit, clearing merge state\n')
3198 self.ui.debug(b'nothing to commit, clearing merge state\n')
3194 ms.reset()
3199 ms.reset()
3195 return None
3200 return None
3196
3201
3197 if merge and cctx.deleted():
3202 if merge and cctx.deleted():
3198 raise error.Abort(_(b"cannot commit merge with missing files"))
3203 raise error.Abort(_(b"cannot commit merge with missing files"))
3199
3204
3200 if editor:
3205 if editor:
3201 cctx._text = editor(self, cctx, subs)
3206 cctx._text = editor(self, cctx, subs)
3202 edited = text != cctx._text
3207 edited = text != cctx._text
3203
3208
3204 # Save commit message in case this transaction gets rolled back
3209 # Save commit message in case this transaction gets rolled back
3205 # (e.g. by a pretxncommit hook). Leave the content alone on
3210 # (e.g. by a pretxncommit hook). Leave the content alone on
3206 # the assumption that the user will use the same editor again.
3211 # the assumption that the user will use the same editor again.
3207 msg_path = self.savecommitmessage(cctx._text)
3212 msg_path = self.savecommitmessage(cctx._text)
3208
3213
3209 # commit subs and write new state
3214 # commit subs and write new state
3210 if subs:
3215 if subs:
3211 uipathfn = scmutil.getuipathfn(self)
3216 uipathfn = scmutil.getuipathfn(self)
3212 for s in sorted(commitsubs):
3217 for s in sorted(commitsubs):
3213 sub = wctx.sub(s)
3218 sub = wctx.sub(s)
3214 self.ui.status(
3219 self.ui.status(
3215 _(b'committing subrepository %s\n')
3220 _(b'committing subrepository %s\n')
3216 % uipathfn(subrepoutil.subrelpath(sub))
3221 % uipathfn(subrepoutil.subrelpath(sub))
3217 )
3222 )
3218 sr = sub.commit(cctx._text, user, date)
3223 sr = sub.commit(cctx._text, user, date)
3219 newstate[s] = (newstate[s][0], sr)
3224 newstate[s] = (newstate[s][0], sr)
3220 subrepoutil.writestate(self, newstate)
3225 subrepoutil.writestate(self, newstate)
3221
3226
3222 p1, p2 = self.dirstate.parents()
3227 p1, p2 = self.dirstate.parents()
3223 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3228 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3224 try:
3229 try:
3225 self.hook(
3230 self.hook(
3226 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3231 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3227 )
3232 )
3228 with self.transaction(b'commit'):
3233 with self.transaction(b'commit'):
3229 ret = self.commitctx(cctx, True)
3234 ret = self.commitctx(cctx, True)
3230 # update bookmarks, dirstate and mergestate
3235 # update bookmarks, dirstate and mergestate
3231 bookmarks.update(self, [p1, p2], ret)
3236 bookmarks.update(self, [p1, p2], ret)
3232 cctx.markcommitted(ret)
3237 cctx.markcommitted(ret)
3233 ms.reset()
3238 ms.reset()
3234 except: # re-raises
3239 except: # re-raises
3235 if edited:
3240 if edited:
3236 self.ui.write(
3241 self.ui.write(
3237 _(b'note: commit message saved in %s\n') % msg_path
3242 _(b'note: commit message saved in %s\n') % msg_path
3238 )
3243 )
3239 self.ui.write(
3244 self.ui.write(
3240 _(
3245 _(
3241 b"note: use 'hg commit --logfile "
3246 b"note: use 'hg commit --logfile "
3242 b"%s --edit' to reuse it\n"
3247 b"%s --edit' to reuse it\n"
3243 )
3248 )
3244 % msg_path
3249 % msg_path
3245 )
3250 )
3246 raise
3251 raise
3247
3252
3248 def commithook(unused_success):
3253 def commithook(unused_success):
3249 # hack for command that use a temporary commit (eg: histedit)
3254 # hack for command that use a temporary commit (eg: histedit)
3250 # temporary commit got stripped before hook release
3255 # temporary commit got stripped before hook release
3251 if self.changelog.hasnode(ret):
3256 if self.changelog.hasnode(ret):
3252 self.hook(
3257 self.hook(
3253 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3258 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3254 )
3259 )
3255
3260
3256 self._afterlock(commithook)
3261 self._afterlock(commithook)
3257 return ret
3262 return ret
3258
3263
3259 @unfilteredmethod
3264 @unfilteredmethod
3260 def commitctx(self, ctx, error=False, origctx=None):
3265 def commitctx(self, ctx, error=False, origctx=None):
3261 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3266 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3262
3267
3263 @unfilteredmethod
3268 @unfilteredmethod
3264 def destroying(self):
3269 def destroying(self):
3265 """Inform the repository that nodes are about to be destroyed.
3270 """Inform the repository that nodes are about to be destroyed.
3266 Intended for use by strip and rollback, so there's a common
3271 Intended for use by strip and rollback, so there's a common
3267 place for anything that has to be done before destroying history.
3272 place for anything that has to be done before destroying history.
3268
3273
3269 This is mostly useful for saving state that is in memory and waiting
3274 This is mostly useful for saving state that is in memory and waiting
3270 to be flushed when the current lock is released. Because a call to
3275 to be flushed when the current lock is released. Because a call to
3271 destroyed is imminent, the repo will be invalidated causing those
3276 destroyed is imminent, the repo will be invalidated causing those
3272 changes to stay in memory (waiting for the next unlock), or vanish
3277 changes to stay in memory (waiting for the next unlock), or vanish
3273 completely.
3278 completely.
3274 """
3279 """
3275 # When using the same lock to commit and strip, the phasecache is left
3280 # When using the same lock to commit and strip, the phasecache is left
3276 # dirty after committing. Then when we strip, the repo is invalidated,
3281 # dirty after committing. Then when we strip, the repo is invalidated,
3277 # causing those changes to disappear.
3282 # causing those changes to disappear.
3278 if '_phasecache' in vars(self):
3283 if '_phasecache' in vars(self):
3279 self._phasecache.write()
3284 self._phasecache.write()
3280
3285
3281 @unfilteredmethod
3286 @unfilteredmethod
3282 def destroyed(self):
3287 def destroyed(self):
3283 """Inform the repository that nodes have been destroyed.
3288 """Inform the repository that nodes have been destroyed.
3284 Intended for use by strip and rollback, so there's a common
3289 Intended for use by strip and rollback, so there's a common
3285 place for anything that has to be done after destroying history.
3290 place for anything that has to be done after destroying history.
3286 """
3291 """
3287 # When one tries to:
3292 # When one tries to:
3288 # 1) destroy nodes thus calling this method (e.g. strip)
3293 # 1) destroy nodes thus calling this method (e.g. strip)
3289 # 2) use phasecache somewhere (e.g. commit)
3294 # 2) use phasecache somewhere (e.g. commit)
3290 #
3295 #
3291 # then 2) will fail because the phasecache contains nodes that were
3296 # then 2) will fail because the phasecache contains nodes that were
3292 # removed. We can either remove phasecache from the filecache,
3297 # removed. We can either remove phasecache from the filecache,
3293 # causing it to reload next time it is accessed, or simply filter
3298 # causing it to reload next time it is accessed, or simply filter
3294 # the removed nodes now and write the updated cache.
3299 # the removed nodes now and write the updated cache.
3295 self._phasecache.filterunknown(self)
3300 self._phasecache.filterunknown(self)
3296 self._phasecache.write()
3301 self._phasecache.write()
3297
3302
3298 # refresh all repository caches
3303 # refresh all repository caches
3299 self.updatecaches()
3304 self.updatecaches()
3300
3305
3301 # Ensure the persistent tag cache is updated. Doing it now
3306 # Ensure the persistent tag cache is updated. Doing it now
3302 # means that the tag cache only has to worry about destroyed
3307 # means that the tag cache only has to worry about destroyed
3303 # heads immediately after a strip/rollback. That in turn
3308 # heads immediately after a strip/rollback. That in turn
3304 # guarantees that "cachetip == currenttip" (comparing both rev
3309 # guarantees that "cachetip == currenttip" (comparing both rev
3305 # and node) always means no nodes have been added or destroyed.
3310 # and node) always means no nodes have been added or destroyed.
3306
3311
3307 # XXX this is suboptimal when qrefresh'ing: we strip the current
3312 # XXX this is suboptimal when qrefresh'ing: we strip the current
3308 # head, refresh the tag cache, then immediately add a new head.
3313 # head, refresh the tag cache, then immediately add a new head.
3309 # But I think doing it this way is necessary for the "instant
3314 # But I think doing it this way is necessary for the "instant
3310 # tag cache retrieval" case to work.
3315 # tag cache retrieval" case to work.
3311 self.invalidate()
3316 self.invalidate()
3312
3317
3313 def status(
3318 def status(
3314 self,
3319 self,
3315 node1=b'.',
3320 node1=b'.',
3316 node2=None,
3321 node2=None,
3317 match=None,
3322 match=None,
3318 ignored=False,
3323 ignored=False,
3319 clean=False,
3324 clean=False,
3320 unknown=False,
3325 unknown=False,
3321 listsubrepos=False,
3326 listsubrepos=False,
3322 ):
3327 ):
3323 '''a convenience method that calls node1.status(node2)'''
3328 '''a convenience method that calls node1.status(node2)'''
3324 return self[node1].status(
3329 return self[node1].status(
3325 node2, match, ignored, clean, unknown, listsubrepos
3330 node2, match, ignored, clean, unknown, listsubrepos
3326 )
3331 )
3327
3332
3328 def addpostdsstatus(self, ps):
3333 def addpostdsstatus(self, ps):
3329 """Add a callback to run within the wlock, at the point at which status
3334 """Add a callback to run within the wlock, at the point at which status
3330 fixups happen.
3335 fixups happen.
3331
3336
3332 On status completion, callback(wctx, status) will be called with the
3337 On status completion, callback(wctx, status) will be called with the
3333 wlock held, unless the dirstate has changed from underneath or the wlock
3338 wlock held, unless the dirstate has changed from underneath or the wlock
3334 couldn't be grabbed.
3339 couldn't be grabbed.
3335
3340
3336 Callbacks should not capture and use a cached copy of the dirstate --
3341 Callbacks should not capture and use a cached copy of the dirstate --
3337 it might change in the meanwhile. Instead, they should access the
3342 it might change in the meanwhile. Instead, they should access the
3338 dirstate via wctx.repo().dirstate.
3343 dirstate via wctx.repo().dirstate.
3339
3344
3340 This list is emptied out after each status run -- extensions should
3345 This list is emptied out after each status run -- extensions should
3341 make sure it adds to this list each time dirstate.status is called.
3346 make sure it adds to this list each time dirstate.status is called.
3342 Extensions should also make sure they don't call this for statuses
3347 Extensions should also make sure they don't call this for statuses
3343 that don't involve the dirstate.
3348 that don't involve the dirstate.
3344 """
3349 """
3345
3350
3346 # The list is located here for uniqueness reasons -- it is actually
3351 # The list is located here for uniqueness reasons -- it is actually
3347 # managed by the workingctx, but that isn't unique per-repo.
3352 # managed by the workingctx, but that isn't unique per-repo.
3348 self._postdsstatus.append(ps)
3353 self._postdsstatus.append(ps)
3349
3354
3350 def postdsstatus(self):
3355 def postdsstatus(self):
3351 """Used by workingctx to get the list of post-dirstate-status hooks."""
3356 """Used by workingctx to get the list of post-dirstate-status hooks."""
3352 return self._postdsstatus
3357 return self._postdsstatus
3353
3358
3354 def clearpostdsstatus(self):
3359 def clearpostdsstatus(self):
3355 """Used by workingctx to clear post-dirstate-status hooks."""
3360 """Used by workingctx to clear post-dirstate-status hooks."""
3356 del self._postdsstatus[:]
3361 del self._postdsstatus[:]
3357
3362
3358 def heads(self, start=None):
3363 def heads(self, start=None):
3359 if start is None:
3364 if start is None:
3360 cl = self.changelog
3365 cl = self.changelog
3361 headrevs = reversed(cl.headrevs())
3366 headrevs = reversed(cl.headrevs())
3362 return [cl.node(rev) for rev in headrevs]
3367 return [cl.node(rev) for rev in headrevs]
3363
3368
3364 heads = self.changelog.heads(start)
3369 heads = self.changelog.heads(start)
3365 # sort the output in rev descending order
3370 # sort the output in rev descending order
3366 return sorted(heads, key=self.changelog.rev, reverse=True)
3371 return sorted(heads, key=self.changelog.rev, reverse=True)
3367
3372
3368 def branchheads(self, branch=None, start=None, closed=False):
3373 def branchheads(self, branch=None, start=None, closed=False):
3369 """return a (possibly filtered) list of heads for the given branch
3374 """return a (possibly filtered) list of heads for the given branch
3370
3375
3371 Heads are returned in topological order, from newest to oldest.
3376 Heads are returned in topological order, from newest to oldest.
3372 If branch is None, use the dirstate branch.
3377 If branch is None, use the dirstate branch.
3373 If start is not None, return only heads reachable from start.
3378 If start is not None, return only heads reachable from start.
3374 If closed is True, return heads that are marked as closed as well.
3379 If closed is True, return heads that are marked as closed as well.
3375 """
3380 """
3376 if branch is None:
3381 if branch is None:
3377 branch = self[None].branch()
3382 branch = self[None].branch()
3378 branches = self.branchmap()
3383 branches = self.branchmap()
3379 if not branches.hasbranch(branch):
3384 if not branches.hasbranch(branch):
3380 return []
3385 return []
3381 # the cache returns heads ordered lowest to highest
3386 # the cache returns heads ordered lowest to highest
3382 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3387 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3383 if start is not None:
3388 if start is not None:
3384 # filter out the heads that cannot be reached from startrev
3389 # filter out the heads that cannot be reached from startrev
3385 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3390 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3386 bheads = [h for h in bheads if h in fbheads]
3391 bheads = [h for h in bheads if h in fbheads]
3387 return bheads
3392 return bheads
3388
3393
3389 def branches(self, nodes):
3394 def branches(self, nodes):
3390 if not nodes:
3395 if not nodes:
3391 nodes = [self.changelog.tip()]
3396 nodes = [self.changelog.tip()]
3392 b = []
3397 b = []
3393 for n in nodes:
3398 for n in nodes:
3394 t = n
3399 t = n
3395 while True:
3400 while True:
3396 p = self.changelog.parents(n)
3401 p = self.changelog.parents(n)
3397 if p[1] != self.nullid or p[0] == self.nullid:
3402 if p[1] != self.nullid or p[0] == self.nullid:
3398 b.append((t, n, p[0], p[1]))
3403 b.append((t, n, p[0], p[1]))
3399 break
3404 break
3400 n = p[0]
3405 n = p[0]
3401 return b
3406 return b
3402
3407
3403 def between(self, pairs):
3408 def between(self, pairs):
3404 r = []
3409 r = []
3405
3410
3406 for top, bottom in pairs:
3411 for top, bottom in pairs:
3407 n, l, i = top, [], 0
3412 n, l, i = top, [], 0
3408 f = 1
3413 f = 1
3409
3414
3410 while n != bottom and n != self.nullid:
3415 while n != bottom and n != self.nullid:
3411 p = self.changelog.parents(n)[0]
3416 p = self.changelog.parents(n)[0]
3412 if i == f:
3417 if i == f:
3413 l.append(n)
3418 l.append(n)
3414 f = f * 2
3419 f = f * 2
3415 n = p
3420 n = p
3416 i += 1
3421 i += 1
3417
3422
3418 r.append(l)
3423 r.append(l)
3419
3424
3420 return r
3425 return r
3421
3426
3422 def checkpush(self, pushop):
3427 def checkpush(self, pushop):
3423 """Extensions can override this function if additional checks have
3428 """Extensions can override this function if additional checks have
3424 to be performed before pushing, or call it if they override push
3429 to be performed before pushing, or call it if they override push
3425 command.
3430 command.
3426 """
3431 """
3427
3432
3428 @unfilteredpropertycache
3433 @unfilteredpropertycache
3429 def prepushoutgoinghooks(self):
3434 def prepushoutgoinghooks(self):
3430 """Return util.hooks consists of a pushop with repo, remote, outgoing
3435 """Return util.hooks consists of a pushop with repo, remote, outgoing
3431 methods, which are called before pushing changesets.
3436 methods, which are called before pushing changesets.
3432 """
3437 """
3433 return util.hooks()
3438 return util.hooks()
3434
3439
3435 def pushkey(self, namespace, key, old, new):
3440 def pushkey(self, namespace, key, old, new):
3436 try:
3441 try:
3437 tr = self.currenttransaction()
3442 tr = self.currenttransaction()
3438 hookargs = {}
3443 hookargs = {}
3439 if tr is not None:
3444 if tr is not None:
3440 hookargs.update(tr.hookargs)
3445 hookargs.update(tr.hookargs)
3441 hookargs = pycompat.strkwargs(hookargs)
3446 hookargs = pycompat.strkwargs(hookargs)
3442 hookargs['namespace'] = namespace
3447 hookargs['namespace'] = namespace
3443 hookargs['key'] = key
3448 hookargs['key'] = key
3444 hookargs['old'] = old
3449 hookargs['old'] = old
3445 hookargs['new'] = new
3450 hookargs['new'] = new
3446 self.hook(b'prepushkey', throw=True, **hookargs)
3451 self.hook(b'prepushkey', throw=True, **hookargs)
3447 except error.HookAbort as exc:
3452 except error.HookAbort as exc:
3448 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3453 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3449 if exc.hint:
3454 if exc.hint:
3450 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3455 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3451 return False
3456 return False
3452 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3457 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3453 ret = pushkey.push(self, namespace, key, old, new)
3458 ret = pushkey.push(self, namespace, key, old, new)
3454
3459
3455 def runhook(unused_success):
3460 def runhook(unused_success):
3456 self.hook(
3461 self.hook(
3457 b'pushkey',
3462 b'pushkey',
3458 namespace=namespace,
3463 namespace=namespace,
3459 key=key,
3464 key=key,
3460 old=old,
3465 old=old,
3461 new=new,
3466 new=new,
3462 ret=ret,
3467 ret=ret,
3463 )
3468 )
3464
3469
3465 self._afterlock(runhook)
3470 self._afterlock(runhook)
3466 return ret
3471 return ret
3467
3472
3468 def listkeys(self, namespace):
3473 def listkeys(self, namespace):
3469 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3474 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3470 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3475 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3471 values = pushkey.list(self, namespace)
3476 values = pushkey.list(self, namespace)
3472 self.hook(b'listkeys', namespace=namespace, values=values)
3477 self.hook(b'listkeys', namespace=namespace, values=values)
3473 return values
3478 return values
3474
3479
3475 def debugwireargs(self, one, two, three=None, four=None, five=None):
3480 def debugwireargs(self, one, two, three=None, four=None, five=None):
3476 '''used to test argument passing over the wire'''
3481 '''used to test argument passing over the wire'''
3477 return b"%s %s %s %s %s" % (
3482 return b"%s %s %s %s %s" % (
3478 one,
3483 one,
3479 two,
3484 two,
3480 pycompat.bytestr(three),
3485 pycompat.bytestr(three),
3481 pycompat.bytestr(four),
3486 pycompat.bytestr(four),
3482 pycompat.bytestr(five),
3487 pycompat.bytestr(five),
3483 )
3488 )
3484
3489
3485 def savecommitmessage(self, text):
3490 def savecommitmessage(self, text):
3486 fp = self.vfs(b'last-message.txt', b'wb')
3491 fp = self.vfs(b'last-message.txt', b'wb')
3487 try:
3492 try:
3488 fp.write(text)
3493 fp.write(text)
3489 finally:
3494 finally:
3490 fp.close()
3495 fp.close()
3491 return self.pathto(fp.name[len(self.root) + 1 :])
3496 return self.pathto(fp.name[len(self.root) + 1 :])
3492
3497
3493 def register_wanted_sidedata(self, category):
3498 def register_wanted_sidedata(self, category):
3494 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3499 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3495 # Only revlogv2 repos can want sidedata.
3500 # Only revlogv2 repos can want sidedata.
3496 return
3501 return
3497 self._wanted_sidedata.add(pycompat.bytestr(category))
3502 self._wanted_sidedata.add(pycompat.bytestr(category))
3498
3503
3499 def register_sidedata_computer(
3504 def register_sidedata_computer(
3500 self, kind, category, keys, computer, flags, replace=False
3505 self, kind, category, keys, computer, flags, replace=False
3501 ):
3506 ):
3502 if kind not in revlogconst.ALL_KINDS:
3507 if kind not in revlogconst.ALL_KINDS:
3503 msg = _(b"unexpected revlog kind '%s'.")
3508 msg = _(b"unexpected revlog kind '%s'.")
3504 raise error.ProgrammingError(msg % kind)
3509 raise error.ProgrammingError(msg % kind)
3505 category = pycompat.bytestr(category)
3510 category = pycompat.bytestr(category)
3506 already_registered = category in self._sidedata_computers.get(kind, [])
3511 already_registered = category in self._sidedata_computers.get(kind, [])
3507 if already_registered and not replace:
3512 if already_registered and not replace:
3508 msg = _(
3513 msg = _(
3509 b"cannot register a sidedata computer twice for category '%s'."
3514 b"cannot register a sidedata computer twice for category '%s'."
3510 )
3515 )
3511 raise error.ProgrammingError(msg % category)
3516 raise error.ProgrammingError(msg % category)
3512 if replace and not already_registered:
3517 if replace and not already_registered:
3513 msg = _(
3518 msg = _(
3514 b"cannot replace a sidedata computer that isn't registered "
3519 b"cannot replace a sidedata computer that isn't registered "
3515 b"for category '%s'."
3520 b"for category '%s'."
3516 )
3521 )
3517 raise error.ProgrammingError(msg % category)
3522 raise error.ProgrammingError(msg % category)
3518 self._sidedata_computers.setdefault(kind, {})
3523 self._sidedata_computers.setdefault(kind, {})
3519 self._sidedata_computers[kind][category] = (keys, computer, flags)
3524 self._sidedata_computers[kind][category] = (keys, computer, flags)
3520
3525
3521
3526
3522 # used to avoid circular references so destructors work
3527 # used to avoid circular references so destructors work
3523 def aftertrans(files):
3528 def aftertrans(files):
3524 renamefiles = [tuple(t) for t in files]
3529 renamefiles = [tuple(t) for t in files]
3525
3530
3526 def a():
3531 def a():
3527 for vfs, src, dest in renamefiles:
3532 for vfs, src, dest in renamefiles:
3528 # if src and dest refer to a same file, vfs.rename is a no-op,
3533 # if src and dest refer to a same file, vfs.rename is a no-op,
3529 # leaving both src and dest on disk. delete dest to make sure
3534 # leaving both src and dest on disk. delete dest to make sure
3530 # the rename couldn't be such a no-op.
3535 # the rename couldn't be such a no-op.
3531 vfs.tryunlink(dest)
3536 vfs.tryunlink(dest)
3532 try:
3537 try:
3533 vfs.rename(src, dest)
3538 vfs.rename(src, dest)
3534 except FileNotFoundError: # journal file does not yet exist
3539 except FileNotFoundError: # journal file does not yet exist
3535 pass
3540 pass
3536
3541
3537 return a
3542 return a
3538
3543
3539
3544
3540 def undoname(fn: bytes) -> bytes:
3545 def undoname(fn: bytes) -> bytes:
3541 base, name = os.path.split(fn)
3546 base, name = os.path.split(fn)
3542 assert name.startswith(b'journal')
3547 assert name.startswith(b'journal')
3543 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3548 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3544
3549
3545
3550
3546 def instance(ui, path: bytes, create, intents=None, createopts=None):
3551 def instance(ui, path: bytes, create, intents=None, createopts=None):
3547
3552
3548 # prevent cyclic import localrepo -> upgrade -> localrepo
3553 # prevent cyclic import localrepo -> upgrade -> localrepo
3549 from . import upgrade
3554 from . import upgrade
3550
3555
3551 localpath = urlutil.urllocalpath(path)
3556 localpath = urlutil.urllocalpath(path)
3552 if create:
3557 if create:
3553 createrepository(ui, localpath, createopts=createopts)
3558 createrepository(ui, localpath, createopts=createopts)
3554
3559
3555 def repo_maker():
3560 def repo_maker():
3556 return makelocalrepository(ui, localpath, intents=intents)
3561 return makelocalrepository(ui, localpath, intents=intents)
3557
3562
3558 repo = repo_maker()
3563 repo = repo_maker()
3559 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3564 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3560 return repo
3565 return repo
3561
3566
3562
3567
3563 def islocal(path: bytes) -> bool:
3568 def islocal(path: bytes) -> bool:
3564 return True
3569 return True
3565
3570
3566
3571
3567 def defaultcreateopts(ui, createopts=None):
3572 def defaultcreateopts(ui, createopts=None):
3568 """Populate the default creation options for a repository.
3573 """Populate the default creation options for a repository.
3569
3574
3570 A dictionary of explicitly requested creation options can be passed
3575 A dictionary of explicitly requested creation options can be passed
3571 in. Missing keys will be populated.
3576 in. Missing keys will be populated.
3572 """
3577 """
3573 createopts = dict(createopts or {})
3578 createopts = dict(createopts or {})
3574
3579
3575 if b'backend' not in createopts:
3580 if b'backend' not in createopts:
3576 # experimental config: storage.new-repo-backend
3581 # experimental config: storage.new-repo-backend
3577 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3582 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3578
3583
3579 return createopts
3584 return createopts
3580
3585
3581
3586
3582 def clone_requirements(ui, createopts, srcrepo):
3587 def clone_requirements(ui, createopts, srcrepo):
3583 """clone the requirements of a local repo for a local clone
3588 """clone the requirements of a local repo for a local clone
3584
3589
3585 The store requirements are unchanged while the working copy requirements
3590 The store requirements are unchanged while the working copy requirements
3586 depends on the configuration
3591 depends on the configuration
3587 """
3592 """
3588 target_requirements = set()
3593 target_requirements = set()
3589 if not srcrepo.requirements:
3594 if not srcrepo.requirements:
3590 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3595 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3591 # with it.
3596 # with it.
3592 return target_requirements
3597 return target_requirements
3593 createopts = defaultcreateopts(ui, createopts=createopts)
3598 createopts = defaultcreateopts(ui, createopts=createopts)
3594 for r in newreporequirements(ui, createopts):
3599 for r in newreporequirements(ui, createopts):
3595 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3600 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3596 target_requirements.add(r)
3601 target_requirements.add(r)
3597
3602
3598 for r in srcrepo.requirements:
3603 for r in srcrepo.requirements:
3599 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3604 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3600 target_requirements.add(r)
3605 target_requirements.add(r)
3601 return target_requirements
3606 return target_requirements
3602
3607
3603
3608
3604 def newreporequirements(ui, createopts):
3609 def newreporequirements(ui, createopts):
3605 """Determine the set of requirements for a new local repository.
3610 """Determine the set of requirements for a new local repository.
3606
3611
3607 Extensions can wrap this function to specify custom requirements for
3612 Extensions can wrap this function to specify custom requirements for
3608 new repositories.
3613 new repositories.
3609 """
3614 """
3610
3615
3611 if b'backend' not in createopts:
3616 if b'backend' not in createopts:
3612 raise error.ProgrammingError(
3617 raise error.ProgrammingError(
3613 b'backend key not present in createopts; '
3618 b'backend key not present in createopts; '
3614 b'was defaultcreateopts() called?'
3619 b'was defaultcreateopts() called?'
3615 )
3620 )
3616
3621
3617 if createopts[b'backend'] != b'revlogv1':
3622 if createopts[b'backend'] != b'revlogv1':
3618 raise error.Abort(
3623 raise error.Abort(
3619 _(
3624 _(
3620 b'unable to determine repository requirements for '
3625 b'unable to determine repository requirements for '
3621 b'storage backend: %s'
3626 b'storage backend: %s'
3622 )
3627 )
3623 % createopts[b'backend']
3628 % createopts[b'backend']
3624 )
3629 )
3625
3630
3626 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3631 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3627 if ui.configbool(b'format', b'usestore'):
3632 if ui.configbool(b'format', b'usestore'):
3628 requirements.add(requirementsmod.STORE_REQUIREMENT)
3633 requirements.add(requirementsmod.STORE_REQUIREMENT)
3629 if ui.configbool(b'format', b'usefncache'):
3634 if ui.configbool(b'format', b'usefncache'):
3630 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3635 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3631 if ui.configbool(b'format', b'dotencode'):
3636 if ui.configbool(b'format', b'dotencode'):
3632 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3637 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3633
3638
3634 compengines = ui.configlist(b'format', b'revlog-compression')
3639 compengines = ui.configlist(b'format', b'revlog-compression')
3635 for compengine in compengines:
3640 for compengine in compengines:
3636 if compengine in util.compengines:
3641 if compengine in util.compengines:
3637 engine = util.compengines[compengine]
3642 engine = util.compengines[compengine]
3638 if engine.available() and engine.revlogheader():
3643 if engine.available() and engine.revlogheader():
3639 break
3644 break
3640 else:
3645 else:
3641 raise error.Abort(
3646 raise error.Abort(
3642 _(
3647 _(
3643 b'compression engines %s defined by '
3648 b'compression engines %s defined by '
3644 b'format.revlog-compression not available'
3649 b'format.revlog-compression not available'
3645 )
3650 )
3646 % b', '.join(b'"%s"' % e for e in compengines),
3651 % b', '.join(b'"%s"' % e for e in compengines),
3647 hint=_(
3652 hint=_(
3648 b'run "hg debuginstall" to list available '
3653 b'run "hg debuginstall" to list available '
3649 b'compression engines'
3654 b'compression engines'
3650 ),
3655 ),
3651 )
3656 )
3652
3657
3653 # zlib is the historical default and doesn't need an explicit requirement.
3658 # zlib is the historical default and doesn't need an explicit requirement.
3654 if compengine == b'zstd':
3659 if compengine == b'zstd':
3655 requirements.add(b'revlog-compression-zstd')
3660 requirements.add(b'revlog-compression-zstd')
3656 elif compengine != b'zlib':
3661 elif compengine != b'zlib':
3657 requirements.add(b'exp-compression-%s' % compengine)
3662 requirements.add(b'exp-compression-%s' % compengine)
3658
3663
3659 if scmutil.gdinitconfig(ui):
3664 if scmutil.gdinitconfig(ui):
3660 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3665 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3661 if ui.configbool(b'format', b'sparse-revlog'):
3666 if ui.configbool(b'format', b'sparse-revlog'):
3662 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3667 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3663
3668
3664 # experimental config: format.use-dirstate-v2
3669 # experimental config: format.use-dirstate-v2
3665 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3670 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3666 if ui.configbool(b'format', b'use-dirstate-v2'):
3671 if ui.configbool(b'format', b'use-dirstate-v2'):
3667 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3672 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3668
3673
3669 # experimental config: format.exp-use-copies-side-data-changeset
3674 # experimental config: format.exp-use-copies-side-data-changeset
3670 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3675 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3671 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3676 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3672 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3677 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3673 if ui.configbool(b'experimental', b'treemanifest'):
3678 if ui.configbool(b'experimental', b'treemanifest'):
3674 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3679 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3675
3680
3676 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3681 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3677 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3682 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3678 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3683 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3679
3684
3680 revlogv2 = ui.config(b'experimental', b'revlogv2')
3685 revlogv2 = ui.config(b'experimental', b'revlogv2')
3681 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3686 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3682 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3687 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3683 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3688 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3684 # experimental config: format.internal-phase
3689 # experimental config: format.internal-phase
3685 if ui.configbool(b'format', b'use-internal-phase'):
3690 if ui.configbool(b'format', b'use-internal-phase'):
3686 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3691 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3687
3692
3688 # experimental config: format.exp-archived-phase
3693 # experimental config: format.exp-archived-phase
3689 if ui.configbool(b'format', b'exp-archived-phase'):
3694 if ui.configbool(b'format', b'exp-archived-phase'):
3690 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3695 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3691
3696
3692 if createopts.get(b'narrowfiles'):
3697 if createopts.get(b'narrowfiles'):
3693 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3698 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3694
3699
3695 if createopts.get(b'lfs'):
3700 if createopts.get(b'lfs'):
3696 requirements.add(b'lfs')
3701 requirements.add(b'lfs')
3697
3702
3698 if ui.configbool(b'format', b'bookmarks-in-store'):
3703 if ui.configbool(b'format', b'bookmarks-in-store'):
3699 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3704 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3700
3705
3701 if ui.configbool(b'format', b'use-persistent-nodemap'):
3706 if ui.configbool(b'format', b'use-persistent-nodemap'):
3702 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3707 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3703
3708
3704 # if share-safe is enabled, let's create the new repository with the new
3709 # if share-safe is enabled, let's create the new repository with the new
3705 # requirement
3710 # requirement
3706 if ui.configbool(b'format', b'use-share-safe'):
3711 if ui.configbool(b'format', b'use-share-safe'):
3707 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3712 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3708
3713
3709 # if we are creating a share-repoΒΉ we have to handle requirement
3714 # if we are creating a share-repoΒΉ we have to handle requirement
3710 # differently.
3715 # differently.
3711 #
3716 #
3712 # [1] (i.e. reusing the store from another repository, just having a
3717 # [1] (i.e. reusing the store from another repository, just having a
3713 # working copy)
3718 # working copy)
3714 if b'sharedrepo' in createopts:
3719 if b'sharedrepo' in createopts:
3715 source_requirements = set(createopts[b'sharedrepo'].requirements)
3720 source_requirements = set(createopts[b'sharedrepo'].requirements)
3716
3721
3717 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3722 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3718 # share to an old school repository, we have to copy the
3723 # share to an old school repository, we have to copy the
3719 # requirements and hope for the best.
3724 # requirements and hope for the best.
3720 requirements = source_requirements
3725 requirements = source_requirements
3721 else:
3726 else:
3722 # We have control on the working copy only, so "copy" the non
3727 # We have control on the working copy only, so "copy" the non
3723 # working copy part over, ignoring previous logic.
3728 # working copy part over, ignoring previous logic.
3724 to_drop = set()
3729 to_drop = set()
3725 for req in requirements:
3730 for req in requirements:
3726 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3731 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3727 continue
3732 continue
3728 if req in source_requirements:
3733 if req in source_requirements:
3729 continue
3734 continue
3730 to_drop.add(req)
3735 to_drop.add(req)
3731 requirements -= to_drop
3736 requirements -= to_drop
3732 requirements |= source_requirements
3737 requirements |= source_requirements
3733
3738
3734 if createopts.get(b'sharedrelative'):
3739 if createopts.get(b'sharedrelative'):
3735 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3740 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3736 else:
3741 else:
3737 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3742 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3738
3743
3739 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3744 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3740 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3745 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3741 msg = _(b"ignoring unknown tracked key version: %d\n")
3746 msg = _(b"ignoring unknown tracked key version: %d\n")
3742 hint = _(
3747 hint = _(
3743 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3748 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3744 )
3749 )
3745 if version != 1:
3750 if version != 1:
3746 ui.warn(msg % version, hint=hint)
3751 ui.warn(msg % version, hint=hint)
3747 else:
3752 else:
3748 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3753 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3749
3754
3750 return requirements
3755 return requirements
3751
3756
3752
3757
3753 def checkrequirementscompat(ui, requirements):
3758 def checkrequirementscompat(ui, requirements):
3754 """Checks compatibility of repository requirements enabled and disabled.
3759 """Checks compatibility of repository requirements enabled and disabled.
3755
3760
3756 Returns a set of requirements which needs to be dropped because dependend
3761 Returns a set of requirements which needs to be dropped because dependend
3757 requirements are not enabled. Also warns users about it"""
3762 requirements are not enabled. Also warns users about it"""
3758
3763
3759 dropped = set()
3764 dropped = set()
3760
3765
3761 if requirementsmod.STORE_REQUIREMENT not in requirements:
3766 if requirementsmod.STORE_REQUIREMENT not in requirements:
3762 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3767 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3763 ui.warn(
3768 ui.warn(
3764 _(
3769 _(
3765 b'ignoring enabled \'format.bookmarks-in-store\' config '
3770 b'ignoring enabled \'format.bookmarks-in-store\' config '
3766 b'beacuse it is incompatible with disabled '
3771 b'beacuse it is incompatible with disabled '
3767 b'\'format.usestore\' config\n'
3772 b'\'format.usestore\' config\n'
3768 )
3773 )
3769 )
3774 )
3770 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3775 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3771
3776
3772 if (
3777 if (
3773 requirementsmod.SHARED_REQUIREMENT in requirements
3778 requirementsmod.SHARED_REQUIREMENT in requirements
3774 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3779 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3775 ):
3780 ):
3776 raise error.Abort(
3781 raise error.Abort(
3777 _(
3782 _(
3778 b"cannot create shared repository as source was created"
3783 b"cannot create shared repository as source was created"
3779 b" with 'format.usestore' config disabled"
3784 b" with 'format.usestore' config disabled"
3780 )
3785 )
3781 )
3786 )
3782
3787
3783 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3788 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3784 if ui.hasconfig(b'format', b'use-share-safe'):
3789 if ui.hasconfig(b'format', b'use-share-safe'):
3785 msg = _(
3790 msg = _(
3786 b"ignoring enabled 'format.use-share-safe' config because "
3791 b"ignoring enabled 'format.use-share-safe' config because "
3787 b"it is incompatible with disabled 'format.usestore'"
3792 b"it is incompatible with disabled 'format.usestore'"
3788 b" config\n"
3793 b" config\n"
3789 )
3794 )
3790 ui.warn(msg)
3795 ui.warn(msg)
3791 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3796 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3792
3797
3793 return dropped
3798 return dropped
3794
3799
3795
3800
3796 def filterknowncreateopts(ui, createopts):
3801 def filterknowncreateopts(ui, createopts):
3797 """Filters a dict of repo creation options against options that are known.
3802 """Filters a dict of repo creation options against options that are known.
3798
3803
3799 Receives a dict of repo creation options and returns a dict of those
3804 Receives a dict of repo creation options and returns a dict of those
3800 options that we don't know how to handle.
3805 options that we don't know how to handle.
3801
3806
3802 This function is called as part of repository creation. If the
3807 This function is called as part of repository creation. If the
3803 returned dict contains any items, repository creation will not
3808 returned dict contains any items, repository creation will not
3804 be allowed, as it means there was a request to create a repository
3809 be allowed, as it means there was a request to create a repository
3805 with options not recognized by loaded code.
3810 with options not recognized by loaded code.
3806
3811
3807 Extensions can wrap this function to filter out creation options
3812 Extensions can wrap this function to filter out creation options
3808 they know how to handle.
3813 they know how to handle.
3809 """
3814 """
3810 known = {
3815 known = {
3811 b'backend',
3816 b'backend',
3812 b'lfs',
3817 b'lfs',
3813 b'narrowfiles',
3818 b'narrowfiles',
3814 b'sharedrepo',
3819 b'sharedrepo',
3815 b'sharedrelative',
3820 b'sharedrelative',
3816 b'shareditems',
3821 b'shareditems',
3817 b'shallowfilestore',
3822 b'shallowfilestore',
3818 }
3823 }
3819
3824
3820 return {k: v for k, v in createopts.items() if k not in known}
3825 return {k: v for k, v in createopts.items() if k not in known}
3821
3826
3822
3827
3823 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3828 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3824 """Create a new repository in a vfs.
3829 """Create a new repository in a vfs.
3825
3830
3826 ``path`` path to the new repo's working directory.
3831 ``path`` path to the new repo's working directory.
3827 ``createopts`` options for the new repository.
3832 ``createopts`` options for the new repository.
3828 ``requirement`` predefined set of requirements.
3833 ``requirement`` predefined set of requirements.
3829 (incompatible with ``createopts``)
3834 (incompatible with ``createopts``)
3830
3835
3831 The following keys for ``createopts`` are recognized:
3836 The following keys for ``createopts`` are recognized:
3832
3837
3833 backend
3838 backend
3834 The storage backend to use.
3839 The storage backend to use.
3835 lfs
3840 lfs
3836 Repository will be created with ``lfs`` requirement. The lfs extension
3841 Repository will be created with ``lfs`` requirement. The lfs extension
3837 will automatically be loaded when the repository is accessed.
3842 will automatically be loaded when the repository is accessed.
3838 narrowfiles
3843 narrowfiles
3839 Set up repository to support narrow file storage.
3844 Set up repository to support narrow file storage.
3840 sharedrepo
3845 sharedrepo
3841 Repository object from which storage should be shared.
3846 Repository object from which storage should be shared.
3842 sharedrelative
3847 sharedrelative
3843 Boolean indicating if the path to the shared repo should be
3848 Boolean indicating if the path to the shared repo should be
3844 stored as relative. By default, the pointer to the "parent" repo
3849 stored as relative. By default, the pointer to the "parent" repo
3845 is stored as an absolute path.
3850 is stored as an absolute path.
3846 shareditems
3851 shareditems
3847 Set of items to share to the new repository (in addition to storage).
3852 Set of items to share to the new repository (in addition to storage).
3848 shallowfilestore
3853 shallowfilestore
3849 Indicates that storage for files should be shallow (not all ancestor
3854 Indicates that storage for files should be shallow (not all ancestor
3850 revisions are known).
3855 revisions are known).
3851 """
3856 """
3852
3857
3853 if requirements is not None:
3858 if requirements is not None:
3854 if createopts is not None:
3859 if createopts is not None:
3855 msg = b'cannot specify both createopts and requirements'
3860 msg = b'cannot specify both createopts and requirements'
3856 raise error.ProgrammingError(msg)
3861 raise error.ProgrammingError(msg)
3857 createopts = {}
3862 createopts = {}
3858 else:
3863 else:
3859 createopts = defaultcreateopts(ui, createopts=createopts)
3864 createopts = defaultcreateopts(ui, createopts=createopts)
3860
3865
3861 unknownopts = filterknowncreateopts(ui, createopts)
3866 unknownopts = filterknowncreateopts(ui, createopts)
3862
3867
3863 if not isinstance(unknownopts, dict):
3868 if not isinstance(unknownopts, dict):
3864 raise error.ProgrammingError(
3869 raise error.ProgrammingError(
3865 b'filterknowncreateopts() did not return a dict'
3870 b'filterknowncreateopts() did not return a dict'
3866 )
3871 )
3867
3872
3868 if unknownopts:
3873 if unknownopts:
3869 raise error.Abort(
3874 raise error.Abort(
3870 _(
3875 _(
3871 b'unable to create repository because of unknown '
3876 b'unable to create repository because of unknown '
3872 b'creation option: %s'
3877 b'creation option: %s'
3873 )
3878 )
3874 % b', '.join(sorted(unknownopts)),
3879 % b', '.join(sorted(unknownopts)),
3875 hint=_(b'is a required extension not loaded?'),
3880 hint=_(b'is a required extension not loaded?'),
3876 )
3881 )
3877
3882
3878 requirements = newreporequirements(ui, createopts=createopts)
3883 requirements = newreporequirements(ui, createopts=createopts)
3879 requirements -= checkrequirementscompat(ui, requirements)
3884 requirements -= checkrequirementscompat(ui, requirements)
3880
3885
3881 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3886 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3882
3887
3883 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3888 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3884 if hgvfs.exists():
3889 if hgvfs.exists():
3885 raise error.RepoError(_(b'repository %s already exists') % path)
3890 raise error.RepoError(_(b'repository %s already exists') % path)
3886
3891
3887 if b'sharedrepo' in createopts:
3892 if b'sharedrepo' in createopts:
3888 sharedpath = createopts[b'sharedrepo'].sharedpath
3893 sharedpath = createopts[b'sharedrepo'].sharedpath
3889
3894
3890 if createopts.get(b'sharedrelative'):
3895 if createopts.get(b'sharedrelative'):
3891 try:
3896 try:
3892 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3897 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3893 sharedpath = util.pconvert(sharedpath)
3898 sharedpath = util.pconvert(sharedpath)
3894 except (IOError, ValueError) as e:
3899 except (IOError, ValueError) as e:
3895 # ValueError is raised on Windows if the drive letters differ
3900 # ValueError is raised on Windows if the drive letters differ
3896 # on each path.
3901 # on each path.
3897 raise error.Abort(
3902 raise error.Abort(
3898 _(b'cannot calculate relative path'),
3903 _(b'cannot calculate relative path'),
3899 hint=stringutil.forcebytestr(e),
3904 hint=stringutil.forcebytestr(e),
3900 )
3905 )
3901
3906
3902 if not wdirvfs.exists():
3907 if not wdirvfs.exists():
3903 wdirvfs.makedirs()
3908 wdirvfs.makedirs()
3904
3909
3905 hgvfs.makedir(notindexed=True)
3910 hgvfs.makedir(notindexed=True)
3906 if b'sharedrepo' not in createopts:
3911 if b'sharedrepo' not in createopts:
3907 hgvfs.mkdir(b'cache')
3912 hgvfs.mkdir(b'cache')
3908 hgvfs.mkdir(b'wcache')
3913 hgvfs.mkdir(b'wcache')
3909
3914
3910 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3915 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3911 if has_store and b'sharedrepo' not in createopts:
3916 if has_store and b'sharedrepo' not in createopts:
3912 hgvfs.mkdir(b'store')
3917 hgvfs.mkdir(b'store')
3913
3918
3914 # We create an invalid changelog outside the store so very old
3919 # We create an invalid changelog outside the store so very old
3915 # Mercurial versions (which didn't know about the requirements
3920 # Mercurial versions (which didn't know about the requirements
3916 # file) encounter an error on reading the changelog. This
3921 # file) encounter an error on reading the changelog. This
3917 # effectively locks out old clients and prevents them from
3922 # effectively locks out old clients and prevents them from
3918 # mucking with a repo in an unknown format.
3923 # mucking with a repo in an unknown format.
3919 #
3924 #
3920 # The revlog header has version 65535, which won't be recognized by
3925 # The revlog header has version 65535, which won't be recognized by
3921 # such old clients.
3926 # such old clients.
3922 hgvfs.append(
3927 hgvfs.append(
3923 b'00changelog.i',
3928 b'00changelog.i',
3924 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3929 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3925 b'layout',
3930 b'layout',
3926 )
3931 )
3927
3932
3928 # Filter the requirements into working copy and store ones
3933 # Filter the requirements into working copy and store ones
3929 wcreq, storereq = scmutil.filterrequirements(requirements)
3934 wcreq, storereq = scmutil.filterrequirements(requirements)
3930 # write working copy ones
3935 # write working copy ones
3931 scmutil.writerequires(hgvfs, wcreq)
3936 scmutil.writerequires(hgvfs, wcreq)
3932 # If there are store requirements and the current repository
3937 # If there are store requirements and the current repository
3933 # is not a shared one, write stored requirements
3938 # is not a shared one, write stored requirements
3934 # For new shared repository, we don't need to write the store
3939 # For new shared repository, we don't need to write the store
3935 # requirements as they are already present in store requires
3940 # requirements as they are already present in store requires
3936 if storereq and b'sharedrepo' not in createopts:
3941 if storereq and b'sharedrepo' not in createopts:
3937 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3942 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3938 scmutil.writerequires(storevfs, storereq)
3943 scmutil.writerequires(storevfs, storereq)
3939
3944
3940 # Write out file telling readers where to find the shared store.
3945 # Write out file telling readers where to find the shared store.
3941 if b'sharedrepo' in createopts:
3946 if b'sharedrepo' in createopts:
3942 hgvfs.write(b'sharedpath', sharedpath)
3947 hgvfs.write(b'sharedpath', sharedpath)
3943
3948
3944 if createopts.get(b'shareditems'):
3949 if createopts.get(b'shareditems'):
3945 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3950 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3946 hgvfs.write(b'shared', shared)
3951 hgvfs.write(b'shared', shared)
3947
3952
3948
3953
3949 def poisonrepository(repo):
3954 def poisonrepository(repo):
3950 """Poison a repository instance so it can no longer be used."""
3955 """Poison a repository instance so it can no longer be used."""
3951 # Perform any cleanup on the instance.
3956 # Perform any cleanup on the instance.
3952 repo.close()
3957 repo.close()
3953
3958
3954 # Our strategy is to replace the type of the object with one that
3959 # Our strategy is to replace the type of the object with one that
3955 # has all attribute lookups result in error.
3960 # has all attribute lookups result in error.
3956 #
3961 #
3957 # But we have to allow the close() method because some constructors
3962 # But we have to allow the close() method because some constructors
3958 # of repos call close() on repo references.
3963 # of repos call close() on repo references.
3959 class poisonedrepository:
3964 class poisonedrepository:
3960 def __getattribute__(self, item):
3965 def __getattribute__(self, item):
3961 if item == 'close':
3966 if item == 'close':
3962 return object.__getattribute__(self, item)
3967 return object.__getattribute__(self, item)
3963
3968
3964 raise error.ProgrammingError(
3969 raise error.ProgrammingError(
3965 b'repo instances should not be used after unshare'
3970 b'repo instances should not be used after unshare'
3966 )
3971 )
3967
3972
3968 def close(self):
3973 def close(self):
3969 pass
3974 pass
3970
3975
3971 # We may have a repoview, which intercepts __setattr__. So be sure
3976 # We may have a repoview, which intercepts __setattr__. So be sure
3972 # we operate at the lowest level possible.
3977 # we operate at the lowest level possible.
3973 object.__setattr__(repo, '__class__', poisonedrepository)
3978 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,3354 +1,3358 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 # coding: utf8
2 # coding: utf8
3 #
3 #
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Storage back-end for Mercurial.
9 """Storage back-end for Mercurial.
10
10
11 This provides efficient delta storage with O(1) retrieve and append
11 This provides efficient delta storage with O(1) retrieve and append
12 and O(changes) merge between branches.
12 and O(changes) merge between branches.
13 """
13 """
14
14
15
15
16 import binascii
16 import binascii
17 import collections
17 import collections
18 import contextlib
18 import contextlib
19 import io
19 import io
20 import os
20 import os
21 import struct
21 import struct
22 import zlib
22 import zlib
23
23
24 # import stuff from node for others to import from revlog
24 # import stuff from node for others to import from revlog
25 from .node import (
25 from .node import (
26 bin,
26 bin,
27 hex,
27 hex,
28 nullrev,
28 nullrev,
29 sha1nodeconstants,
29 sha1nodeconstants,
30 short,
30 short,
31 wdirrev,
31 wdirrev,
32 )
32 )
33 from .i18n import _
33 from .i18n import _
34 from .pycompat import getattr
34 from .pycompat import getattr
35 from .revlogutils.constants import (
35 from .revlogutils.constants import (
36 ALL_KINDS,
36 ALL_KINDS,
37 CHANGELOGV2,
37 CHANGELOGV2,
38 COMP_MODE_DEFAULT,
38 COMP_MODE_DEFAULT,
39 COMP_MODE_INLINE,
39 COMP_MODE_INLINE,
40 COMP_MODE_PLAIN,
40 COMP_MODE_PLAIN,
41 ENTRY_RANK,
41 ENTRY_RANK,
42 FEATURES_BY_VERSION,
42 FEATURES_BY_VERSION,
43 FLAG_GENERALDELTA,
43 FLAG_GENERALDELTA,
44 FLAG_INLINE_DATA,
44 FLAG_INLINE_DATA,
45 INDEX_HEADER,
45 INDEX_HEADER,
46 KIND_CHANGELOG,
46 KIND_CHANGELOG,
47 KIND_FILELOG,
47 KIND_FILELOG,
48 RANK_UNKNOWN,
48 RANK_UNKNOWN,
49 REVLOGV0,
49 REVLOGV0,
50 REVLOGV1,
50 REVLOGV1,
51 REVLOGV1_FLAGS,
51 REVLOGV1_FLAGS,
52 REVLOGV2,
52 REVLOGV2,
53 REVLOGV2_FLAGS,
53 REVLOGV2_FLAGS,
54 REVLOG_DEFAULT_FLAGS,
54 REVLOG_DEFAULT_FLAGS,
55 REVLOG_DEFAULT_FORMAT,
55 REVLOG_DEFAULT_FORMAT,
56 REVLOG_DEFAULT_VERSION,
56 REVLOG_DEFAULT_VERSION,
57 SUPPORTED_FLAGS,
57 SUPPORTED_FLAGS,
58 )
58 )
59 from .revlogutils.flagutil import (
59 from .revlogutils.flagutil import (
60 REVIDX_DEFAULT_FLAGS,
60 REVIDX_DEFAULT_FLAGS,
61 REVIDX_ELLIPSIS,
61 REVIDX_ELLIPSIS,
62 REVIDX_EXTSTORED,
62 REVIDX_EXTSTORED,
63 REVIDX_FLAGS_ORDER,
63 REVIDX_FLAGS_ORDER,
64 REVIDX_HASCOPIESINFO,
64 REVIDX_HASCOPIESINFO,
65 REVIDX_ISCENSORED,
65 REVIDX_ISCENSORED,
66 REVIDX_RAWTEXT_CHANGING_FLAGS,
66 REVIDX_RAWTEXT_CHANGING_FLAGS,
67 )
67 )
68 from .thirdparty import attr
68 from .thirdparty import attr
69 from . import (
69 from . import (
70 ancestor,
70 ancestor,
71 dagop,
71 dagop,
72 error,
72 error,
73 mdiff,
73 mdiff,
74 policy,
74 policy,
75 pycompat,
75 pycompat,
76 revlogutils,
76 revlogutils,
77 templatefilters,
77 templatefilters,
78 util,
78 util,
79 )
79 )
80 from .interfaces import (
80 from .interfaces import (
81 repository,
81 repository,
82 util as interfaceutil,
82 util as interfaceutil,
83 )
83 )
84 from .revlogutils import (
84 from .revlogutils import (
85 deltas as deltautil,
85 deltas as deltautil,
86 docket as docketutil,
86 docket as docketutil,
87 flagutil,
87 flagutil,
88 nodemap as nodemaputil,
88 nodemap as nodemaputil,
89 randomaccessfile,
89 randomaccessfile,
90 revlogv0,
90 revlogv0,
91 rewrite,
91 rewrite,
92 sidedata as sidedatautil,
92 sidedata as sidedatautil,
93 )
93 )
94 from .utils import (
94 from .utils import (
95 storageutil,
95 storageutil,
96 stringutil,
96 stringutil,
97 )
97 )
98
98
99 # blanked usage of all the name to prevent pyflakes constraints
99 # blanked usage of all the name to prevent pyflakes constraints
100 # We need these name available in the module for extensions.
100 # We need these name available in the module for extensions.
101
101
102 REVLOGV0
102 REVLOGV0
103 REVLOGV1
103 REVLOGV1
104 REVLOGV2
104 REVLOGV2
105 CHANGELOGV2
105 CHANGELOGV2
106 FLAG_INLINE_DATA
106 FLAG_INLINE_DATA
107 FLAG_GENERALDELTA
107 FLAG_GENERALDELTA
108 REVLOG_DEFAULT_FLAGS
108 REVLOG_DEFAULT_FLAGS
109 REVLOG_DEFAULT_FORMAT
109 REVLOG_DEFAULT_FORMAT
110 REVLOG_DEFAULT_VERSION
110 REVLOG_DEFAULT_VERSION
111 REVLOGV1_FLAGS
111 REVLOGV1_FLAGS
112 REVLOGV2_FLAGS
112 REVLOGV2_FLAGS
113 REVIDX_ISCENSORED
113 REVIDX_ISCENSORED
114 REVIDX_ELLIPSIS
114 REVIDX_ELLIPSIS
115 REVIDX_HASCOPIESINFO
115 REVIDX_HASCOPIESINFO
116 REVIDX_EXTSTORED
116 REVIDX_EXTSTORED
117 REVIDX_DEFAULT_FLAGS
117 REVIDX_DEFAULT_FLAGS
118 REVIDX_FLAGS_ORDER
118 REVIDX_FLAGS_ORDER
119 REVIDX_RAWTEXT_CHANGING_FLAGS
119 REVIDX_RAWTEXT_CHANGING_FLAGS
120
120
121 parsers = policy.importmod('parsers')
121 parsers = policy.importmod('parsers')
122 rustancestor = policy.importrust('ancestor')
122 rustancestor = policy.importrust('ancestor')
123 rustdagop = policy.importrust('dagop')
123 rustdagop = policy.importrust('dagop')
124 rustrevlog = policy.importrust('revlog')
124 rustrevlog = policy.importrust('revlog')
125
125
126 # Aliased for performance.
126 # Aliased for performance.
127 _zlibdecompress = zlib.decompress
127 _zlibdecompress = zlib.decompress
128
128
129 # max size of revlog with inline data
129 # max size of revlog with inline data
130 _maxinline = 131072
130 _maxinline = 131072
131
131
132 # Flag processors for REVIDX_ELLIPSIS.
132 # Flag processors for REVIDX_ELLIPSIS.
133 def ellipsisreadprocessor(rl, text):
133 def ellipsisreadprocessor(rl, text):
134 return text, False
134 return text, False
135
135
136
136
137 def ellipsiswriteprocessor(rl, text):
137 def ellipsiswriteprocessor(rl, text):
138 return text, False
138 return text, False
139
139
140
140
141 def ellipsisrawprocessor(rl, text):
141 def ellipsisrawprocessor(rl, text):
142 return False
142 return False
143
143
144
144
145 ellipsisprocessor = (
145 ellipsisprocessor = (
146 ellipsisreadprocessor,
146 ellipsisreadprocessor,
147 ellipsiswriteprocessor,
147 ellipsiswriteprocessor,
148 ellipsisrawprocessor,
148 ellipsisrawprocessor,
149 )
149 )
150
150
151
151
152 def _verify_revision(rl, skipflags, state, node):
152 def _verify_revision(rl, skipflags, state, node):
153 """Verify the integrity of the given revlog ``node`` while providing a hook
153 """Verify the integrity of the given revlog ``node`` while providing a hook
154 point for extensions to influence the operation."""
154 point for extensions to influence the operation."""
155 if skipflags:
155 if skipflags:
156 state[b'skipread'].add(node)
156 state[b'skipread'].add(node)
157 else:
157 else:
158 # Side-effect: read content and verify hash.
158 # Side-effect: read content and verify hash.
159 rl.revision(node)
159 rl.revision(node)
160
160
161
161
162 # True if a fast implementation for persistent-nodemap is available
162 # True if a fast implementation for persistent-nodemap is available
163 #
163 #
164 # We also consider we have a "fast" implementation in "pure" python because
164 # We also consider we have a "fast" implementation in "pure" python because
165 # people using pure don't really have performance consideration (and a
165 # people using pure don't really have performance consideration (and a
166 # wheelbarrow of other slowness source)
166 # wheelbarrow of other slowness source)
167 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
167 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
168 parsers, 'BaseIndexObject'
168 parsers, 'BaseIndexObject'
169 )
169 )
170
170
171
171
172 @interfaceutil.implementer(repository.irevisiondelta)
172 @interfaceutil.implementer(repository.irevisiondelta)
173 @attr.s(slots=True)
173 @attr.s(slots=True)
174 class revlogrevisiondelta:
174 class revlogrevisiondelta:
175 node = attr.ib()
175 node = attr.ib()
176 p1node = attr.ib()
176 p1node = attr.ib()
177 p2node = attr.ib()
177 p2node = attr.ib()
178 basenode = attr.ib()
178 basenode = attr.ib()
179 flags = attr.ib()
179 flags = attr.ib()
180 baserevisionsize = attr.ib()
180 baserevisionsize = attr.ib()
181 revision = attr.ib()
181 revision = attr.ib()
182 delta = attr.ib()
182 delta = attr.ib()
183 sidedata = attr.ib()
183 sidedata = attr.ib()
184 protocol_flags = attr.ib()
184 protocol_flags = attr.ib()
185 linknode = attr.ib(default=None)
185 linknode = attr.ib(default=None)
186
186
187
187
188 @interfaceutil.implementer(repository.iverifyproblem)
188 @interfaceutil.implementer(repository.iverifyproblem)
189 @attr.s(frozen=True)
189 @attr.s(frozen=True)
190 class revlogproblem:
190 class revlogproblem:
191 warning = attr.ib(default=None)
191 warning = attr.ib(default=None)
192 error = attr.ib(default=None)
192 error = attr.ib(default=None)
193 node = attr.ib(default=None)
193 node = attr.ib(default=None)
194
194
195
195
196 def parse_index_v1(data, inline):
196 def parse_index_v1(data, inline):
197 # call the C implementation to parse the index data
197 # call the C implementation to parse the index data
198 index, cache = parsers.parse_index2(data, inline)
198 index, cache = parsers.parse_index2(data, inline)
199 return index, cache
199 return index, cache
200
200
201
201
202 def parse_index_v2(data, inline):
202 def parse_index_v2(data, inline):
203 # call the C implementation to parse the index data
203 # call the C implementation to parse the index data
204 index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
204 index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
205 return index, cache
205 return index, cache
206
206
207
207
208 def parse_index_cl_v2(data, inline):
208 def parse_index_cl_v2(data, inline):
209 # call the C implementation to parse the index data
209 # call the C implementation to parse the index data
210 index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
210 index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
211 return index, cache
211 return index, cache
212
212
213
213
214 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
214 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
215
215
216 def parse_index_v1_nodemap(data, inline):
216 def parse_index_v1_nodemap(data, inline):
217 index, cache = parsers.parse_index_devel_nodemap(data, inline)
217 index, cache = parsers.parse_index_devel_nodemap(data, inline)
218 return index, cache
218 return index, cache
219
219
220
220
221 else:
221 else:
222 parse_index_v1_nodemap = None
222 parse_index_v1_nodemap = None
223
223
224
224
225 def parse_index_v1_mixed(data, inline):
225 def parse_index_v1_mixed(data, inline):
226 index, cache = parse_index_v1(data, inline)
226 index, cache = parse_index_v1(data, inline)
227 return rustrevlog.MixedIndex(index), cache
227 return rustrevlog.MixedIndex(index), cache
228
228
229
229
230 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
230 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
231 # signed integer)
231 # signed integer)
232 _maxentrysize = 0x7FFFFFFF
232 _maxentrysize = 0x7FFFFFFF
233
233
234 FILE_TOO_SHORT_MSG = _(
234 FILE_TOO_SHORT_MSG = _(
235 b'cannot read from revlog %s;'
235 b'cannot read from revlog %s;'
236 b' expected %d bytes from offset %d, data size is %d'
236 b' expected %d bytes from offset %d, data size is %d'
237 )
237 )
238
238
239 hexdigits = b'0123456789abcdefABCDEF'
239 hexdigits = b'0123456789abcdefABCDEF'
240
240
241
241
242 class revlog:
242 class revlog:
243 """
243 """
244 the underlying revision storage object
244 the underlying revision storage object
245
245
246 A revlog consists of two parts, an index and the revision data.
246 A revlog consists of two parts, an index and the revision data.
247
247
248 The index is a file with a fixed record size containing
248 The index is a file with a fixed record size containing
249 information on each revision, including its nodeid (hash), the
249 information on each revision, including its nodeid (hash), the
250 nodeids of its parents, the position and offset of its data within
250 nodeids of its parents, the position and offset of its data within
251 the data file, and the revision it's based on. Finally, each entry
251 the data file, and the revision it's based on. Finally, each entry
252 contains a linkrev entry that can serve as a pointer to external
252 contains a linkrev entry that can serve as a pointer to external
253 data.
253 data.
254
254
255 The revision data itself is a linear collection of data chunks.
255 The revision data itself is a linear collection of data chunks.
256 Each chunk represents a revision and is usually represented as a
256 Each chunk represents a revision and is usually represented as a
257 delta against the previous chunk. To bound lookup time, runs of
257 delta against the previous chunk. To bound lookup time, runs of
258 deltas are limited to about 2 times the length of the original
258 deltas are limited to about 2 times the length of the original
259 version data. This makes retrieval of a version proportional to
259 version data. This makes retrieval of a version proportional to
260 its size, or O(1) relative to the number of revisions.
260 its size, or O(1) relative to the number of revisions.
261
261
262 Both pieces of the revlog are written to in an append-only
262 Both pieces of the revlog are written to in an append-only
263 fashion, which means we never need to rewrite a file to insert or
263 fashion, which means we never need to rewrite a file to insert or
264 remove data, and can use some simple techniques to avoid the need
264 remove data, and can use some simple techniques to avoid the need
265 for locking while reading.
265 for locking while reading.
266
266
267 If checkambig, indexfile is opened with checkambig=True at
267 If checkambig, indexfile is opened with checkambig=True at
268 writing, to avoid file stat ambiguity.
268 writing, to avoid file stat ambiguity.
269
269
270 If mmaplargeindex is True, and an mmapindexthreshold is set, the
270 If mmaplargeindex is True, and an mmapindexthreshold is set, the
271 index will be mmapped rather than read if it is larger than the
271 index will be mmapped rather than read if it is larger than the
272 configured threshold.
272 configured threshold.
273
273
274 If censorable is True, the revlog can have censored revisions.
274 If censorable is True, the revlog can have censored revisions.
275
275
276 If `upperboundcomp` is not None, this is the expected maximal gain from
276 If `upperboundcomp` is not None, this is the expected maximal gain from
277 compression for the data content.
277 compression for the data content.
278
278
279 `concurrencychecker` is an optional function that receives 3 arguments: a
279 `concurrencychecker` is an optional function that receives 3 arguments: a
280 file handle, a filename, and an expected position. It should check whether
280 file handle, a filename, and an expected position. It should check whether
281 the current position in the file handle is valid, and log/warn/fail (by
281 the current position in the file handle is valid, and log/warn/fail (by
282 raising).
282 raising).
283
283
284 See mercurial/revlogutils/contants.py for details about the content of an
284 See mercurial/revlogutils/contants.py for details about the content of an
285 index entry.
285 index entry.
286 """
286 """
287
287
288 _flagserrorclass = error.RevlogError
288 _flagserrorclass = error.RevlogError
289
289
290 def __init__(
290 def __init__(
291 self,
291 self,
292 opener,
292 opener,
293 target,
293 target,
294 radix,
294 radix,
295 postfix=None, # only exist for `tmpcensored` now
295 postfix=None, # only exist for `tmpcensored` now
296 checkambig=False,
296 checkambig=False,
297 mmaplargeindex=False,
297 mmaplargeindex=False,
298 censorable=False,
298 censorable=False,
299 upperboundcomp=None,
299 upperboundcomp=None,
300 persistentnodemap=False,
300 persistentnodemap=False,
301 concurrencychecker=None,
301 concurrencychecker=None,
302 trypending=False,
302 trypending=False,
303 canonical_parent_order=True,
303 canonical_parent_order=True,
304 ):
304 ):
305 """
305 """
306 create a revlog object
306 create a revlog object
307
307
308 opener is a function that abstracts the file opening operation
308 opener is a function that abstracts the file opening operation
309 and can be used to implement COW semantics or the like.
309 and can be used to implement COW semantics or the like.
310
310
311 `target`: a (KIND, ID) tuple that identify the content stored in
311 `target`: a (KIND, ID) tuple that identify the content stored in
312 this revlog. It help the rest of the code to understand what the revlog
312 this revlog. It help the rest of the code to understand what the revlog
313 is about without having to resort to heuristic and index filename
313 is about without having to resort to heuristic and index filename
314 analysis. Note: that this must be reliably be set by normal code, but
314 analysis. Note: that this must be reliably be set by normal code, but
315 that test, debug, or performance measurement code might not set this to
315 that test, debug, or performance measurement code might not set this to
316 accurate value.
316 accurate value.
317 """
317 """
318 self.upperboundcomp = upperboundcomp
318 self.upperboundcomp = upperboundcomp
319
319
320 self.radix = radix
320 self.radix = radix
321
321
322 self._docket_file = None
322 self._docket_file = None
323 self._indexfile = None
323 self._indexfile = None
324 self._datafile = None
324 self._datafile = None
325 self._sidedatafile = None
325 self._sidedatafile = None
326 self._nodemap_file = None
326 self._nodemap_file = None
327 self.postfix = postfix
327 self.postfix = postfix
328 self._trypending = trypending
328 self._trypending = trypending
329 self.opener = opener
329 self.opener = opener
330 if persistentnodemap:
330 if persistentnodemap:
331 self._nodemap_file = nodemaputil.get_nodemap_file(self)
331 self._nodemap_file = nodemaputil.get_nodemap_file(self)
332
332
333 assert target[0] in ALL_KINDS
333 assert target[0] in ALL_KINDS
334 assert len(target) == 2
334 assert len(target) == 2
335 self.target = target
335 self.target = target
336 # When True, indexfile is opened with checkambig=True at writing, to
336 # When True, indexfile is opened with checkambig=True at writing, to
337 # avoid file stat ambiguity.
337 # avoid file stat ambiguity.
338 self._checkambig = checkambig
338 self._checkambig = checkambig
339 self._mmaplargeindex = mmaplargeindex
339 self._mmaplargeindex = mmaplargeindex
340 self._censorable = censorable
340 self._censorable = censorable
341 # 3-tuple of (node, rev, text) for a raw revision.
341 # 3-tuple of (node, rev, text) for a raw revision.
342 self._revisioncache = None
342 self._revisioncache = None
343 # Maps rev to chain base rev.
343 # Maps rev to chain base rev.
344 self._chainbasecache = util.lrucachedict(100)
344 self._chainbasecache = util.lrucachedict(100)
345 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
345 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
346 self._chunkcache = (0, b'')
346 self._chunkcache = (0, b'')
347 # How much data to read and cache into the raw revlog data cache.
347 # How much data to read and cache into the raw revlog data cache.
348 self._chunkcachesize = 65536
348 self._chunkcachesize = 65536
349 self._maxchainlen = None
349 self._maxchainlen = None
350 self._deltabothparents = True
350 self._deltabothparents = True
351 self._candidate_group_chunk_size = 0
351 self._debug_delta = False
352 self._debug_delta = False
352 self.index = None
353 self.index = None
353 self._docket = None
354 self._docket = None
354 self._nodemap_docket = None
355 self._nodemap_docket = None
355 # Mapping of partial identifiers to full nodes.
356 # Mapping of partial identifiers to full nodes.
356 self._pcache = {}
357 self._pcache = {}
357 # Mapping of revision integer to full node.
358 # Mapping of revision integer to full node.
358 self._compengine = b'zlib'
359 self._compengine = b'zlib'
359 self._compengineopts = {}
360 self._compengineopts = {}
360 self._maxdeltachainspan = -1
361 self._maxdeltachainspan = -1
361 self._withsparseread = False
362 self._withsparseread = False
362 self._sparserevlog = False
363 self._sparserevlog = False
363 self.hassidedata = False
364 self.hassidedata = False
364 self._srdensitythreshold = 0.50
365 self._srdensitythreshold = 0.50
365 self._srmingapsize = 262144
366 self._srmingapsize = 262144
366
367
367 # Make copy of flag processors so each revlog instance can support
368 # Make copy of flag processors so each revlog instance can support
368 # custom flags.
369 # custom flags.
369 self._flagprocessors = dict(flagutil.flagprocessors)
370 self._flagprocessors = dict(flagutil.flagprocessors)
370
371
371 # 3-tuple of file handles being used for active writing.
372 # 3-tuple of file handles being used for active writing.
372 self._writinghandles = None
373 self._writinghandles = None
373 # prevent nesting of addgroup
374 # prevent nesting of addgroup
374 self._adding_group = None
375 self._adding_group = None
375
376
376 self._loadindex()
377 self._loadindex()
377
378
378 self._concurrencychecker = concurrencychecker
379 self._concurrencychecker = concurrencychecker
379
380
380 # parent order is supposed to be semantically irrelevant, so we
381 # parent order is supposed to be semantically irrelevant, so we
381 # normally resort parents to ensure that the first parent is non-null,
382 # normally resort parents to ensure that the first parent is non-null,
382 # if there is a non-null parent at all.
383 # if there is a non-null parent at all.
383 # filelog abuses the parent order as flag to mark some instances of
384 # filelog abuses the parent order as flag to mark some instances of
384 # meta-encoded files, so allow it to disable this behavior.
385 # meta-encoded files, so allow it to disable this behavior.
385 self.canonical_parent_order = canonical_parent_order
386 self.canonical_parent_order = canonical_parent_order
386
387
def _init_opts(self):
    """process options (from above/config) to setup associated default revlog mode

    These values might be affected when actually reading on disk information.

    The relevant values are returned for use in _loadindex().

    * new_header:
        version header to use if we need to create a new revlog

    * mmapindexthreshold:
        minimal index size for start to use mmap

    * force_nodemap:
        force the usage of a "development" version of the nodemap code
    """
    mmapindexthreshold = None
    opts = self.opener.options

    # Pick the on-disk format header for a newly created revlog, from the
    # most specific requested format down to the default.
    if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
        new_header = CHANGELOGV2
    elif b'revlogv2' in opts:
        new_header = REVLOGV2
    elif b'revlogv1' in opts:
        new_header = REVLOGV1 | FLAG_INLINE_DATA
        if b'generaldelta' in opts:
            new_header |= FLAG_GENERALDELTA
    # consistency fix: use the ``opts`` alias like every other lookup in
    # this method (previously spelled ``self.opener.options``; same dict)
    elif b'revlogv0' in opts:
        new_header = REVLOGV0
    else:
        new_header = REVLOG_DEFAULT_VERSION

    if b'chunkcachesize' in opts:
        self._chunkcachesize = opts[b'chunkcachesize']
    if b'maxchainlen' in opts:
        self._maxchainlen = opts[b'maxchainlen']
    if b'deltabothparents' in opts:
        self._deltabothparents = opts[b'deltabothparents']
    dps_cgds = opts.get(b'delta-parent-search.candidate-group-chunk-size')
    if dps_cgds:
        self._candidate_group_chunk_size = dps_cgds
    self._lazydelta = bool(opts.get(b'lazydelta', True))
    self._lazydeltabase = False
    if self._lazydelta:
        self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
    if b'debug-delta' in opts:
        self._debug_delta = opts[b'debug-delta']
    if b'compengine' in opts:
        self._compengine = opts[b'compengine']
    if b'zlib.level' in opts:
        self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
    if b'zstd.level' in opts:
        self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
    if b'maxdeltachainspan' in opts:
        self._maxdeltachainspan = opts[b'maxdeltachainspan']
    if self._mmaplargeindex and b'mmapindexthreshold' in opts:
        mmapindexthreshold = opts[b'mmapindexthreshold']
    self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
    withsparseread = bool(opts.get(b'with-sparse-read', False))
    # sparse-revlog forces sparse-read
    self._withsparseread = self._sparserevlog or withsparseread
    if b'sparse-read-density-threshold' in opts:
        self._srdensitythreshold = opts[b'sparse-read-density-threshold']
    if b'sparse-read-min-gap-size' in opts:
        self._srmingapsize = opts[b'sparse-read-min-gap-size']
    if opts.get(b'enableellipsis'):
        self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

    # revlog v0 doesn't have flag processors
    for flag, processor in opts.get(b'flagprocessors', {}).items():
        flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

    # the chunk cache size must be a positive power of two for the
    # offset-masking arithmetic in the cache to work
    if self._chunkcachesize <= 0:
        raise error.RevlogError(
            _(b'revlog chunk cache size %r is not greater than 0')
            % self._chunkcachesize
        )
    elif self._chunkcachesize & (self._chunkcachesize - 1):
        raise error.RevlogError(
            _(b'revlog chunk cache size %r is not a power of 2')
            % self._chunkcachesize
        )
    force_nodemap = opts.get(b'devel-force-nodemap', False)
    return new_header, mmapindexthreshold, force_nodemap
468
472
def _get_data(self, filepath, mmap_threshold, size=None):
    """Return the content of ``filepath``, mmap-ing it when large enough.

    ``size``, when given, limits how many bytes are read.
    A missing file yields the empty byte string."""
    try:
        with self.opener(filepath) as fp:
            use_mmap = False
            if mmap_threshold is not None:
                actual_size = self.opener.fstat(fp).st_size
                use_mmap = actual_size >= mmap_threshold
                if use_mmap and size is not None:
                    # avoid potentiel mmap crash
                    size = min(actual_size, size)
            if use_mmap:
                # TODO: should .close() to release resources without
                # relying on Python GC
                if size is None:
                    return util.buffer(util.mmapread(fp))
                return util.buffer(util.mmapread(fp, size))
            if size is None:
                return fp.read()
            return fp.read(size)
    except FileNotFoundError:
        return b''
493
497
def _loadindex(self, docket=None):
    """Read the index from disk and (re)initialize all derived state.

    When ``docket`` is provided, it is used as the pre-parsed docket
    instead of reading and parsing the entry-point file from disk.
    Sets ``self.index``, the segment-file readers and the various
    format/feature attributes derived from the on-disk header.
    """

    new_header, mmapindexthreshold, force_nodemap = self._init_opts()

    # Decide which file is the "entry point": either an explicitly
    # postfixed index, a pending (transaction) index, or the plain one.
    if self.postfix is not None:
        entry_point = b'%s.i.%s' % (self.radix, self.postfix)
    elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
        entry_point = b'%s.i.a' % self.radix
    else:
        entry_point = b'%s.i' % self.radix

    if docket is not None:
        self._docket = docket
        self._docket_file = entry_point
    else:
        self._initempty = True
        entry_data = self._get_data(entry_point, mmapindexthreshold)
        if len(entry_data) > 0:
            # existing revlog: read the version header from the file
            header = INDEX_HEADER.unpack(entry_data[:4])[0]
            self._initempty = False
        else:
            # new/empty revlog: use the header chosen by _init_opts
            header = new_header

        self._format_flags = header & ~0xFFFF
        self._format_version = header & 0xFFFF

        supported_flags = SUPPORTED_FLAGS.get(self._format_version)
        if supported_flags is None:
            msg = _(b'unknown version (%d) in revlog %s')
            msg %= (self._format_version, self.display_id)
            raise error.RevlogError(msg)
        elif self._format_flags & ~supported_flags:
            msg = _(b'unknown flags (%#04x) in version %d revlog %s')
            display_flag = self._format_flags >> 16
            msg %= (display_flag, self._format_version, self.display_id)
            raise error.RevlogError(msg)

        features = FEATURES_BY_VERSION[self._format_version]
        self._inline = features[b'inline'](self._format_flags)
        self._generaldelta = features[b'generaldelta'](self._format_flags)
        self.hassidedata = features[b'sidedata']

        if not features[b'docket']:
            # pre-docket formats: the entry point *is* the index
            self._indexfile = entry_point
            index_data = entry_data
        else:
            self._docket_file = entry_point
            if self._initempty:
                self._docket = docketutil.default_docket(self, header)
            else:
                self._docket = docketutil.parse_docket(
                    self, entry_data, use_pending=self._trypending
                )

    if self._docket is not None:
        # docket-based formats: index lives in its own file, sized by the
        # docket; read exactly that many bytes.
        self._indexfile = self._docket.index_filepath()
        index_data = b''
        index_size = self._docket.index_end
        if index_size > 0:
            index_data = self._get_data(
                self._indexfile, mmapindexthreshold, size=index_size
            )
            if len(index_data) < index_size:
                msg = _(b'too few index data for %s: got %d, expected %d')
                msg %= (self.display_id, len(index_data), index_size)
                raise error.RevlogError(msg)

        self._inline = False
        # generaldelta implied by version 2 revlogs.
        self._generaldelta = True
        # the logic for persistent nodemap will be dealt with within the
        # main docket, so disable it for now.
        self._nodemap_file = None

    if self._docket is not None:
        self._datafile = self._docket.data_filepath()
        self._sidedatafile = self._docket.sidedata_filepath()
    elif self.postfix is None:
        self._datafile = b'%s.d' % self.radix
    else:
        self._datafile = b'%s.d.%s' % (self.radix, self.postfix)

    self.nodeconstants = sha1nodeconstants
    self.nullid = self.nodeconstants.nullid

    # sparse-revlog can't be on without general-delta (issue6056)
    if not self._generaldelta:
        self._sparserevlog = False

    self._storedeltachains = True

    devel_nodemap = (
        self._nodemap_file
        and force_nodemap
        and parse_index_v1_nodemap is not None
    )

    use_rust_index = False
    if rustrevlog is not None:
        if self._nodemap_file is not None:
            use_rust_index = True
        else:
            use_rust_index = self.opener.options.get(b'rust.index')

    # Pick the index parser matching the detected on-disk format.
    self._parse_index = parse_index_v1
    if self._format_version == REVLOGV0:
        self._parse_index = revlogv0.parse_index_v0
    elif self._format_version == REVLOGV2:
        self._parse_index = parse_index_v2
    elif self._format_version == CHANGELOGV2:
        self._parse_index = parse_index_cl_v2
    elif devel_nodemap:
        self._parse_index = parse_index_v1_nodemap
    elif use_rust_index:
        self._parse_index = parse_index_v1_mixed
    try:
        d = self._parse_index(index_data, self._inline)
        index, chunkcache = d
        use_nodemap = (
            not self._inline
            and self._nodemap_file is not None
            and util.safehasattr(index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                docket = nodemap_data[0]
                if (
                    len(d[0]) > docket.tip_rev
                    and d[0][docket.tip_rev][7] == docket.tip_node
                ):
                    # no changelog tampering
                    self._nodemap_docket = docket
                    index.update_nodemap_data(*nodemap_data)
    except (ValueError, IndexError):
        raise error.RevlogError(
            _(b"index %s is corrupted") % self.display_id
        )
    self.index = index
    # reader for the data segments (the index itself when inline)
    self._segmentfile = randomaccessfile.randomaccessfile(
        self.opener,
        (self._indexfile if self._inline else self._datafile),
        self._chunkcachesize,
        chunkcache,
    )
    self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
        self.opener,
        self._sidedatafile,
        self._chunkcachesize,
    )
    # revnum -> (chain-length, sum-delta-length)
    self._chaininfocache = util.lrucachedict(500)
    # revlog header -> revlog compressor
    self._decompressors = {}
648
652
@util.propertycache
def revlog_kind(self):
    """The kind component of ``self.target`` (cached)."""
    kind = self.target[0]
    return kind
652
656
@util.propertycache
def display_id(self):
    """The public facing "ID" of the revlog that we use in message"""
    if self.revlog_kind != KIND_FILELOG:
        return self.radix
    # Reference the file without the "data/" prefix, so it is familiar
    # to the user.
    return self.target[1]
662
666
def _get_decompressor(self, t):
    """Return (and memoize) the decompressor for revlog header byte ``t``.

    Raises ``RevlogError`` when no compression engine claims ``t``."""
    try:
        return self._decompressors[t]
    except KeyError:
        try:
            engine = util.compengines.forrevlogheader(t)
            decomp = engine.revlogcompressor(self._compengineopts)
            self._decompressors[t] = decomp
        except KeyError:
            raise error.RevlogError(
                _(b'unknown compression type %s') % binascii.hexlify(t)
            )
    return decomp
676
680
@util.propertycache
def _compressor(self):
    """The compressor built from the configured compression engine."""
    engine = util.compengines[self._compengine]
    compressor = engine.revlogcompressor(self._compengineopts)
    return compressor
681
685
@util.propertycache
def _decompressor(self):
    """the default decompressor"""
    docket = self._docket
    if docket is None:
        # pre-docket formats have no default compression header
        return None
    header = docket.default_compression_header
    return self._get_decompressor(header).decompress
690
694
def _indexfp(self):
    """file object for the revlog's index file"""
    index_file = self._indexfile
    return self.opener(index_file, mode=b"r")
694
698
def __index_write_fp(self):
    # You should not use this directly and use `_writing` instead
    try:
        fp = self.opener(
            self._indexfile, mode=b"r+", checkambig=self._checkambig
        )
        if self._docket is None:
            # no docket: appends always go at the end of the file
            fp.seek(0, os.SEEK_END)
        else:
            # docket knows the logical end of the index
            fp.seek(self._docket.index_end, os.SEEK_SET)
        return fp
    except FileNotFoundError:
        # index does not exist yet: create it
        return self.opener(
            self._indexfile, mode=b"w+", checkambig=self._checkambig
        )
710
714
def __index_new_fp(self):
    # You should not use this unless you are upgrading from inline revlog
    open_kwargs = {
        'mode': b"w",
        'checkambig': self._checkambig,
        # write to a temporary file, atomically renamed on close
        'atomictemp': True,
    }
    return self.opener(self._indexfile, **open_kwargs)
719
723
def _datafp(self, mode=b'r'):
    """file object for the revlog's data file"""
    data_file = self._datafile
    return self.opener(data_file, mode=mode)
723
727
@contextlib.contextmanager
def _sidedatareadfp(self):
    """file object suitable to read sidedata"""
    handles = self._writinghandles
    if handles:
        # a write is in progress: reuse its sidedata handle
        yield handles[2]
    else:
        with self.opener(self._sidedatafile) as fp:
            yield fp
732
736
def tiprev(self):
    """Revision number of the most recently added revision."""
    count = len(self.index)
    return count - 1
735
739
def tip(self):
    """Node id of the tip revision."""
    tip_rev = self.tiprev()
    return self.node(tip_rev)
738
742
def __contains__(self, rev):
    """True when ``rev`` is a valid revision number for this revlog."""
    size = len(self)
    return 0 <= rev < size
741
745
def __len__(self):
    """Number of entries in the index."""
    count = len(self.index)
    return count
744
748
def __iter__(self):
    """Iterate over all revision numbers, in ascending order."""
    count = len(self)
    return iter(range(count))
747
751
def revs(self, start=0, stop=None):
    """iterate over all rev in this revlog (from start to stop)"""
    total = len(self)
    return storageutil.iterrevs(total, start=start, stop=stop)
751
755
def hasnode(self, node):
    """Whether ``node`` is known to this revlog."""
    try:
        self.rev(node)
    except KeyError:
        # lookup errors derive from KeyError
        return False
    return True
758
762
def candelta(self, baserev, rev):
    """whether two revisions (baserev, rev) can be delta-ed or not"""
    # A content-changing flag processor (ex. LFS) can alter the rawtext
    # the delta would be based on, so two clients could hold the same
    # node with different flags (i.e. different rawtexts) and end up with
    # incompatible deltas. Refuse deltas when either side carries one.
    changing = REVIDX_RAWTEXT_CHANGING_FLAGS
    if self.flags(baserev) & changing:
        return False
    if self.flags(rev) & changing:
        return False
    return True
771
775
def update_caches(self, transaction):
    """Refresh the persistent nodemap, within ``transaction`` if given."""
    if self._nodemap_file is None:
        # no persistent nodemap configured: nothing to update
        return
    if transaction is None:
        nodemaputil.update_persistent_nodemap(self)
    else:
        nodemaputil.setup_persistent_nodemap(transaction, self)
778
782
def clearcaches(self):
    """Drop every in-memory cache so subsequent reads go back to disk."""
    self._revisioncache = None
    self._chainbasecache.clear()
    self._segmentfile.clear_cache()
    self._segmentfile_sidedata.clear_cache()
    self._pcache = {}
    self._nodemap_docket = None
    self.index.clearcaches()
    # The python code is the one responsible for validating the docket, we
    # end up having to refresh it here.
    can_refresh = (
        not self._inline
        and self._nodemap_file is not None
        and util.safehasattr(self.index, 'update_nodemap_data')
    )
    if not can_refresh:
        return
    nodemap_data = nodemaputil.persisted_data(self)
    if nodemap_data is not None:
        self._nodemap_docket = nodemap_data[0]
        self.index.update_nodemap_data(*nodemap_data)
799
803
def rev(self, node):
    """Revision number holding ``node``.

    Raises ``WdirUnsupported`` for working-directory pseudo-nodes and
    ``LookupError`` for unknown nodes."""
    try:
        return self.index.rev(node)
    except TypeError:
        raise
    except error.RevlogError:
        # parsers.c radix tree lookup failed
        nc = self.nodeconstants
        if node == nc.wdirid or node in nc.wdirfilenodeids:
            raise error.WdirUnsupported
        raise error.LookupError(node, self.display_id, _(b'no node'))
813
817
# Accessors for index entries.

# First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
# are flags.
def start(self, rev):
    """Byte offset at which ``rev``'s chunk starts (flags masked out)."""
    offset_and_flags = self.index[rev][0]
    return int(offset_and_flags >> 16)
820
824
def sidedata_cut_off(self, rev):
    """End offset of ``rev``'s sidedata in the sidedata file."""
    recorded = self.index[rev][8]
    if recorded != 0:
        return recorded
    # This is some annoying dance, because entries without sidedata
    # currently use 0 as their ofsset. (instead of previous-offset +
    # previous-size)
    #
    # We should reconsider this sidedata β†’ 0 sidata_offset policy.
    # In the meantime, we need this.
    while rev >= 0:
        entry = self.index[rev]
        if entry[9] != 0:
            # first ancestor entry with actual sidedata
            return entry[8] + entry[9]
        rev -= 1
    return 0
837
841
def flags(self, rev):
    """Storage flags of ``rev`` (low 16 bits of the first index field)."""
    entry = self.index[rev]
    return entry[0] & 0xFFFF
840
844
def length(self, rev):
    """Stored (compressed) chunk length for ``rev``."""
    entry = self.index[rev]
    return entry[1]
843
847
def sidedata_length(self, rev):
    """Stored sidedata size for ``rev`` (0 when sidedata is unsupported)."""
    if not self.hassidedata:
        return 0
    entry = self.index[rev]
    return entry[9]
848
852
def rawsize(self, rev):
    """return the length of the uncompressed text for a given revision"""
    stored = self.index[rev][2]
    if stored >= 0:
        return stored
    # size not recorded in the index: materialize the raw text instead
    return len(self.rawdata(rev))
857
861
def size(self, rev):
    """length of non-raw text (processed by a "read" flag processor)"""
    # fast path: if no "read" flag processor could change the content,
    # size is rawsize. note: ELLIPSIS is known to not change the content.
    rev_flags = self.flags(rev)
    content_changing = flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS
    if not (rev_flags & content_changing):
        return self.rawsize(rev)
    return len(self.revision(rev))
867
871
def fast_rank(self, rev):
    """Return the rank of a revision if already known, or None otherwise.

    The rank of a revision is the size of the sub-graph it defines as a
    head. Equivalently, the rank of a revision `r` is the size of the set
    `ancestors(r)`, `r` included.

    This method returns the rank retrieved from the revlog in constant
    time. It makes no attempt at computing unknown values for versions of
    the revlog which do not persist the rank.
    """
    # index access first so an out-of-range rev raises, as before
    stored_rank = self.index[rev][ENTRY_RANK]
    if self._format_version != CHANGELOGV2:
        return None
    if stored_rank == RANK_UNKNOWN:
        return None
    if rev == nullrev:
        return 0  # convention
    return stored_rank
885
889
def chainbase(self, rev):
    """Revision on which ``rev``'s delta chain is ultimately based.

    The result is memoized in ``_chainbasecache``."""
    cached = self._chainbasecache.get(rev)
    if cached is not None:
        return cached

    index = self.index
    base = rev
    next_base = index[base][3]
    # walk delta bases until a self-based (full snapshot) entry
    while next_base != base:
        base = next_base
        next_base = index[base][3]

    self._chainbasecache[rev] = base
    return base
900
904
def linkrev(self, rev):
    """Changelog revision this revlog revision is linked to."""
    entry = self.index[rev]
    return entry[4]
903
907
def parentrevs(self, rev):
    """Pair of parent revisions of ``rev``.

    With canonical parent ordering, a null first parent is swapped into
    second position. Raises ``WdirUnsupported`` for the wdir pseudo-rev."""
    try:
        entry = self.index[rev]
    except IndexError:
        if rev == wdirrev:
            raise error.WdirUnsupported
        raise

    p1, p2 = entry[5], entry[6]
    if self.canonical_parent_order and p1 == nullrev:
        return p2, p1
    return p1, p2


# fast parentrevs(rev) where rev isn't filtered
_uncheckedparentrevs = parentrevs
919
923
920 def node(self, rev):
924 def node(self, rev):
921 try:
925 try:
922 return self.index[rev][7]
926 return self.index[rev][7]
923 except IndexError:
927 except IndexError:
924 if rev == wdirrev:
928 if rev == wdirrev:
925 raise error.WdirUnsupported
929 raise error.WdirUnsupported
926 raise
930 raise
927
931
928 # Derived from index values.
932 # Derived from index values.
929
933
930 def end(self, rev):
934 def end(self, rev):
931 return self.start(rev) + self.length(rev)
935 return self.start(rev) + self.length(rev)
932
936
933 def parents(self, node):
937 def parents(self, node):
934 i = self.index
938 i = self.index
935 d = i[self.rev(node)]
939 d = i[self.rev(node)]
936 # inline node() to avoid function call overhead
940 # inline node() to avoid function call overhead
937 if self.canonical_parent_order and d[5] == self.nullid:
941 if self.canonical_parent_order and d[5] == self.nullid:
938 return i[d[6]][7], i[d[5]][7]
942 return i[d[6]][7], i[d[5]][7]
939 else:
943 else:
940 return i[d[5]][7], i[d[6]][7]
944 return i[d[5]][7], i[d[6]][7]
941
945
942 def chainlen(self, rev):
946 def chainlen(self, rev):
943 return self._chaininfo(rev)[0]
947 return self._chaininfo(rev)[0]
944
948
945 def _chaininfo(self, rev):
949 def _chaininfo(self, rev):
946 chaininfocache = self._chaininfocache
950 chaininfocache = self._chaininfocache
947 if rev in chaininfocache:
951 if rev in chaininfocache:
948 return chaininfocache[rev]
952 return chaininfocache[rev]
949 index = self.index
953 index = self.index
950 generaldelta = self._generaldelta
954 generaldelta = self._generaldelta
951 iterrev = rev
955 iterrev = rev
952 e = index[iterrev]
956 e = index[iterrev]
953 clen = 0
957 clen = 0
954 compresseddeltalen = 0
958 compresseddeltalen = 0
955 while iterrev != e[3]:
959 while iterrev != e[3]:
956 clen += 1
960 clen += 1
957 compresseddeltalen += e[1]
961 compresseddeltalen += e[1]
958 if generaldelta:
962 if generaldelta:
959 iterrev = e[3]
963 iterrev = e[3]
960 else:
964 else:
961 iterrev -= 1
965 iterrev -= 1
962 if iterrev in chaininfocache:
966 if iterrev in chaininfocache:
963 t = chaininfocache[iterrev]
967 t = chaininfocache[iterrev]
964 clen += t[0]
968 clen += t[0]
965 compresseddeltalen += t[1]
969 compresseddeltalen += t[1]
966 break
970 break
967 e = index[iterrev]
971 e = index[iterrev]
968 else:
972 else:
969 # Add text length of base since decompressing that also takes
973 # Add text length of base since decompressing that also takes
970 # work. For cache hits the length is already included.
974 # work. For cache hits the length is already included.
971 compresseddeltalen += e[1]
975 compresseddeltalen += e[1]
972 r = (clen, compresseddeltalen)
976 r = (clen, compresseddeltalen)
973 chaininfocache[rev] = r
977 chaininfocache[rev] = r
974 return r
978 return r
975
979
976 def _deltachain(self, rev, stoprev=None):
980 def _deltachain(self, rev, stoprev=None):
977 """Obtain the delta chain for a revision.
981 """Obtain the delta chain for a revision.
978
982
979 ``stoprev`` specifies a revision to stop at. If not specified, we
983 ``stoprev`` specifies a revision to stop at. If not specified, we
980 stop at the base of the chain.
984 stop at the base of the chain.
981
985
982 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
986 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
983 revs in ascending order and ``stopped`` is a bool indicating whether
987 revs in ascending order and ``stopped`` is a bool indicating whether
984 ``stoprev`` was hit.
988 ``stoprev`` was hit.
985 """
989 """
986 # Try C implementation.
990 # Try C implementation.
987 try:
991 try:
988 return self.index.deltachain(rev, stoprev, self._generaldelta)
992 return self.index.deltachain(rev, stoprev, self._generaldelta)
989 except AttributeError:
993 except AttributeError:
990 pass
994 pass
991
995
992 chain = []
996 chain = []
993
997
994 # Alias to prevent attribute lookup in tight loop.
998 # Alias to prevent attribute lookup in tight loop.
995 index = self.index
999 index = self.index
996 generaldelta = self._generaldelta
1000 generaldelta = self._generaldelta
997
1001
998 iterrev = rev
1002 iterrev = rev
999 e = index[iterrev]
1003 e = index[iterrev]
1000 while iterrev != e[3] and iterrev != stoprev:
1004 while iterrev != e[3] and iterrev != stoprev:
1001 chain.append(iterrev)
1005 chain.append(iterrev)
1002 if generaldelta:
1006 if generaldelta:
1003 iterrev = e[3]
1007 iterrev = e[3]
1004 else:
1008 else:
1005 iterrev -= 1
1009 iterrev -= 1
1006 e = index[iterrev]
1010 e = index[iterrev]
1007
1011
1008 if iterrev == stoprev:
1012 if iterrev == stoprev:
1009 stopped = True
1013 stopped = True
1010 else:
1014 else:
1011 chain.append(iterrev)
1015 chain.append(iterrev)
1012 stopped = False
1016 stopped = False
1013
1017
1014 chain.reverse()
1018 chain.reverse()
1015 return chain, stopped
1019 return chain, stopped
1016
1020
1017 def ancestors(self, revs, stoprev=0, inclusive=False):
1021 def ancestors(self, revs, stoprev=0, inclusive=False):
1018 """Generate the ancestors of 'revs' in reverse revision order.
1022 """Generate the ancestors of 'revs' in reverse revision order.
1019 Does not generate revs lower than stoprev.
1023 Does not generate revs lower than stoprev.
1020
1024
1021 See the documentation for ancestor.lazyancestors for more details."""
1025 See the documentation for ancestor.lazyancestors for more details."""
1022
1026
1023 # first, make sure start revisions aren't filtered
1027 # first, make sure start revisions aren't filtered
1024 revs = list(revs)
1028 revs = list(revs)
1025 checkrev = self.node
1029 checkrev = self.node
1026 for r in revs:
1030 for r in revs:
1027 checkrev(r)
1031 checkrev(r)
1028 # and we're sure ancestors aren't filtered as well
1032 # and we're sure ancestors aren't filtered as well
1029
1033
1030 if rustancestor is not None and self.index.rust_ext_compat:
1034 if rustancestor is not None and self.index.rust_ext_compat:
1031 lazyancestors = rustancestor.LazyAncestors
1035 lazyancestors = rustancestor.LazyAncestors
1032 arg = self.index
1036 arg = self.index
1033 else:
1037 else:
1034 lazyancestors = ancestor.lazyancestors
1038 lazyancestors = ancestor.lazyancestors
1035 arg = self._uncheckedparentrevs
1039 arg = self._uncheckedparentrevs
1036 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1040 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
1037
1041
1038 def descendants(self, revs):
1042 def descendants(self, revs):
1039 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1043 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
1040
1044
1041 def findcommonmissing(self, common=None, heads=None):
1045 def findcommonmissing(self, common=None, heads=None):
1042 """Return a tuple of the ancestors of common and the ancestors of heads
1046 """Return a tuple of the ancestors of common and the ancestors of heads
1043 that are not ancestors of common. In revset terminology, we return the
1047 that are not ancestors of common. In revset terminology, we return the
1044 tuple:
1048 tuple:
1045
1049
1046 ::common, (::heads) - (::common)
1050 ::common, (::heads) - (::common)
1047
1051
1048 The list is sorted by revision number, meaning it is
1052 The list is sorted by revision number, meaning it is
1049 topologically sorted.
1053 topologically sorted.
1050
1054
1051 'heads' and 'common' are both lists of node IDs. If heads is
1055 'heads' and 'common' are both lists of node IDs. If heads is
1052 not supplied, uses all of the revlog's heads. If common is not
1056 not supplied, uses all of the revlog's heads. If common is not
1053 supplied, uses nullid."""
1057 supplied, uses nullid."""
1054 if common is None:
1058 if common is None:
1055 common = [self.nullid]
1059 common = [self.nullid]
1056 if heads is None:
1060 if heads is None:
1057 heads = self.heads()
1061 heads = self.heads()
1058
1062
1059 common = [self.rev(n) for n in common]
1063 common = [self.rev(n) for n in common]
1060 heads = [self.rev(n) for n in heads]
1064 heads = [self.rev(n) for n in heads]
1061
1065
1062 # we want the ancestors, but inclusive
1066 # we want the ancestors, but inclusive
1063 class lazyset:
1067 class lazyset:
1064 def __init__(self, lazyvalues):
1068 def __init__(self, lazyvalues):
1065 self.addedvalues = set()
1069 self.addedvalues = set()
1066 self.lazyvalues = lazyvalues
1070 self.lazyvalues = lazyvalues
1067
1071
1068 def __contains__(self, value):
1072 def __contains__(self, value):
1069 return value in self.addedvalues or value in self.lazyvalues
1073 return value in self.addedvalues or value in self.lazyvalues
1070
1074
1071 def __iter__(self):
1075 def __iter__(self):
1072 added = self.addedvalues
1076 added = self.addedvalues
1073 for r in added:
1077 for r in added:
1074 yield r
1078 yield r
1075 for r in self.lazyvalues:
1079 for r in self.lazyvalues:
1076 if not r in added:
1080 if not r in added:
1077 yield r
1081 yield r
1078
1082
1079 def add(self, value):
1083 def add(self, value):
1080 self.addedvalues.add(value)
1084 self.addedvalues.add(value)
1081
1085
1082 def update(self, values):
1086 def update(self, values):
1083 self.addedvalues.update(values)
1087 self.addedvalues.update(values)
1084
1088
1085 has = lazyset(self.ancestors(common))
1089 has = lazyset(self.ancestors(common))
1086 has.add(nullrev)
1090 has.add(nullrev)
1087 has.update(common)
1091 has.update(common)
1088
1092
1089 # take all ancestors from heads that aren't in has
1093 # take all ancestors from heads that aren't in has
1090 missing = set()
1094 missing = set()
1091 visit = collections.deque(r for r in heads if r not in has)
1095 visit = collections.deque(r for r in heads if r not in has)
1092 while visit:
1096 while visit:
1093 r = visit.popleft()
1097 r = visit.popleft()
1094 if r in missing:
1098 if r in missing:
1095 continue
1099 continue
1096 else:
1100 else:
1097 missing.add(r)
1101 missing.add(r)
1098 for p in self.parentrevs(r):
1102 for p in self.parentrevs(r):
1099 if p not in has:
1103 if p not in has:
1100 visit.append(p)
1104 visit.append(p)
1101 missing = list(missing)
1105 missing = list(missing)
1102 missing.sort()
1106 missing.sort()
1103 return has, [self.node(miss) for miss in missing]
1107 return has, [self.node(miss) for miss in missing]
1104
1108
1105 def incrementalmissingrevs(self, common=None):
1109 def incrementalmissingrevs(self, common=None):
1106 """Return an object that can be used to incrementally compute the
1110 """Return an object that can be used to incrementally compute the
1107 revision numbers of the ancestors of arbitrary sets that are not
1111 revision numbers of the ancestors of arbitrary sets that are not
1108 ancestors of common. This is an ancestor.incrementalmissingancestors
1112 ancestors of common. This is an ancestor.incrementalmissingancestors
1109 object.
1113 object.
1110
1114
1111 'common' is a list of revision numbers. If common is not supplied, uses
1115 'common' is a list of revision numbers. If common is not supplied, uses
1112 nullrev.
1116 nullrev.
1113 """
1117 """
1114 if common is None:
1118 if common is None:
1115 common = [nullrev]
1119 common = [nullrev]
1116
1120
1117 if rustancestor is not None and self.index.rust_ext_compat:
1121 if rustancestor is not None and self.index.rust_ext_compat:
1118 return rustancestor.MissingAncestors(self.index, common)
1122 return rustancestor.MissingAncestors(self.index, common)
1119 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1123 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1120
1124
1121 def findmissingrevs(self, common=None, heads=None):
1125 def findmissingrevs(self, common=None, heads=None):
1122 """Return the revision numbers of the ancestors of heads that
1126 """Return the revision numbers of the ancestors of heads that
1123 are not ancestors of common.
1127 are not ancestors of common.
1124
1128
1125 More specifically, return a list of revision numbers corresponding to
1129 More specifically, return a list of revision numbers corresponding to
1126 nodes N such that every N satisfies the following constraints:
1130 nodes N such that every N satisfies the following constraints:
1127
1131
1128 1. N is an ancestor of some node in 'heads'
1132 1. N is an ancestor of some node in 'heads'
1129 2. N is not an ancestor of any node in 'common'
1133 2. N is not an ancestor of any node in 'common'
1130
1134
1131 The list is sorted by revision number, meaning it is
1135 The list is sorted by revision number, meaning it is
1132 topologically sorted.
1136 topologically sorted.
1133
1137
1134 'heads' and 'common' are both lists of revision numbers. If heads is
1138 'heads' and 'common' are both lists of revision numbers. If heads is
1135 not supplied, uses all of the revlog's heads. If common is not
1139 not supplied, uses all of the revlog's heads. If common is not
1136 supplied, uses nullid."""
1140 supplied, uses nullid."""
1137 if common is None:
1141 if common is None:
1138 common = [nullrev]
1142 common = [nullrev]
1139 if heads is None:
1143 if heads is None:
1140 heads = self.headrevs()
1144 heads = self.headrevs()
1141
1145
1142 inc = self.incrementalmissingrevs(common=common)
1146 inc = self.incrementalmissingrevs(common=common)
1143 return inc.missingancestors(heads)
1147 return inc.missingancestors(heads)
1144
1148
1145 def findmissing(self, common=None, heads=None):
1149 def findmissing(self, common=None, heads=None):
1146 """Return the ancestors of heads that are not ancestors of common.
1150 """Return the ancestors of heads that are not ancestors of common.
1147
1151
1148 More specifically, return a list of nodes N such that every N
1152 More specifically, return a list of nodes N such that every N
1149 satisfies the following constraints:
1153 satisfies the following constraints:
1150
1154
1151 1. N is an ancestor of some node in 'heads'
1155 1. N is an ancestor of some node in 'heads'
1152 2. N is not an ancestor of any node in 'common'
1156 2. N is not an ancestor of any node in 'common'
1153
1157
1154 The list is sorted by revision number, meaning it is
1158 The list is sorted by revision number, meaning it is
1155 topologically sorted.
1159 topologically sorted.
1156
1160
1157 'heads' and 'common' are both lists of node IDs. If heads is
1161 'heads' and 'common' are both lists of node IDs. If heads is
1158 not supplied, uses all of the revlog's heads. If common is not
1162 not supplied, uses all of the revlog's heads. If common is not
1159 supplied, uses nullid."""
1163 supplied, uses nullid."""
1160 if common is None:
1164 if common is None:
1161 common = [self.nullid]
1165 common = [self.nullid]
1162 if heads is None:
1166 if heads is None:
1163 heads = self.heads()
1167 heads = self.heads()
1164
1168
1165 common = [self.rev(n) for n in common]
1169 common = [self.rev(n) for n in common]
1166 heads = [self.rev(n) for n in heads]
1170 heads = [self.rev(n) for n in heads]
1167
1171
1168 inc = self.incrementalmissingrevs(common=common)
1172 inc = self.incrementalmissingrevs(common=common)
1169 return [self.node(r) for r in inc.missingancestors(heads)]
1173 return [self.node(r) for r in inc.missingancestors(heads)]
1170
1174
1171 def nodesbetween(self, roots=None, heads=None):
1175 def nodesbetween(self, roots=None, heads=None):
1172 """Return a topological path from 'roots' to 'heads'.
1176 """Return a topological path from 'roots' to 'heads'.
1173
1177
1174 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1178 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1175 topologically sorted list of all nodes N that satisfy both of
1179 topologically sorted list of all nodes N that satisfy both of
1176 these constraints:
1180 these constraints:
1177
1181
1178 1. N is a descendant of some node in 'roots'
1182 1. N is a descendant of some node in 'roots'
1179 2. N is an ancestor of some node in 'heads'
1183 2. N is an ancestor of some node in 'heads'
1180
1184
1181 Every node is considered to be both a descendant and an ancestor
1185 Every node is considered to be both a descendant and an ancestor
1182 of itself, so every reachable node in 'roots' and 'heads' will be
1186 of itself, so every reachable node in 'roots' and 'heads' will be
1183 included in 'nodes'.
1187 included in 'nodes'.
1184
1188
1185 'outroots' is the list of reachable nodes in 'roots', i.e., the
1189 'outroots' is the list of reachable nodes in 'roots', i.e., the
1186 subset of 'roots' that is returned in 'nodes'. Likewise,
1190 subset of 'roots' that is returned in 'nodes'. Likewise,
1187 'outheads' is the subset of 'heads' that is also in 'nodes'.
1191 'outheads' is the subset of 'heads' that is also in 'nodes'.
1188
1192
1189 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1193 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1190 unspecified, uses nullid as the only root. If 'heads' is
1194 unspecified, uses nullid as the only root. If 'heads' is
1191 unspecified, uses list of all of the revlog's heads."""
1195 unspecified, uses list of all of the revlog's heads."""
1192 nonodes = ([], [], [])
1196 nonodes = ([], [], [])
1193 if roots is not None:
1197 if roots is not None:
1194 roots = list(roots)
1198 roots = list(roots)
1195 if not roots:
1199 if not roots:
1196 return nonodes
1200 return nonodes
1197 lowestrev = min([self.rev(n) for n in roots])
1201 lowestrev = min([self.rev(n) for n in roots])
1198 else:
1202 else:
1199 roots = [self.nullid] # Everybody's a descendant of nullid
1203 roots = [self.nullid] # Everybody's a descendant of nullid
1200 lowestrev = nullrev
1204 lowestrev = nullrev
1201 if (lowestrev == nullrev) and (heads is None):
1205 if (lowestrev == nullrev) and (heads is None):
1202 # We want _all_ the nodes!
1206 # We want _all_ the nodes!
1203 return (
1207 return (
1204 [self.node(r) for r in self],
1208 [self.node(r) for r in self],
1205 [self.nullid],
1209 [self.nullid],
1206 list(self.heads()),
1210 list(self.heads()),
1207 )
1211 )
1208 if heads is None:
1212 if heads is None:
1209 # All nodes are ancestors, so the latest ancestor is the last
1213 # All nodes are ancestors, so the latest ancestor is the last
1210 # node.
1214 # node.
1211 highestrev = len(self) - 1
1215 highestrev = len(self) - 1
1212 # Set ancestors to None to signal that every node is an ancestor.
1216 # Set ancestors to None to signal that every node is an ancestor.
1213 ancestors = None
1217 ancestors = None
1214 # Set heads to an empty dictionary for later discovery of heads
1218 # Set heads to an empty dictionary for later discovery of heads
1215 heads = {}
1219 heads = {}
1216 else:
1220 else:
1217 heads = list(heads)
1221 heads = list(heads)
1218 if not heads:
1222 if not heads:
1219 return nonodes
1223 return nonodes
1220 ancestors = set()
1224 ancestors = set()
1221 # Turn heads into a dictionary so we can remove 'fake' heads.
1225 # Turn heads into a dictionary so we can remove 'fake' heads.
1222 # Also, later we will be using it to filter out the heads we can't
1226 # Also, later we will be using it to filter out the heads we can't
1223 # find from roots.
1227 # find from roots.
1224 heads = dict.fromkeys(heads, False)
1228 heads = dict.fromkeys(heads, False)
1225 # Start at the top and keep marking parents until we're done.
1229 # Start at the top and keep marking parents until we're done.
1226 nodestotag = set(heads)
1230 nodestotag = set(heads)
1227 # Remember where the top was so we can use it as a limit later.
1231 # Remember where the top was so we can use it as a limit later.
1228 highestrev = max([self.rev(n) for n in nodestotag])
1232 highestrev = max([self.rev(n) for n in nodestotag])
1229 while nodestotag:
1233 while nodestotag:
1230 # grab a node to tag
1234 # grab a node to tag
1231 n = nodestotag.pop()
1235 n = nodestotag.pop()
1232 # Never tag nullid
1236 # Never tag nullid
1233 if n == self.nullid:
1237 if n == self.nullid:
1234 continue
1238 continue
1235 # A node's revision number represents its place in a
1239 # A node's revision number represents its place in a
1236 # topologically sorted list of nodes.
1240 # topologically sorted list of nodes.
1237 r = self.rev(n)
1241 r = self.rev(n)
1238 if r >= lowestrev:
1242 if r >= lowestrev:
1239 if n not in ancestors:
1243 if n not in ancestors:
1240 # If we are possibly a descendant of one of the roots
1244 # If we are possibly a descendant of one of the roots
1241 # and we haven't already been marked as an ancestor
1245 # and we haven't already been marked as an ancestor
1242 ancestors.add(n) # Mark as ancestor
1246 ancestors.add(n) # Mark as ancestor
1243 # Add non-nullid parents to list of nodes to tag.
1247 # Add non-nullid parents to list of nodes to tag.
1244 nodestotag.update(
1248 nodestotag.update(
1245 [p for p in self.parents(n) if p != self.nullid]
1249 [p for p in self.parents(n) if p != self.nullid]
1246 )
1250 )
1247 elif n in heads: # We've seen it before, is it a fake head?
1251 elif n in heads: # We've seen it before, is it a fake head?
1248 # So it is, real heads should not be the ancestors of
1252 # So it is, real heads should not be the ancestors of
1249 # any other heads.
1253 # any other heads.
1250 heads.pop(n)
1254 heads.pop(n)
1251 if not ancestors:
1255 if not ancestors:
1252 return nonodes
1256 return nonodes
1253 # Now that we have our set of ancestors, we want to remove any
1257 # Now that we have our set of ancestors, we want to remove any
1254 # roots that are not ancestors.
1258 # roots that are not ancestors.
1255
1259
1256 # If one of the roots was nullid, everything is included anyway.
1260 # If one of the roots was nullid, everything is included anyway.
1257 if lowestrev > nullrev:
1261 if lowestrev > nullrev:
1258 # But, since we weren't, let's recompute the lowest rev to not
1262 # But, since we weren't, let's recompute the lowest rev to not
1259 # include roots that aren't ancestors.
1263 # include roots that aren't ancestors.
1260
1264
1261 # Filter out roots that aren't ancestors of heads
1265 # Filter out roots that aren't ancestors of heads
1262 roots = [root for root in roots if root in ancestors]
1266 roots = [root for root in roots if root in ancestors]
1263 # Recompute the lowest revision
1267 # Recompute the lowest revision
1264 if roots:
1268 if roots:
1265 lowestrev = min([self.rev(root) for root in roots])
1269 lowestrev = min([self.rev(root) for root in roots])
1266 else:
1270 else:
1267 # No more roots? Return empty list
1271 # No more roots? Return empty list
1268 return nonodes
1272 return nonodes
1269 else:
1273 else:
1270 # We are descending from nullid, and don't need to care about
1274 # We are descending from nullid, and don't need to care about
1271 # any other roots.
1275 # any other roots.
1272 lowestrev = nullrev
1276 lowestrev = nullrev
1273 roots = [self.nullid]
1277 roots = [self.nullid]
1274 # Transform our roots list into a set.
1278 # Transform our roots list into a set.
1275 descendants = set(roots)
1279 descendants = set(roots)
1276 # Also, keep the original roots so we can filter out roots that aren't
1280 # Also, keep the original roots so we can filter out roots that aren't
1277 # 'real' roots (i.e. are descended from other roots).
1281 # 'real' roots (i.e. are descended from other roots).
1278 roots = descendants.copy()
1282 roots = descendants.copy()
1279 # Our topologically sorted list of output nodes.
1283 # Our topologically sorted list of output nodes.
1280 orderedout = []
1284 orderedout = []
1281 # Don't start at nullid since we don't want nullid in our output list,
1285 # Don't start at nullid since we don't want nullid in our output list,
1282 # and if nullid shows up in descendants, empty parents will look like
1286 # and if nullid shows up in descendants, empty parents will look like
1283 # they're descendants.
1287 # they're descendants.
1284 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1288 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1285 n = self.node(r)
1289 n = self.node(r)
1286 isdescendant = False
1290 isdescendant = False
1287 if lowestrev == nullrev: # Everybody is a descendant of nullid
1291 if lowestrev == nullrev: # Everybody is a descendant of nullid
1288 isdescendant = True
1292 isdescendant = True
1289 elif n in descendants:
1293 elif n in descendants:
1290 # n is already a descendant
1294 # n is already a descendant
1291 isdescendant = True
1295 isdescendant = True
1292 # This check only needs to be done here because all the roots
1296 # This check only needs to be done here because all the roots
1293 # will start being marked is descendants before the loop.
1297 # will start being marked is descendants before the loop.
1294 if n in roots:
1298 if n in roots:
1295 # If n was a root, check if it's a 'real' root.
1299 # If n was a root, check if it's a 'real' root.
1296 p = tuple(self.parents(n))
1300 p = tuple(self.parents(n))
1297 # If any of its parents are descendants, it's not a root.
1301 # If any of its parents are descendants, it's not a root.
1298 if (p[0] in descendants) or (p[1] in descendants):
1302 if (p[0] in descendants) or (p[1] in descendants):
1299 roots.remove(n)
1303 roots.remove(n)
1300 else:
1304 else:
1301 p = tuple(self.parents(n))
1305 p = tuple(self.parents(n))
1302 # A node is a descendant if either of its parents are
1306 # A node is a descendant if either of its parents are
1303 # descendants. (We seeded the dependents list with the roots
1307 # descendants. (We seeded the dependents list with the roots
1304 # up there, remember?)
1308 # up there, remember?)
1305 if (p[0] in descendants) or (p[1] in descendants):
1309 if (p[0] in descendants) or (p[1] in descendants):
1306 descendants.add(n)
1310 descendants.add(n)
1307 isdescendant = True
1311 isdescendant = True
1308 if isdescendant and ((ancestors is None) or (n in ancestors)):
1312 if isdescendant and ((ancestors is None) or (n in ancestors)):
1309 # Only include nodes that are both descendants and ancestors.
1313 # Only include nodes that are both descendants and ancestors.
1310 orderedout.append(n)
1314 orderedout.append(n)
1311 if (ancestors is not None) and (n in heads):
1315 if (ancestors is not None) and (n in heads):
1312 # We're trying to figure out which heads are reachable
1316 # We're trying to figure out which heads are reachable
1313 # from roots.
1317 # from roots.
1314 # Mark this head as having been reached
1318 # Mark this head as having been reached
1315 heads[n] = True
1319 heads[n] = True
1316 elif ancestors is None:
1320 elif ancestors is None:
1317 # Otherwise, we're trying to discover the heads.
1321 # Otherwise, we're trying to discover the heads.
1318 # Assume this is a head because if it isn't, the next step
1322 # Assume this is a head because if it isn't, the next step
1319 # will eventually remove it.
1323 # will eventually remove it.
1320 heads[n] = True
1324 heads[n] = True
1321 # But, obviously its parents aren't.
1325 # But, obviously its parents aren't.
1322 for p in self.parents(n):
1326 for p in self.parents(n):
1323 heads.pop(p, None)
1327 heads.pop(p, None)
1324 heads = [head for head, flag in heads.items() if flag]
1328 heads = [head for head, flag in heads.items() if flag]
1325 roots = list(roots)
1329 roots = list(roots)
1326 assert orderedout
1330 assert orderedout
1327 assert roots
1331 assert roots
1328 assert heads
1332 assert heads
1329 return (orderedout, roots, heads)
1333 return (orderedout, roots, heads)
1330
1334
1331 def headrevs(self, revs=None):
1335 def headrevs(self, revs=None):
1332 if revs is None:
1336 if revs is None:
1333 try:
1337 try:
1334 return self.index.headrevs()
1338 return self.index.headrevs()
1335 except AttributeError:
1339 except AttributeError:
1336 return self._headrevs()
1340 return self._headrevs()
1337 if rustdagop is not None and self.index.rust_ext_compat:
1341 if rustdagop is not None and self.index.rust_ext_compat:
1338 return rustdagop.headrevs(self.index, revs)
1342 return rustdagop.headrevs(self.index, revs)
1339 return dagop.headrevs(revs, self._uncheckedparentrevs)
1343 return dagop.headrevs(revs, self._uncheckedparentrevs)
1340
1344
1341 def computephases(self, roots):
1345 def computephases(self, roots):
1342 return self.index.computephasesmapsets(roots)
1346 return self.index.computephasesmapsets(roots)
1343
1347
1344 def _headrevs(self):
1348 def _headrevs(self):
1345 count = len(self)
1349 count = len(self)
1346 if not count:
1350 if not count:
1347 return [nullrev]
1351 return [nullrev]
1348 # we won't iter over filtered rev so nobody is a head at start
1352 # we won't iter over filtered rev so nobody is a head at start
1349 ishead = [0] * (count + 1)
1353 ishead = [0] * (count + 1)
1350 index = self.index
1354 index = self.index
1351 for r in self:
1355 for r in self:
1352 ishead[r] = 1 # I may be an head
1356 ishead[r] = 1 # I may be an head
1353 e = index[r]
1357 e = index[r]
1354 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1358 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1355 return [r for r, val in enumerate(ishead) if val]
1359 return [r for r, val in enumerate(ishead) if val]
1356
1360
1357 def heads(self, start=None, stop=None):
1361 def heads(self, start=None, stop=None):
1358 """return the list of all nodes that have no children
1362 """return the list of all nodes that have no children
1359
1363
1360 if start is specified, only heads that are descendants of
1364 if start is specified, only heads that are descendants of
1361 start will be returned
1365 start will be returned
1362 if stop is specified, it will consider all the revs from stop
1366 if stop is specified, it will consider all the revs from stop
1363 as if they had no children
1367 as if they had no children
1364 """
1368 """
1365 if start is None and stop is None:
1369 if start is None and stop is None:
1366 if not len(self):
1370 if not len(self):
1367 return [self.nullid]
1371 return [self.nullid]
1368 return [self.node(r) for r in self.headrevs()]
1372 return [self.node(r) for r in self.headrevs()]
1369
1373
1370 if start is None:
1374 if start is None:
1371 start = nullrev
1375 start = nullrev
1372 else:
1376 else:
1373 start = self.rev(start)
1377 start = self.rev(start)
1374
1378
1375 stoprevs = {self.rev(n) for n in stop or []}
1379 stoprevs = {self.rev(n) for n in stop or []}
1376
1380
1377 revs = dagop.headrevssubset(
1381 revs = dagop.headrevssubset(
1378 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1382 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
1379 )
1383 )
1380
1384
1381 return [self.node(rev) for rev in revs]
1385 return [self.node(rev) for rev in revs]
1382
1386
def children(self, node):
    """Return the nodes of all direct children of ``node``."""
    result = []
    target = self.rev(node)
    # Children can only appear after their parent in the revlog.
    for candidate in self.revs(start=target + 1):
        parents = [p for p in self.parentrevs(candidate) if p != nullrev]
        if parents:
            result.extend(
                self.node(candidate) for p in parents if p == target
            )
        elif target == nullrev:
            # Parentless revisions are children of the null revision.
            result.append(self.node(candidate))
    return result
1396
1400
def commonancestorsheads(self, a, b):
    """Return all heads of the common ancestors of nodes ``a`` and ``b``."""
    rev_a = self.rev(a)
    rev_b = self.rev(b)
    head_revs = self._commonancestorsheads(rev_a, rev_b)
    return pycompat.maplist(self.node, head_revs)
1402
1406
1403 def _commonancestorsheads(self, *revs):
1407 def _commonancestorsheads(self, *revs):
1404 """calculate all the heads of the common ancestors of revs"""
1408 """calculate all the heads of the common ancestors of revs"""
1405 try:
1409 try:
1406 ancs = self.index.commonancestorsheads(*revs)
1410 ancs = self.index.commonancestorsheads(*revs)
1407 except (AttributeError, OverflowError): # C implementation failed
1411 except (AttributeError, OverflowError): # C implementation failed
1408 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1412 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1409 return ancs
1413 return ancs
1410
1414
def isancestor(self, a, b):
    """Return True if node ``a`` is an ancestor of node ``b``.

    A revision is considered an ancestor of itself.
    """
    return self.isancestorrev(self.rev(a), self.rev(b))
1417
1421
def isancestorrev(self, a, b):
    """Return True if revision ``a`` is an ancestor of revision ``b``.

    A revision is considered an ancestor of itself.  The logic here is
    trivial except for the last case, which delegates the real work to
    ``reachableroots``.
    """
    if a == nullrev:
        # Everything descends from the null revision.
        return True
    if a == b:
        return True
    if a > b:
        # An ancestor always carries a smaller revision number.
        return False
    return bool(self.reachableroots(a, [b], [a], includepath=False))
1432
1436
def reachableroots(self, minroot, heads, roots, includepath=False):
    """return (heads(::(<roots> and <roots>::<heads>)))

    If includepath is True, return (<roots>::<heads>) instead.
    """
    try:
        # Native (C/Rust) index implementation, when available.
        return self.index.reachableroots2(
            minroot, heads, roots, includepath
        )
    except AttributeError:
        # Pure-Python index: fall back to the dagop implementation.
        return dagop._reachablerootspure(
            self.parentrevs, minroot, roots, heads, includepath
        )
1445
1449
def ancestor(self, a, b):
    """Return the "best" common ancestor of nodes ``a`` and ``b``."""
    rev_a = self.rev(a)
    rev_b = self.rev(b)
    try:
        ancs = self.index.ancestors(rev_a, rev_b)
    except (AttributeError, OverflowError):
        # Pure-Python index, or revision numbers too large for the
        # native implementation.
        ancs = ancestor.ancestors(self.parentrevs, rev_a, rev_b)
    if not ancs:
        return self.nullid
    # choose a consistent winner when there's a tie
    return min(map(self.node, ancs))
1458
1462
def _match(self, id):
    """Resolve ``id`` to a binary node when it matches exactly.

    ``id`` may be an integer revision, a binary nodeid, or the bytes
    form of a (possibly negative) revision number, or a full hex
    nodeid.  Returns the binary node on success and falls through to
    (implicitly) return None when nothing matches exactly.
    """
    if isinstance(id, int):
        # rev
        return self.node(id)
    if len(id) == self.nodeconstants.nodelen:
        # possibly a binary node
        # odds of a binary node being all hex in ASCII are 1 in 10**25
        try:
            node = id
            self.rev(node)  # quick search the index
            return node
        except error.LookupError:
            pass  # may be partial hex id
    try:
        # str(rev)
        rev = int(id)
        # reject anything int() accepted but that is not a canonical
        # decimal spelling (e.g. b'+1', b' 1')
        if b"%d" % rev != id:
            raise ValueError
        if rev < 0:
            # negative revisions count from the end
            rev = len(self) + rev
        if rev < 0 or rev >= len(self):
            raise ValueError
        return self.node(rev)
    except (ValueError, OverflowError):
        pass
    if len(id) == 2 * self.nodeconstants.nodelen:
        try:
            # a full hex nodeid?
            node = bin(id)
            self.rev(node)
            return node
        except (binascii.Error, error.LookupError):
            pass
1492
1496
def _partialmatch(self, id):
    """Resolve a hex nodeid prefix ``id`` to a binary node, or None.

    Raises WdirUnsupported when the prefix can only designate the
    working-directory pseudo-node, and AmbiguousPrefixLookupError when
    several nodes share the prefix.  Tries the native radix-tree index
    first, then falls back to a linear scan that also filters hidden
    revisions.
    """
    # we don't care wdirfilenodeids as they should be always full hash
    maybewdir = self.nodeconstants.wdirhex.startswith(id)
    ambiguous = False
    try:
        partial = self.index.partialmatch(id)
        if partial and self.hasnode(partial):
            if maybewdir:
                # single 'ff...' match in radix tree, ambiguous with wdir
                ambiguous = True
            else:
                return partial
        elif maybewdir:
            # no 'ff...' match in radix tree, wdir identified
            raise error.WdirUnsupported
        else:
            return None
    except error.RevlogError:
        # parsers.c radix tree lookup gave multiple matches
        # fast path: for unfiltered changelog, radix tree is accurate
        if not getattr(self, 'filteredrevs', None):
            ambiguous = True
        # fall through to slow path that filters hidden revisions
    except (AttributeError, ValueError):
        # we are pure python, or key is not hex
        pass
    if ambiguous:
        raise error.AmbiguousPrefixLookupError(
            id, self.display_id, _(b'ambiguous identifier')
        )

    # slow path: consult the prefix cache, then scan the whole index
    if id in self._pcache:
        return self._pcache[id]

    if len(id) <= 40:
        # hex(node)[:...]
        l = len(id) // 2 * 2  # grab an even number of digits
        try:
            # we're dropping the last digit, so let's check that it's hex,
            # to avoid the expensive computation below if it's not
            if len(id) % 2 > 0:
                if not (id[-1] in hexdigits):
                    return None
            prefix = bin(id[:l])
        except binascii.Error:
            pass
        else:
            # e[7] is the binary node stored in each index entry
            nl = [e[7] for e in self.index if e[7].startswith(prefix)]
            nl = [
                n for n in nl if hex(n).startswith(id) and self.hasnode(n)
            ]
            if self.nodeconstants.nullhex.startswith(id):
                nl.append(self.nullid)
            if len(nl) > 0:
                if len(nl) == 1 and not maybewdir:
                    # cache the resolution for subsequent lookups
                    self._pcache[id] = nl[0]
                    return nl[0]
                raise error.AmbiguousPrefixLookupError(
                    id, self.display_id, _(b'ambiguous identifier')
                )
            if maybewdir:
                raise error.WdirUnsupported
            return None
1556
1560
def lookup(self, id):
    """Locate a node based on:
    - revision number or str(revision number)
    - nodeid or subset of hex nodeid
    """
    exact = self._match(id)
    if exact is not None:
        return exact
    partial = self._partialmatch(id)
    if partial:
        return partial

    raise error.LookupError(id, self.display_id, _(b'no match found'))
1570
1574
def shortest(self, node, minlength=1):
    """Find the shortest unambiguous prefix that matches node."""

    def isvalid(prefix):
        # A prefix is valid when it resolves to exactly one node (or
        # unambiguously to the working-directory pseudo-node).
        try:
            matchednode = self._partialmatch(prefix)
        except error.AmbiguousPrefixLookupError:
            return False
        except error.WdirUnsupported:
            # single 'ff...' match
            return True
        if matchednode is None:
            raise error.LookupError(node, self.display_id, _(b'no node'))
        return True

    def maybewdir(prefix):
        # True when the prefix could still match the all-'f' wdir hex id.
        return all(c == b'f' for c in pycompat.iterbytestr(prefix))

    hexnode = hex(node)

    def disambiguate(hexnode, minlength):
        """Disambiguate against wdirid."""
        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if not maybewdir(prefix):
                return prefix

    # Fast path: unfiltered repos can ask the native index directly.
    if not getattr(self, 'filteredrevs', None):
        try:
            length = max(self.index.shortest(node), minlength)
            return disambiguate(hexnode, length)
        except error.RevlogError:
            if node != self.nodeconstants.wdirid:
                raise error.LookupError(
                    node, self.display_id, _(b'no node')
                )
        except AttributeError:
            # Fall through to pure code
            pass

    if node == self.nodeconstants.wdirid:
        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return prefix

    # Slow path: grow the prefix until it resolves unambiguously.
    for length in range(minlength, len(hexnode) + 1):
        prefix = hexnode[:length]
        if isvalid(prefix):
            return disambiguate(hexnode, length)
def cmp(self, node, text):
    """Compare ``text`` with the stored revision for ``node``.

    Returns True when ``text`` differs from what is stored.
    """
    p1, p2 = self.parents(node)
    expected = storageutil.hashrevisionsha1(text, p1, p2)
    return expected != node
1629
1633
def _getsegmentforrevs(self, startrev, endrev, df=None):
    """Obtain a segment of raw data corresponding to a range of revisions.

    Accepts the start and end revisions and an optional already-open
    file handle to be used for reading. If the file handle is read, its
    seek position will not be preserved.

    Requests for data may be satisfied by a cache.

    Returns a 2-tuple of (offset, data) for the requested range of
    revisions. Offset is the integer offset from the beginning of the
    revlog and data is a str or buffer of the raw byte data.

    Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
    to determine where each revision's data begins and ends.
    """
    # Inlined self.start(startrev) & self.end(endrev) for perf reasons
    # (functions are expensive).
    index = self.index
    istart = index[startrev]
    # entry[0] packs offset<<16 | flags; shift recovers the data offset
    start = int(istart[0] >> 16)
    if startrev == endrev:
        end = start + istart[1]
    else:
        iend = index[endrev]
        end = int(iend[0] >> 16) + iend[1]

    if self._inline:
        # Inline revlogs interleave index entries with revision data, so
        # account for the index-entry bytes preceding each revision.
        start += (startrev + 1) * self.index.entry_size
        end += (endrev + 1) * self.index.entry_size
    length = end - start

    return start, self._segmentfile.read_chunk(start, length, df)
1663
1667
def _chunk(self, rev, df=None):
    """Obtain a single decompressed chunk for a revision.

    Accepts an integer revision and an optional already-open file handle
    to be used for reading. If used, the seek position of the file will
    not be preserved.

    Returns a str holding uncompressed data for the requested revision.
    """
    mode = self.index[rev][10]
    raw = self._getsegmentforrevs(rev, rev, df=df)[1]
    if mode == COMP_MODE_PLAIN:
        # Stored uncompressed; nothing to do.
        return raw
    if mode == COMP_MODE_DEFAULT:
        return self._decompressor(raw)
    if mode == COMP_MODE_INLINE:
        return self.decompress(raw)
    raise error.RevlogError(b'unknown compression mode %d' % mode)
1685
1689
def _chunks(self, revs, df=None, targetsize=None):
    """Obtain decompressed chunks for the specified revisions.

    Accepts an iterable of numeric revisions that are assumed to be in
    ascending order. Also accepts an optional already-open file handle
    to be used for reading. If used, the seek position of the file will
    not be preserved.

    This function is similar to calling ``self._chunk()`` multiple times,
    but is faster.

    Returns a list with decompressed data for each requested revision.
    """
    if not revs:
        return []
    # Hoist attribute lookups out of the hot loop below.
    start = self.start
    length = self.length
    inline = self._inline
    iosize = self.index.entry_size
    buffer = util.buffer

    l = []
    ladd = l.append

    if not self._withsparseread:
        slicedchunks = (revs,)
    else:
        # Split the revision run into slices that stay under targetsize
        # of read data, so sparse reads skip large unrelated gaps.
        slicedchunks = deltautil.slicechunk(
            self, revs, targetsize=targetsize
        )

    for revschunk in slicedchunks:
        firstrev = revschunk[0]
        # Skip trailing revisions with empty diff
        for lastrev in revschunk[::-1]:
            if length(lastrev) != 0:
                break

        try:
            offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
        except OverflowError:
            # issue4215 - we can't cache a run of chunks greater than
            # 2G on Windows
            return [self._chunk(rev, df=df) for rev in revschunk]

        decomp = self.decompress
        # self._decompressor might be None, but will not be used in that case
        def_decomp = self._decompressor
        for rev in revschunk:
            chunkstart = start(rev)
            if inline:
                # account for the index entries interleaved with the data
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            comp_mode = self.index[rev][10]
            # zero-copy view into the segment read above
            c = buffer(data, chunkstart - offset, chunklength)
            if comp_mode == COMP_MODE_PLAIN:
                ladd(c)
            elif comp_mode == COMP_MODE_INLINE:
                ladd(decomp(c))
            elif comp_mode == COMP_MODE_DEFAULT:
                ladd(def_decomp(c))
            else:
                msg = b'unknown compression mode %d'
                msg %= comp_mode
                raise error.RevlogError(msg)

    return l
1753
1757
def deltaparent(self, rev):
    """Return the revision the delta for ``rev`` is stored against.

    Returns nullrev when the revision is stored as a full snapshot.
    """
    base = self.index[rev][3]
    if base == rev:
        # Self-referencing base means a full snapshot, not a delta.
        return nullrev
    if self._generaldelta:
        return base
    # Legacy (non-generaldelta) revlogs always delta against rev - 1.
    return rev - 1
1763
1767
def issnapshot(self, rev):
    """tells whether rev is a snapshot"""
    if not self._sparserevlog:
        # Without sparse-revlog only full-text (nullrev-based) entries
        # are snapshots.
        return self.deltaparent(rev) == nullrev
    elif util.safehasattr(self.index, b'issnapshot'):
        # directly assign the method to cache the testing and access
        self.issnapshot = self.index.issnapshot
        return self.issnapshot(rev)
    if rev == nullrev:
        return True
    entry = self.index[rev]
    base = entry[3]
    if base == rev:
        return True
    if base == nullrev:
        return True
    # Walk each parent past empty-delta entries to its effective
    # delta base before comparing against our own base.
    p1 = entry[5]
    while self.length(p1) == 0:
        b = self.deltaparent(p1)
        if b == p1:
            break
        p1 = b
    p2 = entry[6]
    while self.length(p2) == 0:
        b = self.deltaparent(p2)
        if b == p2:
            break
        p2 = b
    if base == p1 or base == p2:
        # A delta against a parent is an ordinary delta, not a snapshot.
        return False
    # An intermediate snapshot must itself be based on a snapshot.
    return self.issnapshot(base)
1795
1799
def snapshotdepth(self, rev):
    """Return the number of snapshots in the chain before this one.

    Raises ProgrammingError when ``rev`` is not itself a snapshot.
    """
    if not self.issnapshot(rev):
        # Bug fix: the original passed the format string unfilled, so
        # the error message contained a literal '%d'.
        raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
    return len(self._deltachain(rev)[0]) - 1
1801
1805
def revdiff(self, rev1, rev2):
    """Return or calculate a delta between two revisions.

    The delta calculated is in binary form and is intended to be written
    to revlog data directly. So this function needs raw revision data.
    """
    if rev1 != nullrev and self.deltaparent(rev2) == rev1:
        # The stored delta is already against rev1; reuse it verbatim.
        return bytes(self._chunk(rev2))

    # Otherwise compute a fresh binary diff between the raw texts.
    return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
1812
1816
def revision(self, nodeorrev, _df=None):
    """Return an uncompressed revision of a given node or revision
    number.

    _df - an existing file handle to read from. (internal-only)
    """
    return self._revisiondata(nodeorrev, _df)
1820
1824
def sidedata(self, nodeorrev, _df=None):
    """A map of extra data related to the changeset but not part of the hash.

    This function currently return a dictionary. However, more advanced
    mapping object will likely be used in the future for a more
    efficient/lazy code.
    """
    # Accept either a revision number or a node for convenience.
    rev = nodeorrev if isinstance(nodeorrev, int) else self.rev(nodeorrev)
    return self._sidedata(rev)
1834
1838
def _revisiondata(self, nodeorrev, _df=None, raw=False):
    """Return the (possibly flag-processed) text of a revision.

    ``raw=True`` returns the stored rawtext without running the read
    flag processors (hash validation may still occur).
    """
    # deal with <nodeorrev> argument type
    if isinstance(nodeorrev, int):
        rev = nodeorrev
        node = self.node(rev)
    else:
        node = nodeorrev
        rev = None

    # fast path the special `nullid` rev
    if node == self.nullid:
        return b""

    # ``rawtext`` is the text as stored inside the revlog. Might be the
    # revision or might need to be processed to retrieve the revision.
    rev, rawtext, validated = self._rawtext(node, rev, _df=_df)

    if raw and validated:
        # if we don't want to process the raw text and that raw
        # text is cached, we can exit early.
        return rawtext
    if rev is None:
        rev = self.rev(node)
    # the revlog's flag for this revision
    # (usually alter its state or content)
    flags = self.flags(rev)

    if validated and flags == REVIDX_DEFAULT_FLAGS:
        # no extra flags set, no flag processor runs, text = rawtext
        return rawtext

    if raw:
        # raw mode: only verify the hash flag-wise, keep rawtext as-is
        validatehash = flagutil.processflagsraw(self, rawtext, flags)
        text = rawtext
    else:
        # run the read-side flag processors to recover the real text
        r = flagutil.processflagsread(self, rawtext, flags)
        text, validatehash = r
    if validatehash:
        self.checkhash(text, node, rev=rev)
    if not validated:
        # cache the rawtext now that it has been validated
        self._revisioncache = (node, rev, rawtext)

    return text
1878
1882
def _rawtext(self, node, rev, _df=None):
    """return the possibly unvalidated rawtext for a revision

    returns (rev, rawtext, validated)
    """

    # revision in the cache (could be useful to apply delta)
    cachedrev = None
    # An intermediate text to apply deltas to
    basetext = None

    # Check if we have the entry in cache
    # The cache entry looks like (node, rev, rawtext)
    if self._revisioncache:
        if self._revisioncache[0] == node:
            return (rev, self._revisioncache[2], True)
        cachedrev = self._revisioncache[1]

    if rev is None:
        rev = self.rev(node)

    # Delta chain for ``rev``; stops early if it reaches the cached rev,
    # whose text can then serve as the base.
    chain, stopped = self._deltachain(rev, stoprev=cachedrev)
    if stopped:
        basetext = self._revisioncache[2]

    # drop cache to save memory, the caller is expected to
    # update self._revisioncache after validating the text
    self._revisioncache = None

    targetsize = None
    rawsize = self.index[rev][2]
    if 0 <= rawsize:
        # heuristic read-size cap for sparse-read chunk slicing
        targetsize = 4 * rawsize

    bins = self._chunks(chain, df=_df, targetsize=targetsize)
    if basetext is None:
        # no cached base: the first chunk of the chain is the full text
        basetext = bytes(bins[0])
        bins = bins[1:]

    rawtext = mdiff.patches(basetext, bins)
    del basetext  # let us have a chance to free memory early
    return (rev, rawtext, False)
1921
1925
1922 def _sidedata(self, rev):
1926 def _sidedata(self, rev):
1923 """Return the sidedata for a given revision number."""
1927 """Return the sidedata for a given revision number."""
1924 index_entry = self.index[rev]
1928 index_entry = self.index[rev]
1925 sidedata_offset = index_entry[8]
1929 sidedata_offset = index_entry[8]
1926 sidedata_size = index_entry[9]
1930 sidedata_size = index_entry[9]
1927
1931
1928 if self._inline:
1932 if self._inline:
1929 sidedata_offset += self.index.entry_size * (1 + rev)
1933 sidedata_offset += self.index.entry_size * (1 + rev)
1930 if sidedata_size == 0:
1934 if sidedata_size == 0:
1931 return {}
1935 return {}
1932
1936
1933 if self._docket.sidedata_end < sidedata_offset + sidedata_size:
1937 if self._docket.sidedata_end < sidedata_offset + sidedata_size:
1934 filename = self._sidedatafile
1938 filename = self._sidedatafile
1935 end = self._docket.sidedata_end
1939 end = self._docket.sidedata_end
1936 offset = sidedata_offset
1940 offset = sidedata_offset
1937 length = sidedata_size
1941 length = sidedata_size
1938 m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
1942 m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
1939 raise error.RevlogError(m)
1943 raise error.RevlogError(m)
1940
1944
1941 comp_segment = self._segmentfile_sidedata.read_chunk(
1945 comp_segment = self._segmentfile_sidedata.read_chunk(
1942 sidedata_offset, sidedata_size
1946 sidedata_offset, sidedata_size
1943 )
1947 )
1944
1948
1945 comp = self.index[rev][11]
1949 comp = self.index[rev][11]
1946 if comp == COMP_MODE_PLAIN:
1950 if comp == COMP_MODE_PLAIN:
1947 segment = comp_segment
1951 segment = comp_segment
1948 elif comp == COMP_MODE_DEFAULT:
1952 elif comp == COMP_MODE_DEFAULT:
1949 segment = self._decompressor(comp_segment)
1953 segment = self._decompressor(comp_segment)
1950 elif comp == COMP_MODE_INLINE:
1954 elif comp == COMP_MODE_INLINE:
1951 segment = self.decompress(comp_segment)
1955 segment = self.decompress(comp_segment)
1952 else:
1956 else:
1953 msg = b'unknown compression mode %d'
1957 msg = b'unknown compression mode %d'
1954 msg %= comp
1958 msg %= comp
1955 raise error.RevlogError(msg)
1959 raise error.RevlogError(msg)
1956
1960
1957 sidedata = sidedatautil.deserialize_sidedata(segment)
1961 sidedata = sidedatautil.deserialize_sidedata(segment)
1958 return sidedata
1962 return sidedata
1959
1963
1960 def rawdata(self, nodeorrev, _df=None):
1964 def rawdata(self, nodeorrev, _df=None):
1961 """return an uncompressed raw data of a given node or revision number.
1965 """return an uncompressed raw data of a given node or revision number.
1962
1966
1963 _df - an existing file handle to read from. (internal-only)
1967 _df - an existing file handle to read from. (internal-only)
1964 """
1968 """
1965 return self._revisiondata(nodeorrev, _df, raw=True)
1969 return self._revisiondata(nodeorrev, _df, raw=True)
1966
1970
1967 def hash(self, text, p1, p2):
1971 def hash(self, text, p1, p2):
1968 """Compute a node hash.
1972 """Compute a node hash.
1969
1973
1970 Available as a function so that subclasses can replace the hash
1974 Available as a function so that subclasses can replace the hash
1971 as needed.
1975 as needed.
1972 """
1976 """
1973 return storageutil.hashrevisionsha1(text, p1, p2)
1977 return storageutil.hashrevisionsha1(text, p1, p2)
1974
1978
1975 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1979 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1976 """Check node hash integrity.
1980 """Check node hash integrity.
1977
1981
1978 Available as a function so that subclasses can extend hash mismatch
1982 Available as a function so that subclasses can extend hash mismatch
1979 behaviors as needed.
1983 behaviors as needed.
1980 """
1984 """
1981 try:
1985 try:
1982 if p1 is None and p2 is None:
1986 if p1 is None and p2 is None:
1983 p1, p2 = self.parents(node)
1987 p1, p2 = self.parents(node)
1984 if node != self.hash(text, p1, p2):
1988 if node != self.hash(text, p1, p2):
1985 # Clear the revision cache on hash failure. The revision cache
1989 # Clear the revision cache on hash failure. The revision cache
1986 # only stores the raw revision and clearing the cache does have
1990 # only stores the raw revision and clearing the cache does have
1987 # the side-effect that we won't have a cache hit when the raw
1991 # the side-effect that we won't have a cache hit when the raw
1988 # revision data is accessed. But this case should be rare and
1992 # revision data is accessed. But this case should be rare and
1989 # it is extra work to teach the cache about the hash
1993 # it is extra work to teach the cache about the hash
1990 # verification state.
1994 # verification state.
1991 if self._revisioncache and self._revisioncache[0] == node:
1995 if self._revisioncache and self._revisioncache[0] == node:
1992 self._revisioncache = None
1996 self._revisioncache = None
1993
1997
1994 revornode = rev
1998 revornode = rev
1995 if revornode is None:
1999 if revornode is None:
1996 revornode = templatefilters.short(hex(node))
2000 revornode = templatefilters.short(hex(node))
1997 raise error.RevlogError(
2001 raise error.RevlogError(
1998 _(b"integrity check failed on %s:%s")
2002 _(b"integrity check failed on %s:%s")
1999 % (self.display_id, pycompat.bytestr(revornode))
2003 % (self.display_id, pycompat.bytestr(revornode))
2000 )
2004 )
2001 except error.RevlogError:
2005 except error.RevlogError:
2002 if self._censorable and storageutil.iscensoredtext(text):
2006 if self._censorable and storageutil.iscensoredtext(text):
2003 raise error.CensoredNodeError(self.display_id, node, text)
2007 raise error.CensoredNodeError(self.display_id, node, text)
2004 raise
2008 raise
2005
2009
2006 def _enforceinlinesize(self, tr):
2010 def _enforceinlinesize(self, tr):
2007 """Check if the revlog is too big for inline and convert if so.
2011 """Check if the revlog is too big for inline and convert if so.
2008
2012
2009 This should be called after revisions are added to the revlog. If the
2013 This should be called after revisions are added to the revlog. If the
2010 revlog has grown too large to be an inline revlog, it will convert it
2014 revlog has grown too large to be an inline revlog, it will convert it
2011 to use multiple index and data files.
2015 to use multiple index and data files.
2012 """
2016 """
2013 tiprev = len(self) - 1
2017 tiprev = len(self) - 1
2014 total_size = self.start(tiprev) + self.length(tiprev)
2018 total_size = self.start(tiprev) + self.length(tiprev)
2015 if not self._inline or total_size < _maxinline:
2019 if not self._inline or total_size < _maxinline:
2016 return
2020 return
2017
2021
2018 troffset = tr.findoffset(self._indexfile)
2022 troffset = tr.findoffset(self._indexfile)
2019 if troffset is None:
2023 if troffset is None:
2020 raise error.RevlogError(
2024 raise error.RevlogError(
2021 _(b"%s not found in the transaction") % self._indexfile
2025 _(b"%s not found in the transaction") % self._indexfile
2022 )
2026 )
2023 trindex = None
2027 trindex = None
2024 tr.add(self._datafile, 0)
2028 tr.add(self._datafile, 0)
2025
2029
2026 existing_handles = False
2030 existing_handles = False
2027 if self._writinghandles is not None:
2031 if self._writinghandles is not None:
2028 existing_handles = True
2032 existing_handles = True
2029 fp = self._writinghandles[0]
2033 fp = self._writinghandles[0]
2030 fp.flush()
2034 fp.flush()
2031 fp.close()
2035 fp.close()
2032 # We can't use the cached file handle after close(). So prevent
2036 # We can't use the cached file handle after close(). So prevent
2033 # its usage.
2037 # its usage.
2034 self._writinghandles = None
2038 self._writinghandles = None
2035 self._segmentfile.writing_handle = None
2039 self._segmentfile.writing_handle = None
2036 # No need to deal with sidedata writing handle as it is only
2040 # No need to deal with sidedata writing handle as it is only
2037 # relevant with revlog-v2 which is never inline, not reaching
2041 # relevant with revlog-v2 which is never inline, not reaching
2038 # this code
2042 # this code
2039
2043
2040 new_dfh = self._datafp(b'w+')
2044 new_dfh = self._datafp(b'w+')
2041 new_dfh.truncate(0) # drop any potentially existing data
2045 new_dfh.truncate(0) # drop any potentially existing data
2042 try:
2046 try:
2043 with self._indexfp() as read_ifh:
2047 with self._indexfp() as read_ifh:
2044 for r in self:
2048 for r in self:
2045 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2049 new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
2046 if (
2050 if (
2047 trindex is None
2051 trindex is None
2048 and troffset
2052 and troffset
2049 <= self.start(r) + r * self.index.entry_size
2053 <= self.start(r) + r * self.index.entry_size
2050 ):
2054 ):
2051 trindex = r
2055 trindex = r
2052 new_dfh.flush()
2056 new_dfh.flush()
2053
2057
2054 if trindex is None:
2058 if trindex is None:
2055 trindex = 0
2059 trindex = 0
2056
2060
2057 with self.__index_new_fp() as fp:
2061 with self.__index_new_fp() as fp:
2058 self._format_flags &= ~FLAG_INLINE_DATA
2062 self._format_flags &= ~FLAG_INLINE_DATA
2059 self._inline = False
2063 self._inline = False
2060 for i in self:
2064 for i in self:
2061 e = self.index.entry_binary(i)
2065 e = self.index.entry_binary(i)
2062 if i == 0 and self._docket is None:
2066 if i == 0 and self._docket is None:
2063 header = self._format_flags | self._format_version
2067 header = self._format_flags | self._format_version
2064 header = self.index.pack_header(header)
2068 header = self.index.pack_header(header)
2065 e = header + e
2069 e = header + e
2066 fp.write(e)
2070 fp.write(e)
2067 if self._docket is not None:
2071 if self._docket is not None:
2068 self._docket.index_end = fp.tell()
2072 self._docket.index_end = fp.tell()
2069
2073
2070 # There is a small transactional race here. If the rename of
2074 # There is a small transactional race here. If the rename of
2071 # the index fails, we should remove the datafile. It is more
2075 # the index fails, we should remove the datafile. It is more
2072 # important to ensure that the data file is not truncated
2076 # important to ensure that the data file is not truncated
2073 # when the index is replaced as otherwise data is lost.
2077 # when the index is replaced as otherwise data is lost.
2074 tr.replace(self._datafile, self.start(trindex))
2078 tr.replace(self._datafile, self.start(trindex))
2075
2079
2076 # the temp file replace the real index when we exit the context
2080 # the temp file replace the real index when we exit the context
2077 # manager
2081 # manager
2078
2082
2079 tr.replace(self._indexfile, trindex * self.index.entry_size)
2083 tr.replace(self._indexfile, trindex * self.index.entry_size)
2080 nodemaputil.setup_persistent_nodemap(tr, self)
2084 nodemaputil.setup_persistent_nodemap(tr, self)
2081 self._segmentfile = randomaccessfile.randomaccessfile(
2085 self._segmentfile = randomaccessfile.randomaccessfile(
2082 self.opener,
2086 self.opener,
2083 self._datafile,
2087 self._datafile,
2084 self._chunkcachesize,
2088 self._chunkcachesize,
2085 )
2089 )
2086
2090
2087 if existing_handles:
2091 if existing_handles:
2088 # switched from inline to conventional reopen the index
2092 # switched from inline to conventional reopen the index
2089 ifh = self.__index_write_fp()
2093 ifh = self.__index_write_fp()
2090 self._writinghandles = (ifh, new_dfh, None)
2094 self._writinghandles = (ifh, new_dfh, None)
2091 self._segmentfile.writing_handle = new_dfh
2095 self._segmentfile.writing_handle = new_dfh
2092 new_dfh = None
2096 new_dfh = None
2093 # No need to deal with sidedata writing handle as it is only
2097 # No need to deal with sidedata writing handle as it is only
2094 # relevant with revlog-v2 which is never inline, not reaching
2098 # relevant with revlog-v2 which is never inline, not reaching
2095 # this code
2099 # this code
2096 finally:
2100 finally:
2097 if new_dfh is not None:
2101 if new_dfh is not None:
2098 new_dfh.close()
2102 new_dfh.close()
2099
2103
2100 def _nodeduplicatecallback(self, transaction, node):
2104 def _nodeduplicatecallback(self, transaction, node):
2101 """called when trying to add a node already stored."""
2105 """called when trying to add a node already stored."""
2102
2106
2103 @contextlib.contextmanager
2107 @contextlib.contextmanager
2104 def reading(self):
2108 def reading(self):
2105 """Context manager that keeps data and sidedata files open for reading"""
2109 """Context manager that keeps data and sidedata files open for reading"""
2106 with self._segmentfile.reading():
2110 with self._segmentfile.reading():
2107 with self._segmentfile_sidedata.reading():
2111 with self._segmentfile_sidedata.reading():
2108 yield
2112 yield
2109
2113
2110 @contextlib.contextmanager
2114 @contextlib.contextmanager
2111 def _writing(self, transaction):
2115 def _writing(self, transaction):
2112 if self._trypending:
2116 if self._trypending:
2113 msg = b'try to write in a `trypending` revlog: %s'
2117 msg = b'try to write in a `trypending` revlog: %s'
2114 msg %= self.display_id
2118 msg %= self.display_id
2115 raise error.ProgrammingError(msg)
2119 raise error.ProgrammingError(msg)
2116 if self._writinghandles is not None:
2120 if self._writinghandles is not None:
2117 yield
2121 yield
2118 else:
2122 else:
2119 ifh = dfh = sdfh = None
2123 ifh = dfh = sdfh = None
2120 try:
2124 try:
2121 r = len(self)
2125 r = len(self)
2122 # opening the data file.
2126 # opening the data file.
2123 dsize = 0
2127 dsize = 0
2124 if r:
2128 if r:
2125 dsize = self.end(r - 1)
2129 dsize = self.end(r - 1)
2126 dfh = None
2130 dfh = None
2127 if not self._inline:
2131 if not self._inline:
2128 try:
2132 try:
2129 dfh = self._datafp(b"r+")
2133 dfh = self._datafp(b"r+")
2130 if self._docket is None:
2134 if self._docket is None:
2131 dfh.seek(0, os.SEEK_END)
2135 dfh.seek(0, os.SEEK_END)
2132 else:
2136 else:
2133 dfh.seek(self._docket.data_end, os.SEEK_SET)
2137 dfh.seek(self._docket.data_end, os.SEEK_SET)
2134 except FileNotFoundError:
2138 except FileNotFoundError:
2135 dfh = self._datafp(b"w+")
2139 dfh = self._datafp(b"w+")
2136 transaction.add(self._datafile, dsize)
2140 transaction.add(self._datafile, dsize)
2137 if self._sidedatafile is not None:
2141 if self._sidedatafile is not None:
2138 # revlog-v2 does not inline, help Pytype
2142 # revlog-v2 does not inline, help Pytype
2139 assert dfh is not None
2143 assert dfh is not None
2140 try:
2144 try:
2141 sdfh = self.opener(self._sidedatafile, mode=b"r+")
2145 sdfh = self.opener(self._sidedatafile, mode=b"r+")
2142 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2146 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2143 except FileNotFoundError:
2147 except FileNotFoundError:
2144 sdfh = self.opener(self._sidedatafile, mode=b"w+")
2148 sdfh = self.opener(self._sidedatafile, mode=b"w+")
2145 transaction.add(
2149 transaction.add(
2146 self._sidedatafile, self._docket.sidedata_end
2150 self._sidedatafile, self._docket.sidedata_end
2147 )
2151 )
2148
2152
2149 # opening the index file.
2153 # opening the index file.
2150 isize = r * self.index.entry_size
2154 isize = r * self.index.entry_size
2151 ifh = self.__index_write_fp()
2155 ifh = self.__index_write_fp()
2152 if self._inline:
2156 if self._inline:
2153 transaction.add(self._indexfile, dsize + isize)
2157 transaction.add(self._indexfile, dsize + isize)
2154 else:
2158 else:
2155 transaction.add(self._indexfile, isize)
2159 transaction.add(self._indexfile, isize)
2156 # exposing all file handle for writing.
2160 # exposing all file handle for writing.
2157 self._writinghandles = (ifh, dfh, sdfh)
2161 self._writinghandles = (ifh, dfh, sdfh)
2158 self._segmentfile.writing_handle = ifh if self._inline else dfh
2162 self._segmentfile.writing_handle = ifh if self._inline else dfh
2159 self._segmentfile_sidedata.writing_handle = sdfh
2163 self._segmentfile_sidedata.writing_handle = sdfh
2160 yield
2164 yield
2161 if self._docket is not None:
2165 if self._docket is not None:
2162 self._write_docket(transaction)
2166 self._write_docket(transaction)
2163 finally:
2167 finally:
2164 self._writinghandles = None
2168 self._writinghandles = None
2165 self._segmentfile.writing_handle = None
2169 self._segmentfile.writing_handle = None
2166 self._segmentfile_sidedata.writing_handle = None
2170 self._segmentfile_sidedata.writing_handle = None
2167 if dfh is not None:
2171 if dfh is not None:
2168 dfh.close()
2172 dfh.close()
2169 if sdfh is not None:
2173 if sdfh is not None:
2170 sdfh.close()
2174 sdfh.close()
2171 # closing the index file last to avoid exposing referent to
2175 # closing the index file last to avoid exposing referent to
2172 # potential unflushed data content.
2176 # potential unflushed data content.
2173 if ifh is not None:
2177 if ifh is not None:
2174 ifh.close()
2178 ifh.close()
2175
2179
2176 def _write_docket(self, transaction):
2180 def _write_docket(self, transaction):
2177 """write the current docket on disk
2181 """write the current docket on disk
2178
2182
2179 Exist as a method to help changelog to implement transaction logic
2183 Exist as a method to help changelog to implement transaction logic
2180
2184
2181 We could also imagine using the same transaction logic for all revlog
2185 We could also imagine using the same transaction logic for all revlog
2182 since docket are cheap."""
2186 since docket are cheap."""
2183 self._docket.write(transaction)
2187 self._docket.write(transaction)
2184
2188
2185 def addrevision(
2189 def addrevision(
2186 self,
2190 self,
2187 text,
2191 text,
2188 transaction,
2192 transaction,
2189 link,
2193 link,
2190 p1,
2194 p1,
2191 p2,
2195 p2,
2192 cachedelta=None,
2196 cachedelta=None,
2193 node=None,
2197 node=None,
2194 flags=REVIDX_DEFAULT_FLAGS,
2198 flags=REVIDX_DEFAULT_FLAGS,
2195 deltacomputer=None,
2199 deltacomputer=None,
2196 sidedata=None,
2200 sidedata=None,
2197 ):
2201 ):
2198 """add a revision to the log
2202 """add a revision to the log
2199
2203
2200 text - the revision data to add
2204 text - the revision data to add
2201 transaction - the transaction object used for rollback
2205 transaction - the transaction object used for rollback
2202 link - the linkrev data to add
2206 link - the linkrev data to add
2203 p1, p2 - the parent nodeids of the revision
2207 p1, p2 - the parent nodeids of the revision
2204 cachedelta - an optional precomputed delta
2208 cachedelta - an optional precomputed delta
2205 node - nodeid of revision; typically node is not specified, and it is
2209 node - nodeid of revision; typically node is not specified, and it is
2206 computed by default as hash(text, p1, p2), however subclasses might
2210 computed by default as hash(text, p1, p2), however subclasses might
2207 use different hashing method (and override checkhash() in such case)
2211 use different hashing method (and override checkhash() in such case)
2208 flags - the known flags to set on the revision
2212 flags - the known flags to set on the revision
2209 deltacomputer - an optional deltacomputer instance shared between
2213 deltacomputer - an optional deltacomputer instance shared between
2210 multiple calls
2214 multiple calls
2211 """
2215 """
2212 if link == nullrev:
2216 if link == nullrev:
2213 raise error.RevlogError(
2217 raise error.RevlogError(
2214 _(b"attempted to add linkrev -1 to %s") % self.display_id
2218 _(b"attempted to add linkrev -1 to %s") % self.display_id
2215 )
2219 )
2216
2220
2217 if sidedata is None:
2221 if sidedata is None:
2218 sidedata = {}
2222 sidedata = {}
2219 elif sidedata and not self.hassidedata:
2223 elif sidedata and not self.hassidedata:
2220 raise error.ProgrammingError(
2224 raise error.ProgrammingError(
2221 _(b"trying to add sidedata to a revlog who don't support them")
2225 _(b"trying to add sidedata to a revlog who don't support them")
2222 )
2226 )
2223
2227
2224 if flags:
2228 if flags:
2225 node = node or self.hash(text, p1, p2)
2229 node = node or self.hash(text, p1, p2)
2226
2230
2227 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2231 rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
2228
2232
2229 # If the flag processor modifies the revision data, ignore any provided
2233 # If the flag processor modifies the revision data, ignore any provided
2230 # cachedelta.
2234 # cachedelta.
2231 if rawtext != text:
2235 if rawtext != text:
2232 cachedelta = None
2236 cachedelta = None
2233
2237
2234 if len(rawtext) > _maxentrysize:
2238 if len(rawtext) > _maxentrysize:
2235 raise error.RevlogError(
2239 raise error.RevlogError(
2236 _(
2240 _(
2237 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2241 b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
2238 )
2242 )
2239 % (self.display_id, len(rawtext))
2243 % (self.display_id, len(rawtext))
2240 )
2244 )
2241
2245
2242 node = node or self.hash(rawtext, p1, p2)
2246 node = node or self.hash(rawtext, p1, p2)
2243 rev = self.index.get_rev(node)
2247 rev = self.index.get_rev(node)
2244 if rev is not None:
2248 if rev is not None:
2245 return rev
2249 return rev
2246
2250
2247 if validatehash:
2251 if validatehash:
2248 self.checkhash(rawtext, node, p1=p1, p2=p2)
2252 self.checkhash(rawtext, node, p1=p1, p2=p2)
2249
2253
2250 return self.addrawrevision(
2254 return self.addrawrevision(
2251 rawtext,
2255 rawtext,
2252 transaction,
2256 transaction,
2253 link,
2257 link,
2254 p1,
2258 p1,
2255 p2,
2259 p2,
2256 node,
2260 node,
2257 flags,
2261 flags,
2258 cachedelta=cachedelta,
2262 cachedelta=cachedelta,
2259 deltacomputer=deltacomputer,
2263 deltacomputer=deltacomputer,
2260 sidedata=sidedata,
2264 sidedata=sidedata,
2261 )
2265 )
2262
2266
2263 def addrawrevision(
2267 def addrawrevision(
2264 self,
2268 self,
2265 rawtext,
2269 rawtext,
2266 transaction,
2270 transaction,
2267 link,
2271 link,
2268 p1,
2272 p1,
2269 p2,
2273 p2,
2270 node,
2274 node,
2271 flags,
2275 flags,
2272 cachedelta=None,
2276 cachedelta=None,
2273 deltacomputer=None,
2277 deltacomputer=None,
2274 sidedata=None,
2278 sidedata=None,
2275 ):
2279 ):
2276 """add a raw revision with known flags, node and parents
2280 """add a raw revision with known flags, node and parents
2277 useful when reusing a revision not stored in this revlog (ex: received
2281 useful when reusing a revision not stored in this revlog (ex: received
2278 over wire, or read from an external bundle).
2282 over wire, or read from an external bundle).
2279 """
2283 """
2280 with self._writing(transaction):
2284 with self._writing(transaction):
2281 return self._addrevision(
2285 return self._addrevision(
2282 node,
2286 node,
2283 rawtext,
2287 rawtext,
2284 transaction,
2288 transaction,
2285 link,
2289 link,
2286 p1,
2290 p1,
2287 p2,
2291 p2,
2288 flags,
2292 flags,
2289 cachedelta,
2293 cachedelta,
2290 deltacomputer=deltacomputer,
2294 deltacomputer=deltacomputer,
2291 sidedata=sidedata,
2295 sidedata=sidedata,
2292 )
2296 )
2293
2297
2294 def compress(self, data):
2298 def compress(self, data):
2295 """Generate a possibly-compressed representation of data."""
2299 """Generate a possibly-compressed representation of data."""
2296 if not data:
2300 if not data:
2297 return b'', data
2301 return b'', data
2298
2302
2299 compressed = self._compressor.compress(data)
2303 compressed = self._compressor.compress(data)
2300
2304
2301 if compressed:
2305 if compressed:
2302 # The revlog compressor added the header in the returned data.
2306 # The revlog compressor added the header in the returned data.
2303 return b'', compressed
2307 return b'', compressed
2304
2308
2305 if data[0:1] == b'\0':
2309 if data[0:1] == b'\0':
2306 return b'', data
2310 return b'', data
2307 return b'u', data
2311 return b'u', data
2308
2312
2309 def decompress(self, data):
2313 def decompress(self, data):
2310 """Decompress a revlog chunk.
2314 """Decompress a revlog chunk.
2311
2315
2312 The chunk is expected to begin with a header identifying the
2316 The chunk is expected to begin with a header identifying the
2313 format type so it can be routed to an appropriate decompressor.
2317 format type so it can be routed to an appropriate decompressor.
2314 """
2318 """
2315 if not data:
2319 if not data:
2316 return data
2320 return data
2317
2321
2318 # Revlogs are read much more frequently than they are written and many
2322 # Revlogs are read much more frequently than they are written and many
2319 # chunks only take microseconds to decompress, so performance is
2323 # chunks only take microseconds to decompress, so performance is
2320 # important here.
2324 # important here.
2321 #
2325 #
2322 # We can make a few assumptions about revlogs:
2326 # We can make a few assumptions about revlogs:
2323 #
2327 #
2324 # 1) the majority of chunks will be compressed (as opposed to inline
2328 # 1) the majority of chunks will be compressed (as opposed to inline
2325 # raw data).
2329 # raw data).
2326 # 2) decompressing *any* data will likely by at least 10x slower than
2330 # 2) decompressing *any* data will likely by at least 10x slower than
2327 # returning raw inline data.
2331 # returning raw inline data.
2328 # 3) we want to prioritize common and officially supported compression
2332 # 3) we want to prioritize common and officially supported compression
2329 # engines
2333 # engines
2330 #
2334 #
2331 # It follows that we want to optimize for "decompress compressed data
2335 # It follows that we want to optimize for "decompress compressed data
2332 # when encoded with common and officially supported compression engines"
2336 # when encoded with common and officially supported compression engines"
2333 # case over "raw data" and "data encoded by less common or non-official
2337 # case over "raw data" and "data encoded by less common or non-official
2334 # compression engines." That is why we have the inline lookup first
2338 # compression engines." That is why we have the inline lookup first
2335 # followed by the compengines lookup.
2339 # followed by the compengines lookup.
2336 #
2340 #
2337 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2341 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2338 # compressed chunks. And this matters for changelog and manifest reads.
2342 # compressed chunks. And this matters for changelog and manifest reads.
2339 t = data[0:1]
2343 t = data[0:1]
2340
2344
2341 if t == b'x':
2345 if t == b'x':
2342 try:
2346 try:
2343 return _zlibdecompress(data)
2347 return _zlibdecompress(data)
2344 except zlib.error as e:
2348 except zlib.error as e:
2345 raise error.RevlogError(
2349 raise error.RevlogError(
2346 _(b'revlog decompress error: %s')
2350 _(b'revlog decompress error: %s')
2347 % stringutil.forcebytestr(e)
2351 % stringutil.forcebytestr(e)
2348 )
2352 )
2349 # '\0' is more common than 'u' so it goes first.
2353 # '\0' is more common than 'u' so it goes first.
2350 elif t == b'\0':
2354 elif t == b'\0':
2351 return data
2355 return data
2352 elif t == b'u':
2356 elif t == b'u':
2353 return util.buffer(data, 1)
2357 return util.buffer(data, 1)
2354
2358
2355 compressor = self._get_decompressor(t)
2359 compressor = self._get_decompressor(t)
2356
2360
2357 return compressor.decompress(data)
2361 return compressor.decompress(data)
2358
2362
2359 def _addrevision(
2363 def _addrevision(
2360 self,
2364 self,
2361 node,
2365 node,
2362 rawtext,
2366 rawtext,
2363 transaction,
2367 transaction,
2364 link,
2368 link,
2365 p1,
2369 p1,
2366 p2,
2370 p2,
2367 flags,
2371 flags,
2368 cachedelta,
2372 cachedelta,
2369 alwayscache=False,
2373 alwayscache=False,
2370 deltacomputer=None,
2374 deltacomputer=None,
2371 sidedata=None,
2375 sidedata=None,
2372 ):
2376 ):
2373 """internal function to add revisions to the log
2377 """internal function to add revisions to the log
2374
2378
2375 see addrevision for argument descriptions.
2379 see addrevision for argument descriptions.
2376
2380
2377 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2381 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2378
2382
2379 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2383 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2380 be used.
2384 be used.
2381
2385
2382 invariants:
2386 invariants:
2383 - rawtext is optional (can be None); if not set, cachedelta must be set.
2387 - rawtext is optional (can be None); if not set, cachedelta must be set.
2384 if both are set, they must correspond to each other.
2388 if both are set, they must correspond to each other.
2385 """
2389 """
2386 if node == self.nullid:
2390 if node == self.nullid:
2387 raise error.RevlogError(
2391 raise error.RevlogError(
2388 _(b"%s: attempt to add null revision") % self.display_id
2392 _(b"%s: attempt to add null revision") % self.display_id
2389 )
2393 )
2390 if (
2394 if (
2391 node == self.nodeconstants.wdirid
2395 node == self.nodeconstants.wdirid
2392 or node in self.nodeconstants.wdirfilenodeids
2396 or node in self.nodeconstants.wdirfilenodeids
2393 ):
2397 ):
2394 raise error.RevlogError(
2398 raise error.RevlogError(
2395 _(b"%s: attempt to add wdir revision") % self.display_id
2399 _(b"%s: attempt to add wdir revision") % self.display_id
2396 )
2400 )
2397 if self._writinghandles is None:
2401 if self._writinghandles is None:
2398 msg = b'adding revision outside `revlog._writing` context'
2402 msg = b'adding revision outside `revlog._writing` context'
2399 raise error.ProgrammingError(msg)
2403 raise error.ProgrammingError(msg)
2400
2404
2401 if self._inline:
2405 if self._inline:
2402 fh = self._writinghandles[0]
2406 fh = self._writinghandles[0]
2403 else:
2407 else:
2404 fh = self._writinghandles[1]
2408 fh = self._writinghandles[1]
2405
2409
2406 btext = [rawtext]
2410 btext = [rawtext]
2407
2411
2408 curr = len(self)
2412 curr = len(self)
2409 prev = curr - 1
2413 prev = curr - 1
2410
2414
2411 offset = self._get_data_offset(prev)
2415 offset = self._get_data_offset(prev)
2412
2416
2413 if self._concurrencychecker:
2417 if self._concurrencychecker:
2414 ifh, dfh, sdfh = self._writinghandles
2418 ifh, dfh, sdfh = self._writinghandles
2415 # XXX no checking for the sidedata file
2419 # XXX no checking for the sidedata file
2416 if self._inline:
2420 if self._inline:
2417 # offset is "as if" it were in the .d file, so we need to add on
2421 # offset is "as if" it were in the .d file, so we need to add on
2418 # the size of the entry metadata.
2422 # the size of the entry metadata.
2419 self._concurrencychecker(
2423 self._concurrencychecker(
2420 ifh, self._indexfile, offset + curr * self.index.entry_size
2424 ifh, self._indexfile, offset + curr * self.index.entry_size
2421 )
2425 )
2422 else:
2426 else:
2423 # Entries in the .i are a consistent size.
2427 # Entries in the .i are a consistent size.
2424 self._concurrencychecker(
2428 self._concurrencychecker(
2425 ifh, self._indexfile, curr * self.index.entry_size
2429 ifh, self._indexfile, curr * self.index.entry_size
2426 )
2430 )
2427 self._concurrencychecker(dfh, self._datafile, offset)
2431 self._concurrencychecker(dfh, self._datafile, offset)
2428
2432
2429 p1r, p2r = self.rev(p1), self.rev(p2)
2433 p1r, p2r = self.rev(p1), self.rev(p2)
2430
2434
2431 # full versions are inserted when the needed deltas
2435 # full versions are inserted when the needed deltas
2432 # become comparable to the uncompressed text
2436 # become comparable to the uncompressed text
2433 if rawtext is None:
2437 if rawtext is None:
2434 # need rawtext size, before changed by flag processors, which is
2438 # need rawtext size, before changed by flag processors, which is
2435 # the non-raw size. use revlog explicitly to avoid filelog's extra
2439 # the non-raw size. use revlog explicitly to avoid filelog's extra
2436 # logic that might remove metadata size.
2440 # logic that might remove metadata size.
2437 textlen = mdiff.patchedsize(
2441 textlen = mdiff.patchedsize(
2438 revlog.size(self, cachedelta[0]), cachedelta[1]
2442 revlog.size(self, cachedelta[0]), cachedelta[1]
2439 )
2443 )
2440 else:
2444 else:
2441 textlen = len(rawtext)
2445 textlen = len(rawtext)
2442
2446
2443 if deltacomputer is None:
2447 if deltacomputer is None:
2444 write_debug = None
2448 write_debug = None
2445 if self._debug_delta:
2449 if self._debug_delta:
2446 write_debug = transaction._report
2450 write_debug = transaction._report
2447 deltacomputer = deltautil.deltacomputer(
2451 deltacomputer = deltautil.deltacomputer(
2448 self, write_debug=write_debug
2452 self, write_debug=write_debug
2449 )
2453 )
2450
2454
2451 revinfo = revlogutils.revisioninfo(
2455 revinfo = revlogutils.revisioninfo(
2452 node,
2456 node,
2453 p1,
2457 p1,
2454 p2,
2458 p2,
2455 btext,
2459 btext,
2456 textlen,
2460 textlen,
2457 cachedelta,
2461 cachedelta,
2458 flags,
2462 flags,
2459 )
2463 )
2460
2464
2461 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2465 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2462
2466
2463 compression_mode = COMP_MODE_INLINE
2467 compression_mode = COMP_MODE_INLINE
2464 if self._docket is not None:
2468 if self._docket is not None:
2465 default_comp = self._docket.default_compression_header
2469 default_comp = self._docket.default_compression_header
2466 r = deltautil.delta_compression(default_comp, deltainfo)
2470 r = deltautil.delta_compression(default_comp, deltainfo)
2467 compression_mode, deltainfo = r
2471 compression_mode, deltainfo = r
2468
2472
2469 sidedata_compression_mode = COMP_MODE_INLINE
2473 sidedata_compression_mode = COMP_MODE_INLINE
2470 if sidedata and self.hassidedata:
2474 if sidedata and self.hassidedata:
2471 sidedata_compression_mode = COMP_MODE_PLAIN
2475 sidedata_compression_mode = COMP_MODE_PLAIN
2472 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2476 serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
2473 sidedata_offset = self._docket.sidedata_end
2477 sidedata_offset = self._docket.sidedata_end
2474 h, comp_sidedata = self.compress(serialized_sidedata)
2478 h, comp_sidedata = self.compress(serialized_sidedata)
2475 if (
2479 if (
2476 h != b'u'
2480 h != b'u'
2477 and comp_sidedata[0:1] != b'\0'
2481 and comp_sidedata[0:1] != b'\0'
2478 and len(comp_sidedata) < len(serialized_sidedata)
2482 and len(comp_sidedata) < len(serialized_sidedata)
2479 ):
2483 ):
2480 assert not h
2484 assert not h
2481 if (
2485 if (
2482 comp_sidedata[0:1]
2486 comp_sidedata[0:1]
2483 == self._docket.default_compression_header
2487 == self._docket.default_compression_header
2484 ):
2488 ):
2485 sidedata_compression_mode = COMP_MODE_DEFAULT
2489 sidedata_compression_mode = COMP_MODE_DEFAULT
2486 serialized_sidedata = comp_sidedata
2490 serialized_sidedata = comp_sidedata
2487 else:
2491 else:
2488 sidedata_compression_mode = COMP_MODE_INLINE
2492 sidedata_compression_mode = COMP_MODE_INLINE
2489 serialized_sidedata = comp_sidedata
2493 serialized_sidedata = comp_sidedata
2490 else:
2494 else:
2491 serialized_sidedata = b""
2495 serialized_sidedata = b""
2492 # Don't store the offset if the sidedata is empty, that way
2496 # Don't store the offset if the sidedata is empty, that way
2493 # we can easily detect empty sidedata and they will be no different
2497 # we can easily detect empty sidedata and they will be no different
2494 # than ones we manually add.
2498 # than ones we manually add.
2495 sidedata_offset = 0
2499 sidedata_offset = 0
2496
2500
2497 rank = RANK_UNKNOWN
2501 rank = RANK_UNKNOWN
2498 if self._format_version == CHANGELOGV2:
2502 if self._format_version == CHANGELOGV2:
2499 if (p1r, p2r) == (nullrev, nullrev):
2503 if (p1r, p2r) == (nullrev, nullrev):
2500 rank = 1
2504 rank = 1
2501 elif p1r != nullrev and p2r == nullrev:
2505 elif p1r != nullrev and p2r == nullrev:
2502 rank = 1 + self.fast_rank(p1r)
2506 rank = 1 + self.fast_rank(p1r)
2503 elif p1r == nullrev and p2r != nullrev:
2507 elif p1r == nullrev and p2r != nullrev:
2504 rank = 1 + self.fast_rank(p2r)
2508 rank = 1 + self.fast_rank(p2r)
2505 else: # merge node
2509 else: # merge node
2506 if rustdagop is not None and self.index.rust_ext_compat:
2510 if rustdagop is not None and self.index.rust_ext_compat:
2507 rank = rustdagop.rank(self.index, p1r, p2r)
2511 rank = rustdagop.rank(self.index, p1r, p2r)
2508 else:
2512 else:
2509 pmin, pmax = sorted((p1r, p2r))
2513 pmin, pmax = sorted((p1r, p2r))
2510 rank = 1 + self.fast_rank(pmax)
2514 rank = 1 + self.fast_rank(pmax)
2511 rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))
2515 rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))
2512
2516
2513 e = revlogutils.entry(
2517 e = revlogutils.entry(
2514 flags=flags,
2518 flags=flags,
2515 data_offset=offset,
2519 data_offset=offset,
2516 data_compressed_length=deltainfo.deltalen,
2520 data_compressed_length=deltainfo.deltalen,
2517 data_uncompressed_length=textlen,
2521 data_uncompressed_length=textlen,
2518 data_compression_mode=compression_mode,
2522 data_compression_mode=compression_mode,
2519 data_delta_base=deltainfo.base,
2523 data_delta_base=deltainfo.base,
2520 link_rev=link,
2524 link_rev=link,
2521 parent_rev_1=p1r,
2525 parent_rev_1=p1r,
2522 parent_rev_2=p2r,
2526 parent_rev_2=p2r,
2523 node_id=node,
2527 node_id=node,
2524 sidedata_offset=sidedata_offset,
2528 sidedata_offset=sidedata_offset,
2525 sidedata_compressed_length=len(serialized_sidedata),
2529 sidedata_compressed_length=len(serialized_sidedata),
2526 sidedata_compression_mode=sidedata_compression_mode,
2530 sidedata_compression_mode=sidedata_compression_mode,
2527 rank=rank,
2531 rank=rank,
2528 )
2532 )
2529
2533
2530 self.index.append(e)
2534 self.index.append(e)
2531 entry = self.index.entry_binary(curr)
2535 entry = self.index.entry_binary(curr)
2532 if curr == 0 and self._docket is None:
2536 if curr == 0 and self._docket is None:
2533 header = self._format_flags | self._format_version
2537 header = self._format_flags | self._format_version
2534 header = self.index.pack_header(header)
2538 header = self.index.pack_header(header)
2535 entry = header + entry
2539 entry = header + entry
2536 self._writeentry(
2540 self._writeentry(
2537 transaction,
2541 transaction,
2538 entry,
2542 entry,
2539 deltainfo.data,
2543 deltainfo.data,
2540 link,
2544 link,
2541 offset,
2545 offset,
2542 serialized_sidedata,
2546 serialized_sidedata,
2543 sidedata_offset,
2547 sidedata_offset,
2544 )
2548 )
2545
2549
2546 rawtext = btext[0]
2550 rawtext = btext[0]
2547
2551
2548 if alwayscache and rawtext is None:
2552 if alwayscache and rawtext is None:
2549 rawtext = deltacomputer.buildtext(revinfo, fh)
2553 rawtext = deltacomputer.buildtext(revinfo, fh)
2550
2554
2551 if type(rawtext) == bytes: # only accept immutable objects
2555 if type(rawtext) == bytes: # only accept immutable objects
2552 self._revisioncache = (node, curr, rawtext)
2556 self._revisioncache = (node, curr, rawtext)
2553 self._chainbasecache[curr] = deltainfo.chainbase
2557 self._chainbasecache[curr] = deltainfo.chainbase
2554 return curr
2558 return curr
2555
2559
2556 def _get_data_offset(self, prev):
2560 def _get_data_offset(self, prev):
2557 """Returns the current offset in the (in-transaction) data file.
2561 """Returns the current offset in the (in-transaction) data file.
2558 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2562 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
2559 file to store that information: since sidedata can be rewritten to the
2563 file to store that information: since sidedata can be rewritten to the
2560 end of the data file within a transaction, you can have cases where, for
2564 end of the data file within a transaction, you can have cases where, for
2561 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2565 example, rev `n` does not have sidedata while rev `n - 1` does, leading
2562 to `n - 1`'s sidedata being written after `n`'s data.
2566 to `n - 1`'s sidedata being written after `n`'s data.
2563
2567
2564 TODO cache this in a docket file before getting out of experimental."""
2568 TODO cache this in a docket file before getting out of experimental."""
2565 if self._docket is None:
2569 if self._docket is None:
2566 return self.end(prev)
2570 return self.end(prev)
2567 else:
2571 else:
2568 return self._docket.data_end
2572 return self._docket.data_end
2569
2573
2570 def _writeentry(
2574 def _writeentry(
2571 self, transaction, entry, data, link, offset, sidedata, sidedata_offset
2575 self, transaction, entry, data, link, offset, sidedata, sidedata_offset
2572 ):
2576 ):
2573 # Files opened in a+ mode have inconsistent behavior on various
2577 # Files opened in a+ mode have inconsistent behavior on various
2574 # platforms. Windows requires that a file positioning call be made
2578 # platforms. Windows requires that a file positioning call be made
2575 # when the file handle transitions between reads and writes. See
2579 # when the file handle transitions between reads and writes. See
2576 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2580 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2577 # platforms, Python or the platform itself can be buggy. Some versions
2581 # platforms, Python or the platform itself can be buggy. Some versions
2578 # of Solaris have been observed to not append at the end of the file
2582 # of Solaris have been observed to not append at the end of the file
2579 # if the file was seeked to before the end. See issue4943 for more.
2583 # if the file was seeked to before the end. See issue4943 for more.
2580 #
2584 #
2581 # We work around this issue by inserting a seek() before writing.
2585 # We work around this issue by inserting a seek() before writing.
2582 # Note: This is likely not necessary on Python 3. However, because
2586 # Note: This is likely not necessary on Python 3. However, because
2583 # the file handle is reused for reads and may be seeked there, we need
2587 # the file handle is reused for reads and may be seeked there, we need
2584 # to be careful before changing this.
2588 # to be careful before changing this.
2585 if self._writinghandles is None:
2589 if self._writinghandles is None:
2586 msg = b'adding revision outside `revlog._writing` context'
2590 msg = b'adding revision outside `revlog._writing` context'
2587 raise error.ProgrammingError(msg)
2591 raise error.ProgrammingError(msg)
2588 ifh, dfh, sdfh = self._writinghandles
2592 ifh, dfh, sdfh = self._writinghandles
2589 if self._docket is None:
2593 if self._docket is None:
2590 ifh.seek(0, os.SEEK_END)
2594 ifh.seek(0, os.SEEK_END)
2591 else:
2595 else:
2592 ifh.seek(self._docket.index_end, os.SEEK_SET)
2596 ifh.seek(self._docket.index_end, os.SEEK_SET)
2593 if dfh:
2597 if dfh:
2594 if self._docket is None:
2598 if self._docket is None:
2595 dfh.seek(0, os.SEEK_END)
2599 dfh.seek(0, os.SEEK_END)
2596 else:
2600 else:
2597 dfh.seek(self._docket.data_end, os.SEEK_SET)
2601 dfh.seek(self._docket.data_end, os.SEEK_SET)
2598 if sdfh:
2602 if sdfh:
2599 sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2603 sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
2600
2604
2601 curr = len(self) - 1
2605 curr = len(self) - 1
2602 if not self._inline:
2606 if not self._inline:
2603 transaction.add(self._datafile, offset)
2607 transaction.add(self._datafile, offset)
2604 if self._sidedatafile:
2608 if self._sidedatafile:
2605 transaction.add(self._sidedatafile, sidedata_offset)
2609 transaction.add(self._sidedatafile, sidedata_offset)
2606 transaction.add(self._indexfile, curr * len(entry))
2610 transaction.add(self._indexfile, curr * len(entry))
2607 if data[0]:
2611 if data[0]:
2608 dfh.write(data[0])
2612 dfh.write(data[0])
2609 dfh.write(data[1])
2613 dfh.write(data[1])
2610 if sidedata:
2614 if sidedata:
2611 sdfh.write(sidedata)
2615 sdfh.write(sidedata)
2612 ifh.write(entry)
2616 ifh.write(entry)
2613 else:
2617 else:
2614 offset += curr * self.index.entry_size
2618 offset += curr * self.index.entry_size
2615 transaction.add(self._indexfile, offset)
2619 transaction.add(self._indexfile, offset)
2616 ifh.write(entry)
2620 ifh.write(entry)
2617 ifh.write(data[0])
2621 ifh.write(data[0])
2618 ifh.write(data[1])
2622 ifh.write(data[1])
2619 assert not sidedata
2623 assert not sidedata
2620 self._enforceinlinesize(transaction)
2624 self._enforceinlinesize(transaction)
2621 if self._docket is not None:
2625 if self._docket is not None:
2622 # revlog-v2 always has 3 writing handles, help Pytype
2626 # revlog-v2 always has 3 writing handles, help Pytype
2623 wh1 = self._writinghandles[0]
2627 wh1 = self._writinghandles[0]
2624 wh2 = self._writinghandles[1]
2628 wh2 = self._writinghandles[1]
2625 wh3 = self._writinghandles[2]
2629 wh3 = self._writinghandles[2]
2626 assert wh1 is not None
2630 assert wh1 is not None
2627 assert wh2 is not None
2631 assert wh2 is not None
2628 assert wh3 is not None
2632 assert wh3 is not None
2629 self._docket.index_end = wh1.tell()
2633 self._docket.index_end = wh1.tell()
2630 self._docket.data_end = wh2.tell()
2634 self._docket.data_end = wh2.tell()
2631 self._docket.sidedata_end = wh3.tell()
2635 self._docket.sidedata_end = wh3.tell()
2632
2636
2633 nodemaputil.setup_persistent_nodemap(transaction, self)
2637 nodemaputil.setup_persistent_nodemap(transaction, self)
2634
2638
2635 def addgroup(
2639 def addgroup(
2636 self,
2640 self,
2637 deltas,
2641 deltas,
2638 linkmapper,
2642 linkmapper,
2639 transaction,
2643 transaction,
2640 alwayscache=False,
2644 alwayscache=False,
2641 addrevisioncb=None,
2645 addrevisioncb=None,
2642 duplicaterevisioncb=None,
2646 duplicaterevisioncb=None,
2643 debug_info=None,
2647 debug_info=None,
2644 ):
2648 ):
2645 """
2649 """
2646 add a delta group
2650 add a delta group
2647
2651
2648 given a set of deltas, add them to the revision log. the
2652 given a set of deltas, add them to the revision log. the
2649 first delta is against its parent, which should be in our
2653 first delta is against its parent, which should be in our
2650 log, the rest are against the previous delta.
2654 log, the rest are against the previous delta.
2651
2655
2652 If ``addrevisioncb`` is defined, it will be called with arguments of
2656 If ``addrevisioncb`` is defined, it will be called with arguments of
2653 this revlog and the node that was added.
2657 this revlog and the node that was added.
2654 """
2658 """
2655
2659
2656 if self._adding_group:
2660 if self._adding_group:
2657 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2661 raise error.ProgrammingError(b'cannot nest addgroup() calls')
2658
2662
2659 self._adding_group = True
2663 self._adding_group = True
2660 empty = True
2664 empty = True
2661 try:
2665 try:
2662 with self._writing(transaction):
2666 with self._writing(transaction):
2663 write_debug = None
2667 write_debug = None
2664 if self._debug_delta:
2668 if self._debug_delta:
2665 write_debug = transaction._report
2669 write_debug = transaction._report
2666 deltacomputer = deltautil.deltacomputer(
2670 deltacomputer = deltautil.deltacomputer(
2667 self,
2671 self,
2668 write_debug=write_debug,
2672 write_debug=write_debug,
2669 debug_info=debug_info,
2673 debug_info=debug_info,
2670 )
2674 )
2671 # loop through our set of deltas
2675 # loop through our set of deltas
2672 for data in deltas:
2676 for data in deltas:
2673 (
2677 (
2674 node,
2678 node,
2675 p1,
2679 p1,
2676 p2,
2680 p2,
2677 linknode,
2681 linknode,
2678 deltabase,
2682 deltabase,
2679 delta,
2683 delta,
2680 flags,
2684 flags,
2681 sidedata,
2685 sidedata,
2682 ) = data
2686 ) = data
2683 link = linkmapper(linknode)
2687 link = linkmapper(linknode)
2684 flags = flags or REVIDX_DEFAULT_FLAGS
2688 flags = flags or REVIDX_DEFAULT_FLAGS
2685
2689
2686 rev = self.index.get_rev(node)
2690 rev = self.index.get_rev(node)
2687 if rev is not None:
2691 if rev is not None:
2688 # this can happen if two branches make the same change
2692 # this can happen if two branches make the same change
2689 self._nodeduplicatecallback(transaction, rev)
2693 self._nodeduplicatecallback(transaction, rev)
2690 if duplicaterevisioncb:
2694 if duplicaterevisioncb:
2691 duplicaterevisioncb(self, rev)
2695 duplicaterevisioncb(self, rev)
2692 empty = False
2696 empty = False
2693 continue
2697 continue
2694
2698
2695 for p in (p1, p2):
2699 for p in (p1, p2):
2696 if not self.index.has_node(p):
2700 if not self.index.has_node(p):
2697 raise error.LookupError(
2701 raise error.LookupError(
2698 p, self.radix, _(b'unknown parent')
2702 p, self.radix, _(b'unknown parent')
2699 )
2703 )
2700
2704
2701 if not self.index.has_node(deltabase):
2705 if not self.index.has_node(deltabase):
2702 raise error.LookupError(
2706 raise error.LookupError(
2703 deltabase, self.display_id, _(b'unknown delta base')
2707 deltabase, self.display_id, _(b'unknown delta base')
2704 )
2708 )
2705
2709
2706 baserev = self.rev(deltabase)
2710 baserev = self.rev(deltabase)
2707
2711
2708 if baserev != nullrev and self.iscensored(baserev):
2712 if baserev != nullrev and self.iscensored(baserev):
2709 # if base is censored, delta must be full replacement in a
2713 # if base is censored, delta must be full replacement in a
2710 # single patch operation
2714 # single patch operation
2711 hlen = struct.calcsize(b">lll")
2715 hlen = struct.calcsize(b">lll")
2712 oldlen = self.rawsize(baserev)
2716 oldlen = self.rawsize(baserev)
2713 newlen = len(delta) - hlen
2717 newlen = len(delta) - hlen
2714 if delta[:hlen] != mdiff.replacediffheader(
2718 if delta[:hlen] != mdiff.replacediffheader(
2715 oldlen, newlen
2719 oldlen, newlen
2716 ):
2720 ):
2717 raise error.CensoredBaseError(
2721 raise error.CensoredBaseError(
2718 self.display_id, self.node(baserev)
2722 self.display_id, self.node(baserev)
2719 )
2723 )
2720
2724
2721 if not flags and self._peek_iscensored(baserev, delta):
2725 if not flags and self._peek_iscensored(baserev, delta):
2722 flags |= REVIDX_ISCENSORED
2726 flags |= REVIDX_ISCENSORED
2723
2727
2724 # We assume consumers of addrevisioncb will want to retrieve
2728 # We assume consumers of addrevisioncb will want to retrieve
2725 # the added revision, which will require a call to
2729 # the added revision, which will require a call to
2726 # revision(). revision() will fast path if there is a cache
2730 # revision(). revision() will fast path if there is a cache
2727 # hit. So, we tell _addrevision() to always cache in this case.
2731 # hit. So, we tell _addrevision() to always cache in this case.
2728 # We're only using addgroup() in the context of changegroup
2732 # We're only using addgroup() in the context of changegroup
2729 # generation so the revision data can always be handled as raw
2733 # generation so the revision data can always be handled as raw
2730 # by the flagprocessor.
2734 # by the flagprocessor.
2731 rev = self._addrevision(
2735 rev = self._addrevision(
2732 node,
2736 node,
2733 None,
2737 None,
2734 transaction,
2738 transaction,
2735 link,
2739 link,
2736 p1,
2740 p1,
2737 p2,
2741 p2,
2738 flags,
2742 flags,
2739 (baserev, delta),
2743 (baserev, delta),
2740 alwayscache=alwayscache,
2744 alwayscache=alwayscache,
2741 deltacomputer=deltacomputer,
2745 deltacomputer=deltacomputer,
2742 sidedata=sidedata,
2746 sidedata=sidedata,
2743 )
2747 )
2744
2748
2745 if addrevisioncb:
2749 if addrevisioncb:
2746 addrevisioncb(self, rev)
2750 addrevisioncb(self, rev)
2747 empty = False
2751 empty = False
2748 finally:
2752 finally:
2749 self._adding_group = False
2753 self._adding_group = False
2750 return not empty
2754 return not empty
2751
2755
2752 def iscensored(self, rev):
2756 def iscensored(self, rev):
2753 """Check if a file revision is censored."""
2757 """Check if a file revision is censored."""
2754 if not self._censorable:
2758 if not self._censorable:
2755 return False
2759 return False
2756
2760
2757 return self.flags(rev) & REVIDX_ISCENSORED
2761 return self.flags(rev) & REVIDX_ISCENSORED
2758
2762
2759 def _peek_iscensored(self, baserev, delta):
2763 def _peek_iscensored(self, baserev, delta):
2760 """Quickly check if a delta produces a censored revision."""
2764 """Quickly check if a delta produces a censored revision."""
2761 if not self._censorable:
2765 if not self._censorable:
2762 return False
2766 return False
2763
2767
2764 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2768 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2765
2769
2766 def getstrippoint(self, minlink):
2770 def getstrippoint(self, minlink):
2767 """find the minimum rev that must be stripped to strip the linkrev
2771 """find the minimum rev that must be stripped to strip the linkrev
2768
2772
2769 Returns a tuple containing the minimum rev and a set of all revs that
2773 Returns a tuple containing the minimum rev and a set of all revs that
2770 have linkrevs that will be broken by this strip.
2774 have linkrevs that will be broken by this strip.
2771 """
2775 """
2772 return storageutil.resolvestripinfo(
2776 return storageutil.resolvestripinfo(
2773 minlink,
2777 minlink,
2774 len(self) - 1,
2778 len(self) - 1,
2775 self.headrevs(),
2779 self.headrevs(),
2776 self.linkrev,
2780 self.linkrev,
2777 self.parentrevs,
2781 self.parentrevs,
2778 )
2782 )
2779
2783
2780 def strip(self, minlink, transaction):
2784 def strip(self, minlink, transaction):
2781 """truncate the revlog on the first revision with a linkrev >= minlink
2785 """truncate the revlog on the first revision with a linkrev >= minlink
2782
2786
2783 This function is called when we're stripping revision minlink and
2787 This function is called when we're stripping revision minlink and
2784 its descendants from the repository.
2788 its descendants from the repository.
2785
2789
2786 We have to remove all revisions with linkrev >= minlink, because
2790 We have to remove all revisions with linkrev >= minlink, because
2787 the equivalent changelog revisions will be renumbered after the
2791 the equivalent changelog revisions will be renumbered after the
2788 strip.
2792 strip.
2789
2793
2790 So we truncate the revlog on the first of these revisions, and
2794 So we truncate the revlog on the first of these revisions, and
2791 trust that the caller has saved the revisions that shouldn't be
2795 trust that the caller has saved the revisions that shouldn't be
2792 removed and that it'll re-add them after this truncation.
2796 removed and that it'll re-add them after this truncation.
2793 """
2797 """
2794 if len(self) == 0:
2798 if len(self) == 0:
2795 return
2799 return
2796
2800
2797 rev, _ = self.getstrippoint(minlink)
2801 rev, _ = self.getstrippoint(minlink)
2798 if rev == len(self):
2802 if rev == len(self):
2799 return
2803 return
2800
2804
2801 # first truncate the files on disk
2805 # first truncate the files on disk
2802 data_end = self.start(rev)
2806 data_end = self.start(rev)
2803 if not self._inline:
2807 if not self._inline:
2804 transaction.add(self._datafile, data_end)
2808 transaction.add(self._datafile, data_end)
2805 end = rev * self.index.entry_size
2809 end = rev * self.index.entry_size
2806 else:
2810 else:
2807 end = data_end + (rev * self.index.entry_size)
2811 end = data_end + (rev * self.index.entry_size)
2808
2812
2809 if self._sidedatafile:
2813 if self._sidedatafile:
2810 sidedata_end = self.sidedata_cut_off(rev)
2814 sidedata_end = self.sidedata_cut_off(rev)
2811 transaction.add(self._sidedatafile, sidedata_end)
2815 transaction.add(self._sidedatafile, sidedata_end)
2812
2816
2813 transaction.add(self._indexfile, end)
2817 transaction.add(self._indexfile, end)
2814 if self._docket is not None:
2818 if self._docket is not None:
2815 # XXX we could, leverage the docket while stripping. However it is
2819 # XXX we could, leverage the docket while stripping. However it is
2816 # not powerfull enough at the time of this comment
2820 # not powerfull enough at the time of this comment
2817 self._docket.index_end = end
2821 self._docket.index_end = end
2818 self._docket.data_end = data_end
2822 self._docket.data_end = data_end
2819 self._docket.sidedata_end = sidedata_end
2823 self._docket.sidedata_end = sidedata_end
2820 self._docket.write(transaction, stripping=True)
2824 self._docket.write(transaction, stripping=True)
2821
2825
2822 # then reset internal state in memory to forget those revisions
2826 # then reset internal state in memory to forget those revisions
2823 self._revisioncache = None
2827 self._revisioncache = None
2824 self._chaininfocache = util.lrucachedict(500)
2828 self._chaininfocache = util.lrucachedict(500)
2825 self._segmentfile.clear_cache()
2829 self._segmentfile.clear_cache()
2826 self._segmentfile_sidedata.clear_cache()
2830 self._segmentfile_sidedata.clear_cache()
2827
2831
2828 del self.index[rev:-1]
2832 del self.index[rev:-1]
2829
2833
2830 def checksize(self):
2834 def checksize(self):
2831 """Check size of index and data files
2835 """Check size of index and data files
2832
2836
2833 return a (dd, di) tuple.
2837 return a (dd, di) tuple.
2834 - dd: extra bytes for the "data" file
2838 - dd: extra bytes for the "data" file
2835 - di: extra bytes for the "index" file
2839 - di: extra bytes for the "index" file
2836
2840
2837 A healthy revlog will return (0, 0).
2841 A healthy revlog will return (0, 0).
2838 """
2842 """
2839 expected = 0
2843 expected = 0
2840 if len(self):
2844 if len(self):
2841 expected = max(0, self.end(len(self) - 1))
2845 expected = max(0, self.end(len(self) - 1))
2842
2846
2843 try:
2847 try:
2844 with self._datafp() as f:
2848 with self._datafp() as f:
2845 f.seek(0, io.SEEK_END)
2849 f.seek(0, io.SEEK_END)
2846 actual = f.tell()
2850 actual = f.tell()
2847 dd = actual - expected
2851 dd = actual - expected
2848 except FileNotFoundError:
2852 except FileNotFoundError:
2849 dd = 0
2853 dd = 0
2850
2854
2851 try:
2855 try:
2852 f = self.opener(self._indexfile)
2856 f = self.opener(self._indexfile)
2853 f.seek(0, io.SEEK_END)
2857 f.seek(0, io.SEEK_END)
2854 actual = f.tell()
2858 actual = f.tell()
2855 f.close()
2859 f.close()
2856 s = self.index.entry_size
2860 s = self.index.entry_size
2857 i = max(0, actual // s)
2861 i = max(0, actual // s)
2858 di = actual - (i * s)
2862 di = actual - (i * s)
2859 if self._inline:
2863 if self._inline:
2860 databytes = 0
2864 databytes = 0
2861 for r in self:
2865 for r in self:
2862 databytes += max(0, self.length(r))
2866 databytes += max(0, self.length(r))
2863 dd = 0
2867 dd = 0
2864 di = actual - len(self) * s - databytes
2868 di = actual - len(self) * s - databytes
2865 except FileNotFoundError:
2869 except FileNotFoundError:
2866 di = 0
2870 di = 0
2867
2871
2868 return (dd, di)
2872 return (dd, di)
2869
2873
2870 def files(self):
2874 def files(self):
2871 res = [self._indexfile]
2875 res = [self._indexfile]
2872 if self._docket_file is None:
2876 if self._docket_file is None:
2873 if not self._inline:
2877 if not self._inline:
2874 res.append(self._datafile)
2878 res.append(self._datafile)
2875 else:
2879 else:
2876 res.append(self._docket_file)
2880 res.append(self._docket_file)
2877 res.extend(self._docket.old_index_filepaths(include_empty=False))
2881 res.extend(self._docket.old_index_filepaths(include_empty=False))
2878 if self._docket.data_end:
2882 if self._docket.data_end:
2879 res.append(self._datafile)
2883 res.append(self._datafile)
2880 res.extend(self._docket.old_data_filepaths(include_empty=False))
2884 res.extend(self._docket.old_data_filepaths(include_empty=False))
2881 if self._docket.sidedata_end:
2885 if self._docket.sidedata_end:
2882 res.append(self._sidedatafile)
2886 res.append(self._sidedatafile)
2883 res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
2887 res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
2884 return res
2888 return res
2885
2889
2886 def emitrevisions(
2890 def emitrevisions(
2887 self,
2891 self,
2888 nodes,
2892 nodes,
2889 nodesorder=None,
2893 nodesorder=None,
2890 revisiondata=False,
2894 revisiondata=False,
2891 assumehaveparentrevisions=False,
2895 assumehaveparentrevisions=False,
2892 deltamode=repository.CG_DELTAMODE_STD,
2896 deltamode=repository.CG_DELTAMODE_STD,
2893 sidedata_helpers=None,
2897 sidedata_helpers=None,
2894 debug_info=None,
2898 debug_info=None,
2895 ):
2899 ):
2896 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2900 if nodesorder not in (b'nodes', b'storage', b'linear', None):
2897 raise error.ProgrammingError(
2901 raise error.ProgrammingError(
2898 b'unhandled value for nodesorder: %s' % nodesorder
2902 b'unhandled value for nodesorder: %s' % nodesorder
2899 )
2903 )
2900
2904
2901 if nodesorder is None and not self._generaldelta:
2905 if nodesorder is None and not self._generaldelta:
2902 nodesorder = b'storage'
2906 nodesorder = b'storage'
2903
2907
2904 if (
2908 if (
2905 not self._storedeltachains
2909 not self._storedeltachains
2906 and deltamode != repository.CG_DELTAMODE_PREV
2910 and deltamode != repository.CG_DELTAMODE_PREV
2907 ):
2911 ):
2908 deltamode = repository.CG_DELTAMODE_FULL
2912 deltamode = repository.CG_DELTAMODE_FULL
2909
2913
2910 return storageutil.emitrevisions(
2914 return storageutil.emitrevisions(
2911 self,
2915 self,
2912 nodes,
2916 nodes,
2913 nodesorder,
2917 nodesorder,
2914 revlogrevisiondelta,
2918 revlogrevisiondelta,
2915 deltaparentfn=self.deltaparent,
2919 deltaparentfn=self.deltaparent,
2916 candeltafn=self.candelta,
2920 candeltafn=self.candelta,
2917 rawsizefn=self.rawsize,
2921 rawsizefn=self.rawsize,
2918 revdifffn=self.revdiff,
2922 revdifffn=self.revdiff,
2919 flagsfn=self.flags,
2923 flagsfn=self.flags,
2920 deltamode=deltamode,
2924 deltamode=deltamode,
2921 revisiondata=revisiondata,
2925 revisiondata=revisiondata,
2922 assumehaveparentrevisions=assumehaveparentrevisions,
2926 assumehaveparentrevisions=assumehaveparentrevisions,
2923 sidedata_helpers=sidedata_helpers,
2927 sidedata_helpers=sidedata_helpers,
2924 debug_info=debug_info,
2928 debug_info=debug_info,
2925 )
2929 )
2926
2930
2927 DELTAREUSEALWAYS = b'always'
2931 DELTAREUSEALWAYS = b'always'
2928 DELTAREUSESAMEREVS = b'samerevs'
2932 DELTAREUSESAMEREVS = b'samerevs'
2929 DELTAREUSENEVER = b'never'
2933 DELTAREUSENEVER = b'never'
2930
2934
2931 DELTAREUSEFULLADD = b'fulladd'
2935 DELTAREUSEFULLADD = b'fulladd'
2932
2936
2933 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2937 DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
2934
2938
2935 def clone(
2939 def clone(
2936 self,
2940 self,
2937 tr,
2941 tr,
2938 destrevlog,
2942 destrevlog,
2939 addrevisioncb=None,
2943 addrevisioncb=None,
2940 deltareuse=DELTAREUSESAMEREVS,
2944 deltareuse=DELTAREUSESAMEREVS,
2941 forcedeltabothparents=None,
2945 forcedeltabothparents=None,
2942 sidedata_helpers=None,
2946 sidedata_helpers=None,
2943 ):
2947 ):
2944 """Copy this revlog to another, possibly with format changes.
2948 """Copy this revlog to another, possibly with format changes.
2945
2949
2946 The destination revlog will contain the same revisions and nodes.
2950 The destination revlog will contain the same revisions and nodes.
2947 However, it may not be bit-for-bit identical due to e.g. delta encoding
2951 However, it may not be bit-for-bit identical due to e.g. delta encoding
2948 differences.
2952 differences.
2949
2953
2950 The ``deltareuse`` argument control how deltas from the existing revlog
2954 The ``deltareuse`` argument control how deltas from the existing revlog
2951 are preserved in the destination revlog. The argument can have the
2955 are preserved in the destination revlog. The argument can have the
2952 following values:
2956 following values:
2953
2957
2954 DELTAREUSEALWAYS
2958 DELTAREUSEALWAYS
2955 Deltas will always be reused (if possible), even if the destination
2959 Deltas will always be reused (if possible), even if the destination
2956 revlog would not select the same revisions for the delta. This is the
2960 revlog would not select the same revisions for the delta. This is the
2957 fastest mode of operation.
2961 fastest mode of operation.
2958 DELTAREUSESAMEREVS
2962 DELTAREUSESAMEREVS
2959 Deltas will be reused if the destination revlog would pick the same
2963 Deltas will be reused if the destination revlog would pick the same
2960 revisions for the delta. This mode strikes a balance between speed
2964 revisions for the delta. This mode strikes a balance between speed
2961 and optimization.
2965 and optimization.
2962 DELTAREUSENEVER
2966 DELTAREUSENEVER
2963 Deltas will never be reused. This is the slowest mode of execution.
2967 Deltas will never be reused. This is the slowest mode of execution.
2964 This mode can be used to recompute deltas (e.g. if the diff/delta
2968 This mode can be used to recompute deltas (e.g. if the diff/delta
2965 algorithm changes).
2969 algorithm changes).
2966 DELTAREUSEFULLADD
2970 DELTAREUSEFULLADD
2967 Revision will be re-added as if their were new content. This is
2971 Revision will be re-added as if their were new content. This is
2968 slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
2972 slower than DELTAREUSEALWAYS but allow more mechanism to kicks in.
2969 eg: large file detection and handling.
2973 eg: large file detection and handling.
2970
2974
2971 Delta computation can be slow, so the choice of delta reuse policy can
2975 Delta computation can be slow, so the choice of delta reuse policy can
2972 significantly affect run time.
2976 significantly affect run time.
2973
2977
2974 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2978 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2975 two extremes. Deltas will be reused if they are appropriate. But if the
2979 two extremes. Deltas will be reused if they are appropriate. But if the
2976 delta could choose a better revision, it will do so. This means if you
2980 delta could choose a better revision, it will do so. This means if you
2977 are converting a non-generaldelta revlog to a generaldelta revlog,
2981 are converting a non-generaldelta revlog to a generaldelta revlog,
2978 deltas will be recomputed if the delta's parent isn't a parent of the
2982 deltas will be recomputed if the delta's parent isn't a parent of the
2979 revision.
2983 revision.
2980
2984
2981 In addition to the delta policy, the ``forcedeltabothparents``
2985 In addition to the delta policy, the ``forcedeltabothparents``
2982 argument controls whether to force compute deltas against both parents
2986 argument controls whether to force compute deltas against both parents
2983 for merges. By default, the current default is used.
2987 for merges. By default, the current default is used.
2984
2988
2985 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2989 See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
2986 `sidedata_helpers`.
2990 `sidedata_helpers`.
2987 """
2991 """
2988 if deltareuse not in self.DELTAREUSEALL:
2992 if deltareuse not in self.DELTAREUSEALL:
2989 raise ValueError(
2993 raise ValueError(
2990 _(b'value for deltareuse invalid: %s') % deltareuse
2994 _(b'value for deltareuse invalid: %s') % deltareuse
2991 )
2995 )
2992
2996
2993 if len(destrevlog):
2997 if len(destrevlog):
2994 raise ValueError(_(b'destination revlog is not empty'))
2998 raise ValueError(_(b'destination revlog is not empty'))
2995
2999
2996 if getattr(self, 'filteredrevs', None):
3000 if getattr(self, 'filteredrevs', None):
2997 raise ValueError(_(b'source revlog has filtered revisions'))
3001 raise ValueError(_(b'source revlog has filtered revisions'))
2998 if getattr(destrevlog, 'filteredrevs', None):
3002 if getattr(destrevlog, 'filteredrevs', None):
2999 raise ValueError(_(b'destination revlog has filtered revisions'))
3003 raise ValueError(_(b'destination revlog has filtered revisions'))
3000
3004
3001 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
3005 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
3002 # if possible.
3006 # if possible.
3003 oldlazydelta = destrevlog._lazydelta
3007 oldlazydelta = destrevlog._lazydelta
3004 oldlazydeltabase = destrevlog._lazydeltabase
3008 oldlazydeltabase = destrevlog._lazydeltabase
3005 oldamd = destrevlog._deltabothparents
3009 oldamd = destrevlog._deltabothparents
3006
3010
3007 try:
3011 try:
3008 if deltareuse == self.DELTAREUSEALWAYS:
3012 if deltareuse == self.DELTAREUSEALWAYS:
3009 destrevlog._lazydeltabase = True
3013 destrevlog._lazydeltabase = True
3010 destrevlog._lazydelta = True
3014 destrevlog._lazydelta = True
3011 elif deltareuse == self.DELTAREUSESAMEREVS:
3015 elif deltareuse == self.DELTAREUSESAMEREVS:
3012 destrevlog._lazydeltabase = False
3016 destrevlog._lazydeltabase = False
3013 destrevlog._lazydelta = True
3017 destrevlog._lazydelta = True
3014 elif deltareuse == self.DELTAREUSENEVER:
3018 elif deltareuse == self.DELTAREUSENEVER:
3015 destrevlog._lazydeltabase = False
3019 destrevlog._lazydeltabase = False
3016 destrevlog._lazydelta = False
3020 destrevlog._lazydelta = False
3017
3021
3018 destrevlog._deltabothparents = forcedeltabothparents or oldamd
3022 destrevlog._deltabothparents = forcedeltabothparents or oldamd
3019
3023
3020 self._clone(
3024 self._clone(
3021 tr,
3025 tr,
3022 destrevlog,
3026 destrevlog,
3023 addrevisioncb,
3027 addrevisioncb,
3024 deltareuse,
3028 deltareuse,
3025 forcedeltabothparents,
3029 forcedeltabothparents,
3026 sidedata_helpers,
3030 sidedata_helpers,
3027 )
3031 )
3028
3032
3029 finally:
3033 finally:
3030 destrevlog._lazydelta = oldlazydelta
3034 destrevlog._lazydelta = oldlazydelta
3031 destrevlog._lazydeltabase = oldlazydeltabase
3035 destrevlog._lazydeltabase = oldlazydeltabase
3032 destrevlog._deltabothparents = oldamd
3036 destrevlog._deltabothparents = oldamd
3033
3037
3034 def _clone(
3038 def _clone(
3035 self,
3039 self,
3036 tr,
3040 tr,
3037 destrevlog,
3041 destrevlog,
3038 addrevisioncb,
3042 addrevisioncb,
3039 deltareuse,
3043 deltareuse,
3040 forcedeltabothparents,
3044 forcedeltabothparents,
3041 sidedata_helpers,
3045 sidedata_helpers,
3042 ):
3046 ):
3043 """perform the core duty of `revlog.clone` after parameter processing"""
3047 """perform the core duty of `revlog.clone` after parameter processing"""
3044 write_debug = None
3048 write_debug = None
3045 if self._debug_delta:
3049 if self._debug_delta:
3046 write_debug = tr._report
3050 write_debug = tr._report
3047 deltacomputer = deltautil.deltacomputer(
3051 deltacomputer = deltautil.deltacomputer(
3048 destrevlog,
3052 destrevlog,
3049 write_debug=write_debug,
3053 write_debug=write_debug,
3050 )
3054 )
3051 index = self.index
3055 index = self.index
3052 for rev in self:
3056 for rev in self:
3053 entry = index[rev]
3057 entry = index[rev]
3054
3058
3055 # Some classes override linkrev to take filtered revs into
3059 # Some classes override linkrev to take filtered revs into
3056 # account. Use raw entry from index.
3060 # account. Use raw entry from index.
3057 flags = entry[0] & 0xFFFF
3061 flags = entry[0] & 0xFFFF
3058 linkrev = entry[4]
3062 linkrev = entry[4]
3059 p1 = index[entry[5]][7]
3063 p1 = index[entry[5]][7]
3060 p2 = index[entry[6]][7]
3064 p2 = index[entry[6]][7]
3061 node = entry[7]
3065 node = entry[7]
3062
3066
3063 # (Possibly) reuse the delta from the revlog if allowed and
3067 # (Possibly) reuse the delta from the revlog if allowed and
3064 # the revlog chunk is a delta.
3068 # the revlog chunk is a delta.
3065 cachedelta = None
3069 cachedelta = None
3066 rawtext = None
3070 rawtext = None
3067 if deltareuse == self.DELTAREUSEFULLADD:
3071 if deltareuse == self.DELTAREUSEFULLADD:
3068 text = self._revisiondata(rev)
3072 text = self._revisiondata(rev)
3069 sidedata = self.sidedata(rev)
3073 sidedata = self.sidedata(rev)
3070
3074
3071 if sidedata_helpers is not None:
3075 if sidedata_helpers is not None:
3072 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3076 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3073 self, sidedata_helpers, sidedata, rev
3077 self, sidedata_helpers, sidedata, rev
3074 )
3078 )
3075 flags = flags | new_flags[0] & ~new_flags[1]
3079 flags = flags | new_flags[0] & ~new_flags[1]
3076
3080
3077 destrevlog.addrevision(
3081 destrevlog.addrevision(
3078 text,
3082 text,
3079 tr,
3083 tr,
3080 linkrev,
3084 linkrev,
3081 p1,
3085 p1,
3082 p2,
3086 p2,
3083 cachedelta=cachedelta,
3087 cachedelta=cachedelta,
3084 node=node,
3088 node=node,
3085 flags=flags,
3089 flags=flags,
3086 deltacomputer=deltacomputer,
3090 deltacomputer=deltacomputer,
3087 sidedata=sidedata,
3091 sidedata=sidedata,
3088 )
3092 )
3089 else:
3093 else:
3090 if destrevlog._lazydelta:
3094 if destrevlog._lazydelta:
3091 dp = self.deltaparent(rev)
3095 dp = self.deltaparent(rev)
3092 if dp != nullrev:
3096 if dp != nullrev:
3093 cachedelta = (dp, bytes(self._chunk(rev)))
3097 cachedelta = (dp, bytes(self._chunk(rev)))
3094
3098
3095 sidedata = None
3099 sidedata = None
3096 if not cachedelta:
3100 if not cachedelta:
3097 rawtext = self._revisiondata(rev)
3101 rawtext = self._revisiondata(rev)
3098 sidedata = self.sidedata(rev)
3102 sidedata = self.sidedata(rev)
3099 if sidedata is None:
3103 if sidedata is None:
3100 sidedata = self.sidedata(rev)
3104 sidedata = self.sidedata(rev)
3101
3105
3102 if sidedata_helpers is not None:
3106 if sidedata_helpers is not None:
3103 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3107 (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
3104 self, sidedata_helpers, sidedata, rev
3108 self, sidedata_helpers, sidedata, rev
3105 )
3109 )
3106 flags = flags | new_flags[0] & ~new_flags[1]
3110 flags = flags | new_flags[0] & ~new_flags[1]
3107
3111
3108 with destrevlog._writing(tr):
3112 with destrevlog._writing(tr):
3109 destrevlog._addrevision(
3113 destrevlog._addrevision(
3110 node,
3114 node,
3111 rawtext,
3115 rawtext,
3112 tr,
3116 tr,
3113 linkrev,
3117 linkrev,
3114 p1,
3118 p1,
3115 p2,
3119 p2,
3116 flags,
3120 flags,
3117 cachedelta,
3121 cachedelta,
3118 deltacomputer=deltacomputer,
3122 deltacomputer=deltacomputer,
3119 sidedata=sidedata,
3123 sidedata=sidedata,
3120 )
3124 )
3121
3125
3122 if addrevisioncb:
3126 if addrevisioncb:
3123 addrevisioncb(self, rev, node)
3127 addrevisioncb(self, rev, node)
3124
3128
3125 def censorrevision(self, tr, censornode, tombstone=b''):
3129 def censorrevision(self, tr, censornode, tombstone=b''):
3126 if self._format_version == REVLOGV0:
3130 if self._format_version == REVLOGV0:
3127 raise error.RevlogError(
3131 raise error.RevlogError(
3128 _(b'cannot censor with version %d revlogs')
3132 _(b'cannot censor with version %d revlogs')
3129 % self._format_version
3133 % self._format_version
3130 )
3134 )
3131 elif self._format_version == REVLOGV1:
3135 elif self._format_version == REVLOGV1:
3132 rewrite.v1_censor(self, tr, censornode, tombstone)
3136 rewrite.v1_censor(self, tr, censornode, tombstone)
3133 else:
3137 else:
3134 rewrite.v2_censor(self, tr, censornode, tombstone)
3138 rewrite.v2_censor(self, tr, censornode, tombstone)
3135
3139
3136 def verifyintegrity(self, state):
3140 def verifyintegrity(self, state):
3137 """Verifies the integrity of the revlog.
3141 """Verifies the integrity of the revlog.
3138
3142
3139 Yields ``revlogproblem`` instances describing problems that are
3143 Yields ``revlogproblem`` instances describing problems that are
3140 found.
3144 found.
3141 """
3145 """
3142 dd, di = self.checksize()
3146 dd, di = self.checksize()
3143 if dd:
3147 if dd:
3144 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3148 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3145 if di:
3149 if di:
3146 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3150 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3147
3151
3148 version = self._format_version
3152 version = self._format_version
3149
3153
3150 # The verifier tells us what version revlog we should be.
3154 # The verifier tells us what version revlog we should be.
3151 if version != state[b'expectedversion']:
3155 if version != state[b'expectedversion']:
3152 yield revlogproblem(
3156 yield revlogproblem(
3153 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3157 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3154 % (self.display_id, version, state[b'expectedversion'])
3158 % (self.display_id, version, state[b'expectedversion'])
3155 )
3159 )
3156
3160
3157 state[b'skipread'] = set()
3161 state[b'skipread'] = set()
3158 state[b'safe_renamed'] = set()
3162 state[b'safe_renamed'] = set()
3159
3163
3160 for rev in self:
3164 for rev in self:
3161 node = self.node(rev)
3165 node = self.node(rev)
3162
3166
3163 # Verify contents. 4 cases to care about:
3167 # Verify contents. 4 cases to care about:
3164 #
3168 #
3165 # common: the most common case
3169 # common: the most common case
3166 # rename: with a rename
3170 # rename: with a rename
3167 # meta: file content starts with b'\1\n', the metadata
3171 # meta: file content starts with b'\1\n', the metadata
3168 # header defined in filelog.py, but without a rename
3172 # header defined in filelog.py, but without a rename
3169 # ext: content stored externally
3173 # ext: content stored externally
3170 #
3174 #
3171 # More formally, their differences are shown below:
3175 # More formally, their differences are shown below:
3172 #
3176 #
3173 # | common | rename | meta | ext
3177 # | common | rename | meta | ext
3174 # -------------------------------------------------------
3178 # -------------------------------------------------------
3175 # flags() | 0 | 0 | 0 | not 0
3179 # flags() | 0 | 0 | 0 | not 0
3176 # renamed() | False | True | False | ?
3180 # renamed() | False | True | False | ?
3177 # rawtext[0:2]=='\1\n'| False | True | True | ?
3181 # rawtext[0:2]=='\1\n'| False | True | True | ?
3178 #
3182 #
3179 # "rawtext" means the raw text stored in revlog data, which
3183 # "rawtext" means the raw text stored in revlog data, which
3180 # could be retrieved by "rawdata(rev)". "text"
3184 # could be retrieved by "rawdata(rev)". "text"
3181 # mentioned below is "revision(rev)".
3185 # mentioned below is "revision(rev)".
3182 #
3186 #
3183 # There are 3 different lengths stored physically:
3187 # There are 3 different lengths stored physically:
3184 # 1. L1: rawsize, stored in revlog index
3188 # 1. L1: rawsize, stored in revlog index
3185 # 2. L2: len(rawtext), stored in revlog data
3189 # 2. L2: len(rawtext), stored in revlog data
3186 # 3. L3: len(text), stored in revlog data if flags==0, or
3190 # 3. L3: len(text), stored in revlog data if flags==0, or
3187 # possibly somewhere else if flags!=0
3191 # possibly somewhere else if flags!=0
3188 #
3192 #
3189 # L1 should be equal to L2. L3 could be different from them.
3193 # L1 should be equal to L2. L3 could be different from them.
3190 # "text" may or may not affect commit hash depending on flag
3194 # "text" may or may not affect commit hash depending on flag
3191 # processors (see flagutil.addflagprocessor).
3195 # processors (see flagutil.addflagprocessor).
3192 #
3196 #
3193 # | common | rename | meta | ext
3197 # | common | rename | meta | ext
3194 # -------------------------------------------------
3198 # -------------------------------------------------
3195 # rawsize() | L1 | L1 | L1 | L1
3199 # rawsize() | L1 | L1 | L1 | L1
3196 # size() | L1 | L2-LM | L1(*) | L1 (?)
3200 # size() | L1 | L2-LM | L1(*) | L1 (?)
3197 # len(rawtext) | L2 | L2 | L2 | L2
3201 # len(rawtext) | L2 | L2 | L2 | L2
3198 # len(text) | L2 | L2 | L2 | L3
3202 # len(text) | L2 | L2 | L2 | L3
3199 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3203 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3200 #
3204 #
3201 # LM: length of metadata, depending on rawtext
3205 # LM: length of metadata, depending on rawtext
3202 # (*): not ideal, see comment in filelog.size
3206 # (*): not ideal, see comment in filelog.size
3203 # (?): could be "- len(meta)" if the resolved content has
3207 # (?): could be "- len(meta)" if the resolved content has
3204 # rename metadata
3208 # rename metadata
3205 #
3209 #
3206 # Checks needed to be done:
3210 # Checks needed to be done:
3207 # 1. length check: L1 == L2, in all cases.
3211 # 1. length check: L1 == L2, in all cases.
3208 # 2. hash check: depending on flag processor, we may need to
3212 # 2. hash check: depending on flag processor, we may need to
3209 # use either "text" (external), or "rawtext" (in revlog).
3213 # use either "text" (external), or "rawtext" (in revlog).
3210
3214
3211 try:
3215 try:
3212 skipflags = state.get(b'skipflags', 0)
3216 skipflags = state.get(b'skipflags', 0)
3213 if skipflags:
3217 if skipflags:
3214 skipflags &= self.flags(rev)
3218 skipflags &= self.flags(rev)
3215
3219
3216 _verify_revision(self, skipflags, state, node)
3220 _verify_revision(self, skipflags, state, node)
3217
3221
3218 l1 = self.rawsize(rev)
3222 l1 = self.rawsize(rev)
3219 l2 = len(self.rawdata(node))
3223 l2 = len(self.rawdata(node))
3220
3224
3221 if l1 != l2:
3225 if l1 != l2:
3222 yield revlogproblem(
3226 yield revlogproblem(
3223 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3227 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3224 node=node,
3228 node=node,
3225 )
3229 )
3226
3230
3227 except error.CensoredNodeError:
3231 except error.CensoredNodeError:
3228 if state[b'erroroncensored']:
3232 if state[b'erroroncensored']:
3229 yield revlogproblem(
3233 yield revlogproblem(
3230 error=_(b'censored file data'), node=node
3234 error=_(b'censored file data'), node=node
3231 )
3235 )
3232 state[b'skipread'].add(node)
3236 state[b'skipread'].add(node)
3233 except Exception as e:
3237 except Exception as e:
3234 yield revlogproblem(
3238 yield revlogproblem(
3235 error=_(b'unpacking %s: %s')
3239 error=_(b'unpacking %s: %s')
3236 % (short(node), stringutil.forcebytestr(e)),
3240 % (short(node), stringutil.forcebytestr(e)),
3237 node=node,
3241 node=node,
3238 )
3242 )
3239 state[b'skipread'].add(node)
3243 state[b'skipread'].add(node)
3240
3244
3241 def storageinfo(
3245 def storageinfo(
3242 self,
3246 self,
3243 exclusivefiles=False,
3247 exclusivefiles=False,
3244 sharedfiles=False,
3248 sharedfiles=False,
3245 revisionscount=False,
3249 revisionscount=False,
3246 trackedsize=False,
3250 trackedsize=False,
3247 storedsize=False,
3251 storedsize=False,
3248 ):
3252 ):
3249 d = {}
3253 d = {}
3250
3254
3251 if exclusivefiles:
3255 if exclusivefiles:
3252 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3256 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3253 if not self._inline:
3257 if not self._inline:
3254 d[b'exclusivefiles'].append((self.opener, self._datafile))
3258 d[b'exclusivefiles'].append((self.opener, self._datafile))
3255
3259
3256 if sharedfiles:
3260 if sharedfiles:
3257 d[b'sharedfiles'] = []
3261 d[b'sharedfiles'] = []
3258
3262
3259 if revisionscount:
3263 if revisionscount:
3260 d[b'revisionscount'] = len(self)
3264 d[b'revisionscount'] = len(self)
3261
3265
3262 if trackedsize:
3266 if trackedsize:
3263 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3267 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3264
3268
3265 if storedsize:
3269 if storedsize:
3266 d[b'storedsize'] = sum(
3270 d[b'storedsize'] = sum(
3267 self.opener.stat(path).st_size for path in self.files()
3271 self.opener.stat(path).st_size for path in self.files()
3268 )
3272 )
3269
3273
3270 return d
3274 return d
3271
3275
3272 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3276 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3273 if not self.hassidedata:
3277 if not self.hassidedata:
3274 return
3278 return
3275 # revlog formats with sidedata support does not support inline
3279 # revlog formats with sidedata support does not support inline
3276 assert not self._inline
3280 assert not self._inline
3277 if not helpers[1] and not helpers[2]:
3281 if not helpers[1] and not helpers[2]:
3278 # Nothing to generate or remove
3282 # Nothing to generate or remove
3279 return
3283 return
3280
3284
3281 new_entries = []
3285 new_entries = []
3282 # append the new sidedata
3286 # append the new sidedata
3283 with self._writing(transaction):
3287 with self._writing(transaction):
3284 ifh, dfh, sdfh = self._writinghandles
3288 ifh, dfh, sdfh = self._writinghandles
3285 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
3289 dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
3286
3290
3287 current_offset = sdfh.tell()
3291 current_offset = sdfh.tell()
3288 for rev in range(startrev, endrev + 1):
3292 for rev in range(startrev, endrev + 1):
3289 entry = self.index[rev]
3293 entry = self.index[rev]
3290 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3294 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3291 store=self,
3295 store=self,
3292 sidedata_helpers=helpers,
3296 sidedata_helpers=helpers,
3293 sidedata={},
3297 sidedata={},
3294 rev=rev,
3298 rev=rev,
3295 )
3299 )
3296
3300
3297 serialized_sidedata = sidedatautil.serialize_sidedata(
3301 serialized_sidedata = sidedatautil.serialize_sidedata(
3298 new_sidedata
3302 new_sidedata
3299 )
3303 )
3300
3304
3301 sidedata_compression_mode = COMP_MODE_INLINE
3305 sidedata_compression_mode = COMP_MODE_INLINE
3302 if serialized_sidedata and self.hassidedata:
3306 if serialized_sidedata and self.hassidedata:
3303 sidedata_compression_mode = COMP_MODE_PLAIN
3307 sidedata_compression_mode = COMP_MODE_PLAIN
3304 h, comp_sidedata = self.compress(serialized_sidedata)
3308 h, comp_sidedata = self.compress(serialized_sidedata)
3305 if (
3309 if (
3306 h != b'u'
3310 h != b'u'
3307 and comp_sidedata[0] != b'\0'
3311 and comp_sidedata[0] != b'\0'
3308 and len(comp_sidedata) < len(serialized_sidedata)
3312 and len(comp_sidedata) < len(serialized_sidedata)
3309 ):
3313 ):
3310 assert not h
3314 assert not h
3311 if (
3315 if (
3312 comp_sidedata[0]
3316 comp_sidedata[0]
3313 == self._docket.default_compression_header
3317 == self._docket.default_compression_header
3314 ):
3318 ):
3315 sidedata_compression_mode = COMP_MODE_DEFAULT
3319 sidedata_compression_mode = COMP_MODE_DEFAULT
3316 serialized_sidedata = comp_sidedata
3320 serialized_sidedata = comp_sidedata
3317 else:
3321 else:
3318 sidedata_compression_mode = COMP_MODE_INLINE
3322 sidedata_compression_mode = COMP_MODE_INLINE
3319 serialized_sidedata = comp_sidedata
3323 serialized_sidedata = comp_sidedata
3320 if entry[8] != 0 or entry[9] != 0:
3324 if entry[8] != 0 or entry[9] != 0:
3321 # rewriting entries that already have sidedata is not
3325 # rewriting entries that already have sidedata is not
3322 # supported yet, because it introduces garbage data in the
3326 # supported yet, because it introduces garbage data in the
3323 # revlog.
3327 # revlog.
3324 msg = b"rewriting existing sidedata is not supported yet"
3328 msg = b"rewriting existing sidedata is not supported yet"
3325 raise error.Abort(msg)
3329 raise error.Abort(msg)
3326
3330
3327 # Apply (potential) flags to add and to remove after running
3331 # Apply (potential) flags to add and to remove after running
3328 # the sidedata helpers
3332 # the sidedata helpers
3329 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3333 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3330 entry_update = (
3334 entry_update = (
3331 current_offset,
3335 current_offset,
3332 len(serialized_sidedata),
3336 len(serialized_sidedata),
3333 new_offset_flags,
3337 new_offset_flags,
3334 sidedata_compression_mode,
3338 sidedata_compression_mode,
3335 )
3339 )
3336
3340
3337 # the sidedata computation might have move the file cursors around
3341 # the sidedata computation might have move the file cursors around
3338 sdfh.seek(current_offset, os.SEEK_SET)
3342 sdfh.seek(current_offset, os.SEEK_SET)
3339 sdfh.write(serialized_sidedata)
3343 sdfh.write(serialized_sidedata)
3340 new_entries.append(entry_update)
3344 new_entries.append(entry_update)
3341 current_offset += len(serialized_sidedata)
3345 current_offset += len(serialized_sidedata)
3342 self._docket.sidedata_end = sdfh.tell()
3346 self._docket.sidedata_end = sdfh.tell()
3343
3347
3344 # rewrite the new index entries
3348 # rewrite the new index entries
3345 ifh.seek(startrev * self.index.entry_size)
3349 ifh.seek(startrev * self.index.entry_size)
3346 for i, e in enumerate(new_entries):
3350 for i, e in enumerate(new_entries):
3347 rev = startrev + i
3351 rev = startrev + i
3348 self.index.replace_sidedata_info(rev, *e)
3352 self.index.replace_sidedata_info(rev, *e)
3349 packed = self.index.entry_binary(rev)
3353 packed = self.index.entry_binary(rev)
3350 if rev == 0 and self._docket is None:
3354 if rev == 0 and self._docket is None:
3351 header = self._format_flags | self._format_version
3355 header = self._format_flags | self._format_version
3352 header = self.index.pack_header(header)
3356 header = self.index.pack_header(header)
3353 packed = header + packed
3357 packed = header + packed
3354 ifh.write(packed)
3358 ifh.write(packed)
@@ -1,1407 +1,1427 b''
1 # revlogdeltas.py - Logic around delta computation for revlog
1 # revlogdeltas.py - Logic around delta computation for revlog
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2018 Octobus <contact@octobus.net>
4 # Copyright 2018 Octobus <contact@octobus.net>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8 """Helper class to compute deltas stored inside revlogs"""
8 """Helper class to compute deltas stored inside revlogs"""
9
9
10
10
11 import collections
11 import collections
12 import struct
12 import struct
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 from ..node import nullrev
15 from ..node import nullrev
16 from ..i18n import _
16 from ..i18n import _
17 from ..pycompat import getattr
17 from ..pycompat import getattr
18
18
19 from .constants import (
19 from .constants import (
20 COMP_MODE_DEFAULT,
20 COMP_MODE_DEFAULT,
21 COMP_MODE_INLINE,
21 COMP_MODE_INLINE,
22 COMP_MODE_PLAIN,
22 COMP_MODE_PLAIN,
23 KIND_CHANGELOG,
23 KIND_CHANGELOG,
24 KIND_FILELOG,
24 KIND_FILELOG,
25 KIND_MANIFESTLOG,
25 KIND_MANIFESTLOG,
26 REVIDX_ISCENSORED,
26 REVIDX_ISCENSORED,
27 REVIDX_RAWTEXT_CHANGING_FLAGS,
27 REVIDX_RAWTEXT_CHANGING_FLAGS,
28 )
28 )
29
29
30 from ..thirdparty import attr
30 from ..thirdparty import attr
31
31
32 from .. import (
32 from .. import (
33 error,
33 error,
34 mdiff,
34 mdiff,
35 util,
35 util,
36 )
36 )
37
37
38 from . import flagutil
38 from . import flagutil
39
39
40 # maximum <delta-chain-data>/<revision-text-length> ratio
40 # maximum <delta-chain-data>/<revision-text-length> ratio
41 LIMIT_DELTA2TEXT = 2
41 LIMIT_DELTA2TEXT = 2
42
42
43
43
44 class _testrevlog:
44 class _testrevlog:
45 """minimalist fake revlog to use in doctests"""
45 """minimalist fake revlog to use in doctests"""
46
46
47 def __init__(self, data, density=0.5, mingap=0, snapshot=()):
47 def __init__(self, data, density=0.5, mingap=0, snapshot=()):
48 """data is an list of revision payload boundaries"""
48 """data is an list of revision payload boundaries"""
49 self._data = data
49 self._data = data
50 self._srdensitythreshold = density
50 self._srdensitythreshold = density
51 self._srmingapsize = mingap
51 self._srmingapsize = mingap
52 self._snapshot = set(snapshot)
52 self._snapshot = set(snapshot)
53 self.index = None
53 self.index = None
54
54
55 def start(self, rev):
55 def start(self, rev):
56 if rev == nullrev:
56 if rev == nullrev:
57 return 0
57 return 0
58 if rev == 0:
58 if rev == 0:
59 return 0
59 return 0
60 return self._data[rev - 1]
60 return self._data[rev - 1]
61
61
62 def end(self, rev):
62 def end(self, rev):
63 if rev == nullrev:
63 if rev == nullrev:
64 return 0
64 return 0
65 return self._data[rev]
65 return self._data[rev]
66
66
67 def length(self, rev):
67 def length(self, rev):
68 return self.end(rev) - self.start(rev)
68 return self.end(rev) - self.start(rev)
69
69
70 def __len__(self):
70 def __len__(self):
71 return len(self._data)
71 return len(self._data)
72
72
73 def issnapshot(self, rev):
73 def issnapshot(self, rev):
74 if rev == nullrev:
74 if rev == nullrev:
75 return True
75 return True
76 return rev in self._snapshot
76 return rev in self._snapshot
77
77
78
78
79 def slicechunk(revlog, revs, targetsize=None):
def slicechunk(revlog, revs, targetsize=None):
    """slice revs to reduce the amount of unrelated data to be read from disk.

    ``revs`` is sliced into groups that should be read in one time.
    Assume that revs are sorted.

    The initial chunk is sliced until the overall density (payload/chunks-span
    ratio) is above `revlog._srdensitythreshold`. No gap smaller than
    `revlog._srmingapsize` is skipped.

    If `targetsize` is set, no chunk larger than `targetsize` will be yield.
    For consistency with other slicing choice, this limit won't go lower than
    `revlog._srmingapsize`.

    If individual revisions chunk are larger than this limit, they will still
    be raised individually.

    >>> data = [
    ...  5, #00 (5)
    ...  10, #01 (5)
    ...  12, #02 (2)
    ...  12, #03 (empty)
    ...  27, #04 (15)
    ...  31, #05 (4)
    ...  31, #06 (empty)
    ...  42, #07 (11)
    ...  47, #08 (5)
    ...  47, #09 (empty)
    ...  48, #10 (1)
    ...  51, #11 (3)
    ...  74, #12 (23)
    ...  85, #13 (11)
    ...  86, #14 (1)
    ...  91, #15 (5)
    ... ]
    >>> revlog = _testrevlog(data, snapshot=range(16))

    >>> list(slicechunk(revlog, list(range(16))))
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
    >>> list(slicechunk(revlog, [0, 15]))
    [[0], [15]]
    >>> list(slicechunk(revlog, [0, 11, 15]))
    [[0], [11], [15]]
    >>> list(slicechunk(revlog, [0, 11, 13, 15]))
    [[0], [11, 13, 15]]
    >>> list(slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
    [[1, 2], [5, 8, 10, 11], [14]]

    Slicing with a maximum chunk size
    >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
    [[0], [11], [13], [15]]
    >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
    [[0], [11], [13, 15]]

    Slicing involving nullrev
    >>> list(slicechunk(revlog, [-1, 0, 11, 13, 15], targetsize=20))
    [[-1, 0], [11], [13, 15]]
    >>> list(slicechunk(revlog, [-1, 13, 15], targetsize=5))
    [[-1], [13], [15]]
    """
    if targetsize is not None:
        # never let the read-size limit drop below the minimum gap size
        targetsize = max(targetsize, revlog._srmingapsize)
    # targetsize should not be specified when evaluating delta candidates:
    # * targetsize is used to ensure we stay within specification when reading,
    densityslicing = getattr(revlog.index, 'slicechunktodensity', None)
    if densityslicing is None:
        # the index has no native implementation: fall back to the pure
        # python density slicing below
        def densityslicing(x, y, z):
            return _slicechunktodensity(revlog, x, y, z)

    for chunk in densityslicing(
        revs, revlog._srdensitythreshold, revlog._srmingapsize
    ):
        # each density-selected chunk may still exceed `targetsize`; cut
        # it further before handing it out
        yield from _slicechunktosize(revlog, chunk, targetsize)
151
151
152
152
def _slicechunktosize(revlog, revs, targetsize=None):
    """slice revs to match the target size

    This is intended to be used on chunk that density slicing selected by that
    are still too large compared to the read garantee of revlog. This might
    happens when "minimal gap size" interrupted the slicing or when chain are
    built in a way that create large blocks next to each other.

    >>> data = [
    ...  3, #0 (3)
    ...  5, #1 (2)
    ...  6, #2 (1)
    ...  8, #3 (2)
    ...  8, #4 (empty)
    ...  11, #5 (3)
    ...  12, #6 (1)
    ...  13, #7 (1)
    ...  14, #8 (1)
    ... ]

    == All snapshots cases ==
    >>> revlog = _testrevlog(data, snapshot=range(9))

    Cases where chunk is already small enough
    >>> list(_slicechunktosize(revlog, [0], 3))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], 3))
    [[6, 7]]
    >>> list(_slicechunktosize(revlog, [0], None))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], None))
    [[6, 7]]

    cases where we need actual slicing
    >>> list(_slicechunktosize(revlog, [0, 1], 3))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 3))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
    [[1, 2], [3]]
    >>> list(_slicechunktosize(revlog, [3, 5], 3))
    [[3], [5]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
    [[3], [5]]
    >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
    [[5], [6, 7, 8]]
    >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
    [[0], [1, 2], [3], [5], [6, 7, 8]]

    Case with too large individual chunk (must return valid chunk)
    >>> list(_slicechunktosize(revlog, [0, 1], 2))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 1))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
    [[3], [5]]

    == No Snapshot cases ==
    >>> revlog = _testrevlog(data)

    Cases where chunk is already small enough
    >>> list(_slicechunktosize(revlog, [0], 3))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], 3))
    [[6, 7]]
    >>> list(_slicechunktosize(revlog, [0], None))
    [[0]]
    >>> list(_slicechunktosize(revlog, [6, 7], None))
    [[6, 7]]

    cases where we need actual slicing
    >>> list(_slicechunktosize(revlog, [0, 1], 3))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 3))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
    [[1], [2, 3]]
    >>> list(_slicechunktosize(revlog, [3, 5], 3))
    [[3], [5]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
    [[3], [4, 5]]
    >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
    [[5], [6, 7, 8]]
    >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
    [[0], [1, 2], [3], [5], [6, 7, 8]]

    Case with too large individual chunk (must return valid chunk)
    >>> list(_slicechunktosize(revlog, [0, 1], 2))
    [[0], [1]]
    >>> list(_slicechunktosize(revlog, [1, 3], 1))
    [[1], [3]]
    >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
    [[3], [5]]

    == mixed case ==
    >>> revlog = _testrevlog(data, snapshot=[0, 1, 2])
    >>> list(_slicechunktosize(revlog, list(range(9)), 5))
    [[0, 1], [2], [3, 4, 5], [6, 7, 8]]
    """
    assert targetsize is None or 0 <= targetsize
    # on-disk byte range covered by the whole candidate chunk
    startdata = revlog.start(revs[0])
    enddata = revlog.end(revs[-1])
    fullspan = enddata - startdata
    if targetsize is None or fullspan <= targetsize:
        # the whole span already fits within the limit: nothing to slice
        yield revs
        return

    startrevidx = 0
    endrevidx = 1
    iterrevs = enumerate(revs)
    next(iterrevs)  # skip first rev.
    # first step: get snapshots out of the way
    for idx, r in iterrevs:
        span = revlog.end(r) - startdata
        snapshot = revlog.issnapshot(r)
        if span <= targetsize and snapshot:
            # this snapshot still fits in the current chunk: extend it
            endrevidx = idx + 1
        else:
            # close the current chunk and start a new one at `idx`
            chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
            if chunk:
                yield chunk
            startrevidx = idx
            startdata = revlog.start(r)
            endrevidx = idx + 1
        if not snapshot:
            # first non-snapshot revision reached: hand the rest over to
            # the binary slicing below
            break

    # for the others, we use binary slicing to quickly converge toward valid
    # chunks (otherwise, we might end up looking for start/end of many
    # revisions). This logic is not looking for the perfect slicing point, it
    # focuses on quickly converging toward valid chunks.
    nbitem = len(revs)
    while (enddata - startdata) > targetsize:
        endrevidx = nbitem
        if nbitem - startrevidx <= 1:
            break  # protect against individual chunk larger than limit
        localenddata = revlog.end(revs[endrevidx - 1])
        span = localenddata - startdata
        while span > targetsize:
            if endrevidx - startrevidx <= 1:
                break  # protect against individual chunk larger than limit
            # shrink the candidate chunk by half until its span fits
            endrevidx -= (endrevidx - startrevidx) // 2
            localenddata = revlog.end(revs[endrevidx - 1])
            span = localenddata - startdata
        chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
        if chunk:
            yield chunk
        startrevidx = endrevidx
        startdata = revlog.start(revs[startrevidx])

    # emit whatever remains after the last cut
    chunk = _trimchunk(revlog, revs, startrevidx)
    if chunk:
        yield chunk
306
306
307
307
def _slicechunktodensity(revlog, revs, targetdensity=0.5, mingapsize=0):
    """slice revs to reduce the amount of unrelated data to be read from disk.

    ``revs`` is sliced into groups that should be read in one time.
    Assume that revs are sorted.

    The initial chunk is sliced until the overall density (payload/chunks-span
    ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
    skipped.

    >>> revlog = _testrevlog([
    ...  5, #00 (5)
    ...  10, #01 (5)
    ...  12, #02 (2)
    ...  12, #03 (empty)
    ...  27, #04 (15)
    ...  31, #05 (4)
    ...  31, #06 (empty)
    ...  42, #07 (11)
    ...  47, #08 (5)
    ...  47, #09 (empty)
    ...  48, #10 (1)
    ...  51, #11 (3)
    ...  74, #12 (23)
    ...  85, #13 (11)
    ...  86, #14 (1)
    ...  91, #15 (5)
    ... ])

    >>> list(_slicechunktodensity(revlog, list(range(16))))
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
    >>> list(_slicechunktodensity(revlog, [0, 15]))
    [[0], [15]]
    >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
    [[0], [11], [15]]
    >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
    [[0], [11, 13, 15]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
    [[1, 2], [5, 8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           mingapsize=20))
    [[1, 2, 3, 5, 8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           targetdensity=0.95))
    [[1, 2], [5], [8, 10, 11], [14]]
    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
    ...                           targetdensity=0.95, mingapsize=12))
    [[1, 2], [5, 8, 10, 11], [14]]
    """
    start = revlog.start
    length = revlog.length

    if len(revs) <= 1:
        # a single revision is always "dense enough"
        yield revs
        return

    deltachainspan = segmentspan(revlog, revs)

    if deltachainspan < mingapsize:
        # the whole span is smaller than any gap we would bother skipping
        yield revs
        return

    readdata = deltachainspan
    chainpayload = sum(length(r) for r in revs)

    if deltachainspan:
        density = chainpayload / float(deltachainspan)
    else:
        density = 1.0

    if density >= targetdensity:
        # already dense enough: keep the chunk whole
        yield revs
        return

    # Collect all large-enough gaps as (size, index) pairs; they are sorted
    # below so the largest ones can be popped off the end first.
    gaps = []
    prevend = None
    for i, rev in enumerate(revs):
        revstart = start(rev)
        revlen = length(rev)

        # Skip empty revisions to form larger holes
        if revlen == 0:
            continue

        if prevend is not None:
            gapsize = revstart - prevend
            # only consider holes that are large enough
            if gapsize > mingapsize:
                gaps.append((gapsize, i))

        prevend = revstart + revlen
    # sort the gaps to pop them from largest to small
    gaps.sort()

    # Collect the indices of the largest holes until the density is acceptable
    selected = []
    while gaps and density < targetdensity:
        gapsize, gapidx = gaps.pop()

        selected.append(gapidx)

        # skipping this gap reduces the amount of data actually read,
        # which raises the resulting density
        readdata -= gapsize
        if readdata > 0:
            density = chainpayload / float(readdata)
        else:
            density = 1.0
    selected.sort()

    # Cut the revs at collected indices
    previdx = 0
    for idx in selected:

        chunk = _trimchunk(revlog, revs, previdx, idx)
        if chunk:
            yield chunk

        previdx = idx

    # emit the final slice after the last selected cut point
    chunk = _trimchunk(revlog, revs, previdx)
    if chunk:
        yield chunk
432
432
433
433
434 def _trimchunk(revlog, revs, startidx, endidx=None):
434 def _trimchunk(revlog, revs, startidx, endidx=None):
435 """returns revs[startidx:endidx] without empty trailing revs
435 """returns revs[startidx:endidx] without empty trailing revs
436
436
437 Doctest Setup
437 Doctest Setup
438 >>> revlog = _testrevlog([
438 >>> revlog = _testrevlog([
439 ... 5, #0
439 ... 5, #0
440 ... 10, #1
440 ... 10, #1
441 ... 12, #2
441 ... 12, #2
442 ... 12, #3 (empty)
442 ... 12, #3 (empty)
443 ... 17, #4
443 ... 17, #4
444 ... 21, #5
444 ... 21, #5
445 ... 21, #6 (empty)
445 ... 21, #6 (empty)
446 ... ])
446 ... ])
447
447
448 Contiguous cases:
448 Contiguous cases:
449 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
449 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
450 [0, 1, 2, 3, 4, 5]
450 [0, 1, 2, 3, 4, 5]
451 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
451 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
452 [0, 1, 2, 3, 4]
452 [0, 1, 2, 3, 4]
453 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
453 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
454 [0, 1, 2]
454 [0, 1, 2]
455 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
455 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
456 [2]
456 [2]
457 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
457 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
458 [3, 4, 5]
458 [3, 4, 5]
459 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
459 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
460 [3, 4]
460 [3, 4]
461
461
462 Discontiguous cases:
462 Discontiguous cases:
463 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
463 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
464 [1, 3, 5]
464 [1, 3, 5]
465 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
465 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
466 [1]
466 [1]
467 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
467 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
468 [3, 5]
468 [3, 5]
469 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
469 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
470 [3, 5]
470 [3, 5]
471 """
471 """
472 length = revlog.length
472 length = revlog.length
473
473
474 if endidx is None:
474 if endidx is None:
475 endidx = len(revs)
475 endidx = len(revs)
476
476
477 # If we have a non-emtpy delta candidate, there are nothing to trim
477 # If we have a non-emtpy delta candidate, there are nothing to trim
478 if revs[endidx - 1] < len(revlog):
478 if revs[endidx - 1] < len(revlog):
479 # Trim empty revs at the end, except the very first revision of a chain
479 # Trim empty revs at the end, except the very first revision of a chain
480 while (
480 while (
481 endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0
481 endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0
482 ):
482 ):
483 endidx -= 1
483 endidx -= 1
484
484
485 return revs[startidx:endidx]
485 return revs[startidx:endidx]
486
486
487
487
def segmentspan(revlog, revs):
    """Get the byte span of a segment of revisions

    revs is a sorted array of revision numbers

    >>> revlog = _testrevlog([
    ...  5, #0
    ...  10, #1
    ...  12, #2
    ...  12, #3 (empty)
    ...  17, #4
    ... ])

    >>> segmentspan(revlog, [0, 1, 2, 3, 4])
    17
    >>> segmentspan(revlog, [0, 4])
    17
    >>> segmentspan(revlog, [3, 4])
    5
    >>> segmentspan(revlog, [1, 2, 3,])
    7
    >>> segmentspan(revlog, [1, 3])
    7
    """
    if not revs:
        # an empty segment occupies no bytes
        return 0
    first, last = revs[0], revs[-1]
    # span between the start of the first rev and the end of the last one
    return revlog.end(last) - revlog.start(first)
516
516
517
517
def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode):
    """build full text from a (base, delta) pair and other metadata

    ``fh`` is forwarded to ``revlog.revision`` when the base text has to be
    read.  ``p1``/``p2`` are parent nodes used for hash validation and
    ``expectednode`` is the node the reconstructed text is checked against.
    ``flags`` are revision index flags (e.g. REVIDX_ISCENSORED).

    Raises ``error.StorageError`` if the revision is flagged censored but its
    content is not, and re-raises ``error.CensoredNodeError`` if censored
    content is found without the censored flag.
    """
    # special case deltas which replace entire base; no need to decode
    # base revision. this neatly avoids censored bases, which throw when
    # they're decoded.
    hlen = struct.calcsize(b">lll")
    if delta[:hlen] == mdiff.replacediffheader(
        revlog.rawsize(baserev), len(delta) - hlen
    ):
        # single "replace everything" hunk: its payload *is* the full text
        fulltext = delta[hlen:]
    else:
        # deltabase is rawtext before changed by flag processors, which is
        # equivalent to non-raw text
        basetext = revlog.revision(baserev, _df=fh)
        fulltext = mdiff.patch(basetext, delta)

    try:
        validatehash = flagutil.processflagsraw(revlog, fulltext, flags)
        if validatehash:
            revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2)
        if flags & REVIDX_ISCENSORED:
            # reaching this point means the content validated as regular
            # data while the index flag claims it is censored: reject
            raise error.StorageError(
                _(b'node %s is not censored') % expectednode
            )
    except error.CensoredNodeError:
        # must pass the censored index flag to add censored revisions
        if not flags & REVIDX_ISCENSORED:
            raise
    return fulltext
547
547
548
548
@attr.s(slots=True, frozen=True)
class _deltainfo:
    """Immutable description of a computed delta candidate."""

    # distance from the base revision -- bounding it limits the I/O needed
    distance = attr.ib()
    # length in bytes of the delta itself (including compression header)
    deltalen = attr.ib()
    # (header, payload) pair holding the delta content
    data = attr.ib()
    # revision number this delta applies against
    base = attr.ib()
    # first revision of the delta chain
    chainbase = attr.ib()
    # number of deltas in the chain
    chainlen = attr.ib()
    # cumulated compressed size of all deltas that must be applied
    compresseddeltalen = attr.ib()
    # snapshot depth when this is an intermediate snapshot, else None
    snapshotdepth = attr.ib()
559
559
560
560
def drop_u_compression(delta):
    """Strip the b'u' (stored-uncompressed) header from a delta.

    Returns a new ``_deltainfo`` whose payload carries no compression header
    at all.  This is useful for revlog format that has better compression
    method.
    """
    header, payload = delta.data
    assert header == b'u', header
    # the resulting delta is one byte shorter: the b'u' marker is gone
    return _deltainfo(
        delta.distance,
        delta.deltalen - 1,
        (b'', payload),
        delta.base,
        delta.chainbase,
        delta.chainlen,
        delta.compresseddeltalen,
        delta.snapshotdepth,
    )
577
577
578
578
def isgooddeltainfo(revlog, deltainfo, revinfo):
    """Returns True if the given delta is good. Good means that it is within
    the disk span, disk size, and chain length bounds that we know to be
    performant."""
    if deltainfo is None:
        return False

    # - 'deltainfo.distance' is the distance from the base revision --
    #   bounding it limits the amount of I/O we need to do.
    # - 'deltainfo.compresseddeltalen' is the sum of the total size of
    #   deltas we need to apply -- bounding it limits the amount of CPU
    #   we consume.

    textlen = revinfo.textlen
    span_limit = revlog._maxdeltachainspan
    if not span_limit:
        # an unset limit disables the span check: make the comparison pass
        span_limit = deltainfo.distance
    # never constrain the span below four times the full text size
    span_limit = max(span_limit, textlen * 4)

    # Bad delta from read span:
    #
    #   If the span of data read is larger than the maximum allowed.
    #
    #   In the sparse-revlog case, we rely on the associated "sparse reading"
    #   to avoid issue related to the span of data. In theory, it would be
    #   possible to build pathological revlog where delta pattern would lead
    #   to too many reads. However, they do not happen in practice at all. So
    #   we skip the span check entirely.
    if not revlog._sparserevlog and span_limit < deltainfo.distance:
        return False

    # Bad delta from new delta size:
    #
    #   If the delta size is larger than the target text, storing the delta
    #   will be inefficient.
    if textlen < deltainfo.deltalen:
        return False

    # Bad delta from cumulated payload size:
    #
    #   If the sum of delta get larger than K * target text length.
    if textlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
        return False

    # Bad delta from chain length:
    #
    #   If the number of delta in the chain gets too high.
    if revlog._maxchainlen and revlog._maxchainlen < deltainfo.chainlen:
        return False

    depth = deltainfo.snapshotdepth

    # bad delta from intermediate snapshot size limit
    #
    #   If an intermediate snapshot size is higher than the limit.  The
    #   limit exist to prevent endless chain of intermediate delta to be
    #   created.
    if depth is not None and (textlen >> depth) < deltainfo.deltalen:
        return False

    # bad delta if new intermediate snapshot is larger than the previous
    # snapshot
    if depth and revlog.length(deltainfo.base) < deltainfo.deltalen:
        return False

    return True
650
650
651
651
652 # If a revision's full text is that much bigger than a base candidate full
652 # If a revision's full text is that much bigger than a base candidate full
653 # text's, it is very unlikely that it will produce a valid delta. We no longer
653 # text's, it is very unlikely that it will produce a valid delta. We no longer
654 # consider these candidates.
654 # consider these candidates.
655 LIMIT_BASE2TEXT = 500
655 LIMIT_BASE2TEXT = 500
656
656
657
657
658 def _candidategroups(
658 def _candidategroups(
659 revlog,
659 revlog,
660 textlen,
660 textlen,
661 p1,
661 p1,
662 p2,
662 p2,
663 cachedelta,
663 cachedelta,
664 excluded_bases=None,
664 excluded_bases=None,
665 target_rev=None,
665 target_rev=None,
666 ):
666 ):
667 """Provides group of revision to be tested as delta base
667 """Provides group of revision to be tested as delta base
668
668
669 This top level function focus on emitting groups with unique and worthwhile
669 This top level function focus on emitting groups with unique and worthwhile
670 content. See _raw_candidate_groups for details about the group order.
670 content. See _raw_candidate_groups for details about the group order.
671 """
671 """
672 # should we try to build a delta?
672 # should we try to build a delta?
673 if not (len(revlog) and revlog._storedeltachains):
673 if not (len(revlog) and revlog._storedeltachains):
674 yield None
674 yield None
675 return
675 return
676
676
677 deltalength = revlog.length
677 deltalength = revlog.length
678 deltaparent = revlog.deltaparent
678 deltaparent = revlog.deltaparent
679 sparse = revlog._sparserevlog
679 sparse = revlog._sparserevlog
680 good = None
680 good = None
681
681
682 deltas_limit = textlen * LIMIT_DELTA2TEXT
682 deltas_limit = textlen * LIMIT_DELTA2TEXT
683 group_chunk_size = revlog._candidate_group_chunk_size
683
684
684 tested = {nullrev}
685 tested = {nullrev}
685 candidates = _refinedgroups(
686 candidates = _refinedgroups(
686 revlog,
687 revlog,
687 p1,
688 p1,
688 p2,
689 p2,
689 cachedelta,
690 cachedelta,
690 )
691 )
691 while True:
692 while True:
692 temptative = candidates.send(good)
693 temptative = candidates.send(good)
693 if temptative is None:
694 if temptative is None:
694 break
695 break
695 group = []
696 group = []
696 for rev in temptative:
697 for rev in temptative:
697 # skip over empty delta (no need to include them in a chain)
698 # skip over empty delta (no need to include them in a chain)
698 while revlog._generaldelta and not (
699 while revlog._generaldelta and not (
699 rev == nullrev or rev in tested or deltalength(rev)
700 rev == nullrev or rev in tested or deltalength(rev)
700 ):
701 ):
701 tested.add(rev)
702 tested.add(rev)
702 rev = deltaparent(rev)
703 rev = deltaparent(rev)
703 # no need to try a delta against nullrev, this will be done as a
704 # no need to try a delta against nullrev, this will be done as a
704 # last resort.
705 # last resort.
705 if rev == nullrev:
706 if rev == nullrev:
706 continue
707 continue
707 # filter out revision we tested already
708 # filter out revision we tested already
708 if rev in tested:
709 if rev in tested:
709 continue
710 continue
710 # an higher authority deamed the base unworthy (e.g. censored)
711 # an higher authority deamed the base unworthy (e.g. censored)
711 if excluded_bases is not None and rev in excluded_bases:
712 if excluded_bases is not None and rev in excluded_bases:
712 tested.add(rev)
713 tested.add(rev)
713 continue
714 continue
714 # We are in some recomputation cases and that rev is too high in
715 # We are in some recomputation cases and that rev is too high in
715 # the revlog
716 # the revlog
716 if target_rev is not None and rev >= target_rev:
717 if target_rev is not None and rev >= target_rev:
717 tested.add(rev)
718 tested.add(rev)
718 continue
719 continue
719 # filter out delta base that will never produce good delta
720 # filter out delta base that will never produce good delta
720 if deltas_limit < revlog.length(rev):
721 if deltas_limit < revlog.length(rev):
721 tested.add(rev)
722 tested.add(rev)
722 continue
723 continue
723 if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT):
724 if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT):
724 tested.add(rev)
725 tested.add(rev)
725 continue
726 continue
726 # no delta for rawtext-changing revs (see "candelta" for why)
727 # no delta for rawtext-changing revs (see "candelta" for why)
727 if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
728 if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
728 tested.add(rev)
729 tested.add(rev)
729 continue
730 continue
730
731
731 # If we reach here, we are about to build and test a delta.
732 # If we reach here, we are about to build and test a delta.
732 # The delta building process will compute the chaininfo in all
733 # The delta building process will compute the chaininfo in all
733 # case, since that computation is cached, it is fine to access it
734 # case, since that computation is cached, it is fine to access it
734 # here too.
735 # here too.
735 chainlen, chainsize = revlog._chaininfo(rev)
736 chainlen, chainsize = revlog._chaininfo(rev)
736 # if chain will be too long, skip base
737 # if chain will be too long, skip base
737 if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
738 if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
738 tested.add(rev)
739 tested.add(rev)
739 continue
740 continue
740 # if chain already have too much data, skip base
741 # if chain already have too much data, skip base
741 if deltas_limit < chainsize:
742 if deltas_limit < chainsize:
742 tested.add(rev)
743 tested.add(rev)
743 continue
744 continue
744 if sparse and revlog.upperboundcomp is not None:
745 if sparse and revlog.upperboundcomp is not None:
745 maxcomp = revlog.upperboundcomp
746 maxcomp = revlog.upperboundcomp
746 basenotsnap = (p1, p2, nullrev)
747 basenotsnap = (p1, p2, nullrev)
747 if rev not in basenotsnap and revlog.issnapshot(rev):
748 if rev not in basenotsnap and revlog.issnapshot(rev):
748 snapshotdepth = revlog.snapshotdepth(rev)
749 snapshotdepth = revlog.snapshotdepth(rev)
749 # If text is significantly larger than the base, we can
750 # If text is significantly larger than the base, we can
750 # expect the resulting delta to be proportional to the size
751 # expect the resulting delta to be proportional to the size
751 # difference
752 # difference
752 revsize = revlog.rawsize(rev)
753 revsize = revlog.rawsize(rev)
753 rawsizedistance = max(textlen - revsize, 0)
754 rawsizedistance = max(textlen - revsize, 0)
754 # use an estimate of the compression upper bound.
755 # use an estimate of the compression upper bound.
755 lowestrealisticdeltalen = rawsizedistance // maxcomp
756 lowestrealisticdeltalen = rawsizedistance // maxcomp
756
757
757 # check the absolute constraint on the delta size
758 # check the absolute constraint on the delta size
758 snapshotlimit = textlen >> snapshotdepth
759 snapshotlimit = textlen >> snapshotdepth
759 if snapshotlimit < lowestrealisticdeltalen:
760 if snapshotlimit < lowestrealisticdeltalen:
760 # delta lower bound is larger than accepted upper bound
761 # delta lower bound is larger than accepted upper bound
761 tested.add(rev)
762 tested.add(rev)
762 continue
763 continue
763
764
764 # check the relative constraint on the delta size
765 # check the relative constraint on the delta size
765 revlength = revlog.length(rev)
766 revlength = revlog.length(rev)
766 if revlength < lowestrealisticdeltalen:
767 if revlength < lowestrealisticdeltalen:
767 # delta probable lower bound is larger than target base
768 # delta probable lower bound is larger than target base
768 tested.add(rev)
769 tested.add(rev)
769 continue
770 continue
770
771
771 group.append(rev)
772 group.append(rev)
772 if group:
773 if group:
773 # XXX: in the sparse revlog case, group can become large,
774 # When the size of the candidate group is big, it can result in a
774 # impacting performances. Some bounding or slicing mecanism
775 # quite significant performance impact. To reduce this, we can send
775 # would help to reduce this impact.
776 # them in smaller batches until the new batch does not provide any
776 tested.update(group)
777 # improvements.
777 good = yield tuple(group)
778 #
779 # This might reduce the overall efficiency of the compression in
780 # some corner cases, but that should also prevent very pathological
781 # cases from being an issue. (eg. 20 000 candidates).
782 #
783 # XXX note that the ordering of the group becomes important as it
784 # now impacts the final result. The current order is unprocessed
785 # and can be improved.
786 if group_chunk_size == 0:
787 tested.update(group)
788 good = yield tuple(group)
789 else:
790 prev_good = good
791 for start in range(0, len(group), group_chunk_size):
792 sub_group = group[start : start + group_chunk_size]
793 tested.update(sub_group)
794 good = yield tuple(sub_group)
795 if prev_good == good:
796 break
797
778 yield None
798 yield None
779
799
780
800
781 def _findsnapshots(revlog, cache, start_rev):
801 def _findsnapshots(revlog, cache, start_rev):
782 """find snapshot from start_rev to tip"""
802 """find snapshot from start_rev to tip"""
783 if util.safehasattr(revlog.index, b'findsnapshots'):
803 if util.safehasattr(revlog.index, b'findsnapshots'):
784 revlog.index.findsnapshots(cache, start_rev)
804 revlog.index.findsnapshots(cache, start_rev)
785 else:
805 else:
786 deltaparent = revlog.deltaparent
806 deltaparent = revlog.deltaparent
787 issnapshot = revlog.issnapshot
807 issnapshot = revlog.issnapshot
788 for rev in revlog.revs(start_rev):
808 for rev in revlog.revs(start_rev):
789 if issnapshot(rev):
809 if issnapshot(rev):
790 cache[deltaparent(rev)].append(rev)
810 cache[deltaparent(rev)].append(rev)
791
811
792
812
793 def _refinedgroups(revlog, p1, p2, cachedelta):
813 def _refinedgroups(revlog, p1, p2, cachedelta):
794 good = None
814 good = None
795 # First we try to reuse a the delta contained in the bundle.
815 # First we try to reuse a the delta contained in the bundle.
796 # (or from the source revlog)
816 # (or from the source revlog)
797 #
817 #
798 # This logic only applies to general delta repositories and can be disabled
818 # This logic only applies to general delta repositories and can be disabled
799 # through configuration. Disabling reuse source delta is useful when
819 # through configuration. Disabling reuse source delta is useful when
800 # we want to make sure we recomputed "optimal" deltas.
820 # we want to make sure we recomputed "optimal" deltas.
801 debug_info = None
821 debug_info = None
802 if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
822 if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
803 # Assume what we received from the server is a good choice
823 # Assume what we received from the server is a good choice
804 # build delta will reuse the cache
824 # build delta will reuse the cache
805 if debug_info is not None:
825 if debug_info is not None:
806 debug_info['cached-delta.tested'] += 1
826 debug_info['cached-delta.tested'] += 1
807 good = yield (cachedelta[0],)
827 good = yield (cachedelta[0],)
808 if good is not None:
828 if good is not None:
809 if debug_info is not None:
829 if debug_info is not None:
810 debug_info['cached-delta.accepted'] += 1
830 debug_info['cached-delta.accepted'] += 1
811 yield None
831 yield None
812 return
832 return
813 # XXX cache me higher
833 # XXX cache me higher
814 snapshots = collections.defaultdict(list)
834 snapshots = collections.defaultdict(list)
815 groups = _rawgroups(
835 groups = _rawgroups(
816 revlog,
836 revlog,
817 p1,
837 p1,
818 p2,
838 p2,
819 cachedelta,
839 cachedelta,
820 snapshots,
840 snapshots,
821 )
841 )
822 for candidates in groups:
842 for candidates in groups:
823 good = yield candidates
843 good = yield candidates
824 if good is not None:
844 if good is not None:
825 break
845 break
826
846
827 # If sparse revlog is enabled, we can try to refine the available deltas
847 # If sparse revlog is enabled, we can try to refine the available deltas
828 if not revlog._sparserevlog:
848 if not revlog._sparserevlog:
829 yield None
849 yield None
830 return
850 return
831
851
832 # if we have a refinable value, try to refine it
852 # if we have a refinable value, try to refine it
833 if good is not None and good not in (p1, p2) and revlog.issnapshot(good):
853 if good is not None and good not in (p1, p2) and revlog.issnapshot(good):
834 # refine snapshot down
854 # refine snapshot down
835 previous = None
855 previous = None
836 while previous != good:
856 while previous != good:
837 previous = good
857 previous = good
838 base = revlog.deltaparent(good)
858 base = revlog.deltaparent(good)
839 if base == nullrev:
859 if base == nullrev:
840 break
860 break
841 good = yield (base,)
861 good = yield (base,)
842 # refine snapshot up
862 # refine snapshot up
843 if not snapshots:
863 if not snapshots:
844 _findsnapshots(revlog, snapshots, good + 1)
864 _findsnapshots(revlog, snapshots, good + 1)
845 previous = None
865 previous = None
846 while good != previous:
866 while good != previous:
847 previous = good
867 previous = good
848 children = tuple(sorted(c for c in snapshots[good]))
868 children = tuple(sorted(c for c in snapshots[good]))
849 good = yield children
869 good = yield children
850
870
851 if debug_info is not None:
871 if debug_info is not None:
852 if good is None:
872 if good is None:
853 debug_info['no-solution'] += 1
873 debug_info['no-solution'] += 1
854
874
855 yield None
875 yield None
856
876
857
877
858 def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None):
878 def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None):
859 """Provides group of revision to be tested as delta base
879 """Provides group of revision to be tested as delta base
860
880
861 This lower level function focus on emitting delta theorically interresting
881 This lower level function focus on emitting delta theorically interresting
862 without looking it any practical details.
882 without looking it any practical details.
863
883
864 The group order aims at providing fast or small candidates first.
884 The group order aims at providing fast or small candidates first.
865 """
885 """
866 gdelta = revlog._generaldelta
886 gdelta = revlog._generaldelta
867 # gate sparse behind general-delta because of issue6056
887 # gate sparse behind general-delta because of issue6056
868 sparse = gdelta and revlog._sparserevlog
888 sparse = gdelta and revlog._sparserevlog
869 curr = len(revlog)
889 curr = len(revlog)
870 prev = curr - 1
890 prev = curr - 1
871 deltachain = lambda rev: revlog._deltachain(rev)[0]
891 deltachain = lambda rev: revlog._deltachain(rev)[0]
872
892
873 if gdelta:
893 if gdelta:
874 # exclude already lazy tested base if any
894 # exclude already lazy tested base if any
875 parents = [p for p in (p1, p2) if p != nullrev]
895 parents = [p for p in (p1, p2) if p != nullrev]
876
896
877 if not revlog._deltabothparents and len(parents) == 2:
897 if not revlog._deltabothparents and len(parents) == 2:
878 parents.sort()
898 parents.sort()
879 # To minimize the chance of having to build a fulltext,
899 # To minimize the chance of having to build a fulltext,
880 # pick first whichever parent is closest to us (max rev)
900 # pick first whichever parent is closest to us (max rev)
881 yield (parents[1],)
901 yield (parents[1],)
882 # then the other one (min rev) if the first did not fit
902 # then the other one (min rev) if the first did not fit
883 yield (parents[0],)
903 yield (parents[0],)
884 elif len(parents) > 0:
904 elif len(parents) > 0:
885 # Test all parents (1 or 2), and keep the best candidate
905 # Test all parents (1 or 2), and keep the best candidate
886 yield parents
906 yield parents
887
907
888 if sparse and parents:
908 if sparse and parents:
889 if snapshots is None:
909 if snapshots is None:
890 # map: base-rev: [snapshot-revs]
910 # map: base-rev: [snapshot-revs]
891 snapshots = collections.defaultdict(list)
911 snapshots = collections.defaultdict(list)
892 # See if we can use an existing snapshot in the parent chains to use as
912 # See if we can use an existing snapshot in the parent chains to use as
893 # a base for a new intermediate-snapshot
913 # a base for a new intermediate-snapshot
894 #
914 #
895 # search for snapshot in parents delta chain
915 # search for snapshot in parents delta chain
896 # map: snapshot-level: snapshot-rev
916 # map: snapshot-level: snapshot-rev
897 parents_snaps = collections.defaultdict(set)
917 parents_snaps = collections.defaultdict(set)
898 candidate_chains = [deltachain(p) for p in parents]
918 candidate_chains = [deltachain(p) for p in parents]
899 for chain in candidate_chains:
919 for chain in candidate_chains:
900 for idx, s in enumerate(chain):
920 for idx, s in enumerate(chain):
901 if not revlog.issnapshot(s):
921 if not revlog.issnapshot(s):
902 break
922 break
903 parents_snaps[idx].add(s)
923 parents_snaps[idx].add(s)
904 snapfloor = min(parents_snaps[0]) + 1
924 snapfloor = min(parents_snaps[0]) + 1
905 _findsnapshots(revlog, snapshots, snapfloor)
925 _findsnapshots(revlog, snapshots, snapfloor)
906 # search for the highest "unrelated" revision
926 # search for the highest "unrelated" revision
907 #
927 #
908 # Adding snapshots used by "unrelated" revision increase the odd we
928 # Adding snapshots used by "unrelated" revision increase the odd we
909 # reuse an independant, yet better snapshot chain.
929 # reuse an independant, yet better snapshot chain.
910 #
930 #
911 # XXX instead of building a set of revisions, we could lazily enumerate
931 # XXX instead of building a set of revisions, we could lazily enumerate
912 # over the chains. That would be more efficient, however we stick to
932 # over the chains. That would be more efficient, however we stick to
913 # simple code for now.
933 # simple code for now.
914 all_revs = set()
934 all_revs = set()
915 for chain in candidate_chains:
935 for chain in candidate_chains:
916 all_revs.update(chain)
936 all_revs.update(chain)
917 other = None
937 other = None
918 for r in revlog.revs(prev, snapfloor):
938 for r in revlog.revs(prev, snapfloor):
919 if r not in all_revs:
939 if r not in all_revs:
920 other = r
940 other = r
921 break
941 break
922 if other is not None:
942 if other is not None:
923 # To avoid unfair competition, we won't use unrelated intermediate
943 # To avoid unfair competition, we won't use unrelated intermediate
924 # snapshot that are deeper than the ones from the parent delta
944 # snapshot that are deeper than the ones from the parent delta
925 # chain.
945 # chain.
926 max_depth = max(parents_snaps.keys())
946 max_depth = max(parents_snaps.keys())
927 chain = deltachain(other)
947 chain = deltachain(other)
928 for depth, s in enumerate(chain):
948 for depth, s in enumerate(chain):
929 if s < snapfloor:
949 if s < snapfloor:
930 continue
950 continue
931 if max_depth < depth:
951 if max_depth < depth:
932 break
952 break
933 if not revlog.issnapshot(s):
953 if not revlog.issnapshot(s):
934 break
954 break
935 parents_snaps[depth].add(s)
955 parents_snaps[depth].add(s)
936 # Test them as possible intermediate snapshot base
956 # Test them as possible intermediate snapshot base
937 # We test them from highest to lowest level. High level one are more
957 # We test them from highest to lowest level. High level one are more
938 # likely to result in small delta
958 # likely to result in small delta
939 floor = None
959 floor = None
940 for idx, snaps in sorted(parents_snaps.items(), reverse=True):
960 for idx, snaps in sorted(parents_snaps.items(), reverse=True):
941 siblings = set()
961 siblings = set()
942 for s in snaps:
962 for s in snaps:
943 siblings.update(snapshots[s])
963 siblings.update(snapshots[s])
944 # Before considering making a new intermediate snapshot, we check
964 # Before considering making a new intermediate snapshot, we check
945 # if an existing snapshot, children of base we consider, would be
965 # if an existing snapshot, children of base we consider, would be
946 # suitable.
966 # suitable.
947 #
967 #
948 # It give a change to reuse a delta chain "unrelated" to the
968 # It give a change to reuse a delta chain "unrelated" to the
949 # current revision instead of starting our own. Without such
969 # current revision instead of starting our own. Without such
950 # re-use, topological branches would keep reopening new chains.
970 # re-use, topological branches would keep reopening new chains.
951 # Creating more and more snapshot as the repository grow.
971 # Creating more and more snapshot as the repository grow.
952
972
953 if floor is not None:
973 if floor is not None:
954 # We only do this for siblings created after the one in our
974 # We only do this for siblings created after the one in our
955 # parent's delta chain. Those created before has less chances
975 # parent's delta chain. Those created before has less chances
956 # to be valid base since our ancestors had to create a new
976 # to be valid base since our ancestors had to create a new
957 # snapshot.
977 # snapshot.
958 siblings = [r for r in siblings if floor < r]
978 siblings = [r for r in siblings if floor < r]
959 yield tuple(sorted(siblings))
979 yield tuple(sorted(siblings))
960 # then test the base from our parent's delta chain.
980 # then test the base from our parent's delta chain.
961 yield tuple(sorted(snaps))
981 yield tuple(sorted(snaps))
962 floor = min(snaps)
982 floor = min(snaps)
963 # No suitable base found in the parent chain, search if any full
983 # No suitable base found in the parent chain, search if any full
964 # snapshots emitted since parent's base would be a suitable base for an
984 # snapshots emitted since parent's base would be a suitable base for an
965 # intermediate snapshot.
985 # intermediate snapshot.
966 #
986 #
967 # It give a chance to reuse a delta chain unrelated to the current
987 # It give a chance to reuse a delta chain unrelated to the current
968 # revisions instead of starting our own. Without such re-use,
988 # revisions instead of starting our own. Without such re-use,
969 # topological branches would keep reopening new full chains. Creating
989 # topological branches would keep reopening new full chains. Creating
970 # more and more snapshot as the repository grow.
990 # more and more snapshot as the repository grow.
971 yield tuple(snapshots[nullrev])
991 yield tuple(snapshots[nullrev])
972
992
973 if not sparse:
993 if not sparse:
974 # other approach failed try against prev to hopefully save us a
994 # other approach failed try against prev to hopefully save us a
975 # fulltext.
995 # fulltext.
976 yield (prev,)
996 yield (prev,)
977
997
978
998
979 class deltacomputer:
999 class deltacomputer:
980 def __init__(
1000 def __init__(
981 self,
1001 self,
982 revlog,
1002 revlog,
983 write_debug=None,
1003 write_debug=None,
984 debug_search=False,
1004 debug_search=False,
985 debug_info=None,
1005 debug_info=None,
986 ):
1006 ):
987 self.revlog = revlog
1007 self.revlog = revlog
988 self._write_debug = write_debug
1008 self._write_debug = write_debug
989 self._debug_search = debug_search
1009 self._debug_search = debug_search
990 self._debug_info = debug_info
1010 self._debug_info = debug_info
991
1011
992 def buildtext(self, revinfo, fh):
1012 def buildtext(self, revinfo, fh):
993 """Builds a fulltext version of a revision
1013 """Builds a fulltext version of a revision
994
1014
995 revinfo: revisioninfo instance that contains all needed info
1015 revinfo: revisioninfo instance that contains all needed info
996 fh: file handle to either the .i or the .d revlog file,
1016 fh: file handle to either the .i or the .d revlog file,
997 depending on whether it is inlined or not
1017 depending on whether it is inlined or not
998 """
1018 """
999 btext = revinfo.btext
1019 btext = revinfo.btext
1000 if btext[0] is not None:
1020 if btext[0] is not None:
1001 return btext[0]
1021 return btext[0]
1002
1022
1003 revlog = self.revlog
1023 revlog = self.revlog
1004 cachedelta = revinfo.cachedelta
1024 cachedelta = revinfo.cachedelta
1005 baserev = cachedelta[0]
1025 baserev = cachedelta[0]
1006 delta = cachedelta[1]
1026 delta = cachedelta[1]
1007
1027
1008 fulltext = btext[0] = _textfromdelta(
1028 fulltext = btext[0] = _textfromdelta(
1009 fh,
1029 fh,
1010 revlog,
1030 revlog,
1011 baserev,
1031 baserev,
1012 delta,
1032 delta,
1013 revinfo.p1,
1033 revinfo.p1,
1014 revinfo.p2,
1034 revinfo.p2,
1015 revinfo.flags,
1035 revinfo.flags,
1016 revinfo.node,
1036 revinfo.node,
1017 )
1037 )
1018 return fulltext
1038 return fulltext
1019
1039
1020 def _builddeltadiff(self, base, revinfo, fh):
1040 def _builddeltadiff(self, base, revinfo, fh):
1021 revlog = self.revlog
1041 revlog = self.revlog
1022 t = self.buildtext(revinfo, fh)
1042 t = self.buildtext(revinfo, fh)
1023 if revlog.iscensored(base):
1043 if revlog.iscensored(base):
1024 # deltas based on a censored revision must replace the
1044 # deltas based on a censored revision must replace the
1025 # full content in one patch, so delta works everywhere
1045 # full content in one patch, so delta works everywhere
1026 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
1046 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
1027 delta = header + t
1047 delta = header + t
1028 else:
1048 else:
1029 ptext = revlog.rawdata(base, _df=fh)
1049 ptext = revlog.rawdata(base, _df=fh)
1030 delta = mdiff.textdiff(ptext, t)
1050 delta = mdiff.textdiff(ptext, t)
1031
1051
1032 return delta
1052 return delta
1033
1053
1034 def _builddeltainfo(self, revinfo, base, fh):
1054 def _builddeltainfo(self, revinfo, base, fh):
1035 # can we use the cached delta?
1055 # can we use the cached delta?
1036 revlog = self.revlog
1056 revlog = self.revlog
1037 debug_search = self._write_debug is not None and self._debug_search
1057 debug_search = self._write_debug is not None and self._debug_search
1038 chainbase = revlog.chainbase(base)
1058 chainbase = revlog.chainbase(base)
1039 if revlog._generaldelta:
1059 if revlog._generaldelta:
1040 deltabase = base
1060 deltabase = base
1041 else:
1061 else:
1042 deltabase = chainbase
1062 deltabase = chainbase
1043 snapshotdepth = None
1063 snapshotdepth = None
1044 if revlog._sparserevlog and deltabase == nullrev:
1064 if revlog._sparserevlog and deltabase == nullrev:
1045 snapshotdepth = 0
1065 snapshotdepth = 0
1046 elif revlog._sparserevlog and revlog.issnapshot(deltabase):
1066 elif revlog._sparserevlog and revlog.issnapshot(deltabase):
1047 # A delta chain should always be one full snapshot,
1067 # A delta chain should always be one full snapshot,
1048 # zero or more semi-snapshots, and zero or more deltas
1068 # zero or more semi-snapshots, and zero or more deltas
1049 p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
1069 p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
1050 if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
1070 if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
1051 snapshotdepth = len(revlog._deltachain(deltabase)[0])
1071 snapshotdepth = len(revlog._deltachain(deltabase)[0])
1052 delta = None
1072 delta = None
1053 if revinfo.cachedelta:
1073 if revinfo.cachedelta:
1054 cachebase, cachediff = revinfo.cachedelta
1074 cachebase, cachediff = revinfo.cachedelta
1055 # check if the diff still apply
1075 # check if the diff still apply
1056 currentbase = cachebase
1076 currentbase = cachebase
1057 while (
1077 while (
1058 currentbase != nullrev
1078 currentbase != nullrev
1059 and currentbase != base
1079 and currentbase != base
1060 and self.revlog.length(currentbase) == 0
1080 and self.revlog.length(currentbase) == 0
1061 ):
1081 ):
1062 currentbase = self.revlog.deltaparent(currentbase)
1082 currentbase = self.revlog.deltaparent(currentbase)
1063 if self.revlog._lazydelta and currentbase == base:
1083 if self.revlog._lazydelta and currentbase == base:
1064 delta = revinfo.cachedelta[1]
1084 delta = revinfo.cachedelta[1]
1065 if delta is None:
1085 if delta is None:
1066 delta = self._builddeltadiff(base, revinfo, fh)
1086 delta = self._builddeltadiff(base, revinfo, fh)
1067 if debug_search:
1087 if debug_search:
1068 msg = b"DBG-DELTAS-SEARCH: uncompressed-delta-size=%d\n"
1088 msg = b"DBG-DELTAS-SEARCH: uncompressed-delta-size=%d\n"
1069 msg %= len(delta)
1089 msg %= len(delta)
1070 self._write_debug(msg)
1090 self._write_debug(msg)
1071 # snapshotdept need to be neither None nor 0 level snapshot
1091 # snapshotdept need to be neither None nor 0 level snapshot
1072 if revlog.upperboundcomp is not None and snapshotdepth:
1092 if revlog.upperboundcomp is not None and snapshotdepth:
1073 lowestrealisticdeltalen = len(delta) // revlog.upperboundcomp
1093 lowestrealisticdeltalen = len(delta) // revlog.upperboundcomp
1074 snapshotlimit = revinfo.textlen >> snapshotdepth
1094 snapshotlimit = revinfo.textlen >> snapshotdepth
1075 if debug_search:
1095 if debug_search:
1076 msg = b"DBG-DELTAS-SEARCH: projected-lower-size=%d\n"
1096 msg = b"DBG-DELTAS-SEARCH: projected-lower-size=%d\n"
1077 msg %= lowestrealisticdeltalen
1097 msg %= lowestrealisticdeltalen
1078 self._write_debug(msg)
1098 self._write_debug(msg)
1079 if snapshotlimit < lowestrealisticdeltalen:
1099 if snapshotlimit < lowestrealisticdeltalen:
1080 if debug_search:
1100 if debug_search:
1081 msg = b"DBG-DELTAS-SEARCH: DISCARDED (snapshot limit)\n"
1101 msg = b"DBG-DELTAS-SEARCH: DISCARDED (snapshot limit)\n"
1082 self._write_debug(msg)
1102 self._write_debug(msg)
1083 return None
1103 return None
1084 if revlog.length(base) < lowestrealisticdeltalen:
1104 if revlog.length(base) < lowestrealisticdeltalen:
1085 if debug_search:
1105 if debug_search:
1086 msg = b"DBG-DELTAS-SEARCH: DISCARDED (prev size)\n"
1106 msg = b"DBG-DELTAS-SEARCH: DISCARDED (prev size)\n"
1087 self._write_debug(msg)
1107 self._write_debug(msg)
1088 return None
1108 return None
1089 header, data = revlog.compress(delta)
1109 header, data = revlog.compress(delta)
1090 deltalen = len(header) + len(data)
1110 deltalen = len(header) + len(data)
1091 offset = revlog.end(len(revlog) - 1)
1111 offset = revlog.end(len(revlog) - 1)
1092 dist = deltalen + offset - revlog.start(chainbase)
1112 dist = deltalen + offset - revlog.start(chainbase)
1093 chainlen, compresseddeltalen = revlog._chaininfo(base)
1113 chainlen, compresseddeltalen = revlog._chaininfo(base)
1094 chainlen += 1
1114 chainlen += 1
1095 compresseddeltalen += deltalen
1115 compresseddeltalen += deltalen
1096
1116
1097 return _deltainfo(
1117 return _deltainfo(
1098 dist,
1118 dist,
1099 deltalen,
1119 deltalen,
1100 (header, data),
1120 (header, data),
1101 deltabase,
1121 deltabase,
1102 chainbase,
1122 chainbase,
1103 chainlen,
1123 chainlen,
1104 compresseddeltalen,
1124 compresseddeltalen,
1105 snapshotdepth,
1125 snapshotdepth,
1106 )
1126 )
1107
1127
def _fullsnapshotinfo(self, fh, revinfo, curr):
    """Return a _deltainfo that stores `revinfo` as a full snapshot.

    `curr` is the revision number the snapshot will occupy; a full
    snapshot is its own delta base and chain base, starting a fresh
    delta chain of length one at snapshot depth zero.
    """
    compressed = self.revlog.compress(self.buildtext(revinfo, fh))
    # total stored size is compression header plus compressed payload
    stored_size = len(compressed[0]) + len(compressed[1])
    return _deltainfo(
        stored_size,  # distance: nothing precedes it in the chain
        stored_size,  # deltalen: the "delta" is the whole text
        compressed,  # (header, data) pair as returned by compress()
        curr,  # deltabase: the revision is its own base
        curr,  # chainbase
        1,  # chainlen: a snapshot starts a new chain
        stored_size,  # compresseddeltalen
        0,  # snapshotdepth: depth 0 == full snapshot
    )
1126
1146
def finddeltainfo(self, revinfo, fh, excluded_bases=None, target_rev=None):
    """Find an acceptable delta against a candidate revision

    revinfo: information about the revision (instance of _revisioninfo)
    fh: file handle to either the .i or the .d revlog file,
        depending on whether it is inlined or not

    Returns the first acceptable candidate revision, as ordered by
    _candidategroups

    If no suitable deltabase is found, we return delta info for a full
    snapshot.

    `excluded_bases` is an optional set of revision that cannot be used as
    a delta base. Use this to recompute delta suitable in censor or strip
    context.

    `target_rev` is the revision number the new revision will be stored
    at; it defaults to the next revision of the revlog.
    """
    if target_rev is None:
        target_rev = len(self.revlog)

    # an empty text always stores as a (trivial) full snapshot
    if not revinfo.textlen:
        return self._fullsnapshotinfo(fh, revinfo, target_rev)

    if excluded_bases is None:
        excluded_bases = set()

    # no delta for flag processor revision (see "candelta" for why)
    # not calling candelta since only one revision needs test, also to
    # avoid overhead fetching flags again.
    if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
        return self._fullsnapshotinfo(fh, revinfo, target_rev)

    # debug information is gathered when either debug sink is active;
    # the verbose per-round search trace additionally needs the write
    # callback and the dedicated `_debug_search` flag.
    gather_debug = (
        self._write_debug is not None or self._debug_info is not None
    )
    debug_search = self._write_debug is not None and self._debug_search

    if gather_debug:
        start = util.timer()

    # count the number of different delta we tried (for debug purpose)
    dbg_try_count = 0
    # count the number of "search round" we did. (for debug purpose)
    dbg_try_rounds = 0
    dbg_type = b'unknown'

    cachedelta = revinfo.cachedelta
    p1 = revinfo.p1
    p2 = revinfo.p2
    revlog = self.revlog

    deltainfo = None
    # parent *revision numbers*; p1/p2 above are nodes
    p1r, p2r = revlog.rev(p1), revlog.rev(p2)

    if gather_debug:
        if p1r != nullrev:
            p1_chain_len = revlog._chaininfo(p1r)[0]
        else:
            p1_chain_len = -1
        if p2r != nullrev:
            p2_chain_len = revlog._chaininfo(p2r)[0]
        else:
            p2_chain_len = -1
    if debug_search:
        msg = b"DBG-DELTAS-SEARCH: SEARCH rev=%d\n"
        msg %= target_rev
        self._write_debug(msg)

    groups = _candidategroups(
        self.revlog,
        revinfo.textlen,
        p1r,
        p2r,
        cachedelta,
        excluded_bases,
        target_rev,
    )
    candidaterevs = next(groups)
    while candidaterevs is not None:
        dbg_try_rounds += 1
        if debug_search:
            prev = None
            if deltainfo is not None:
                prev = deltainfo.base

            if (
                cachedelta is not None
                and len(candidaterevs) == 1
                and cachedelta[0] in candidaterevs
            ):
                round_type = b"cached-delta"
            # FIX: candidaterevs holds revision numbers, so compare the
            # parents' revision numbers (p1r/p2r), not their nodes
            # (p1/p2) — the node form could never match and the
            # "parents" round was always misreported as "search-down".
            elif p1r in candidaterevs or p2r in candidaterevs:
                round_type = b"parents"
            elif prev is not None and all(c < prev for c in candidaterevs):
                round_type = b"refine-down"
            elif prev is not None and all(c > prev for c in candidaterevs):
                round_type = b"refine-up"
            else:
                round_type = b"search-down"
            msg = b"DBG-DELTAS-SEARCH: ROUND #%d - %d candidates - %s\n"
            msg %= (dbg_try_rounds, len(candidaterevs), round_type)
            self._write_debug(msg)
        nominateddeltas = []
        if deltainfo is not None:
            if debug_search:
                msg = (
                    b"DBG-DELTAS-SEARCH: CONTENDER: rev=%d - length=%d\n"
                )
                msg %= (deltainfo.base, deltainfo.deltalen)
                self._write_debug(msg)
            # if we already found a good delta,
            # challenge it against refined candidates
            nominateddeltas.append(deltainfo)
        for candidaterev in candidaterevs:
            if debug_search:
                msg = b"DBG-DELTAS-SEARCH: CANDIDATE: rev=%d\n"
                msg %= candidaterev
                self._write_debug(msg)
                candidate_type = None
                # FIX: compare against the parents' revision numbers,
                # not their nodes (candidaterev is an int).
                if candidaterev == p1r:
                    candidate_type = b"p1"
                elif candidaterev == p2r:
                    candidate_type = b"p2"
                elif self.revlog.issnapshot(candidaterev):
                    candidate_type = b"snapshot-%d"
                    candidate_type %= self.revlog.snapshotdepth(
                        candidaterev
                    )

                if candidate_type is not None:
                    msg = b"DBG-DELTAS-SEARCH: type=%s\n"
                    msg %= candidate_type
                    self._write_debug(msg)
                    msg = b"DBG-DELTAS-SEARCH: size=%d\n"
                    msg %= self.revlog.length(candidaterev)
                    self._write_debug(msg)
                    msg = b"DBG-DELTAS-SEARCH: base=%d\n"
                    msg %= self.revlog.deltaparent(candidaterev)
                    self._write_debug(msg)

            dbg_try_count += 1

            if debug_search:
                delta_start = util.timer()
            candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
            if debug_search:
                delta_end = util.timer()
                msg = b"DBG-DELTAS-SEARCH: delta-search-time=%f\n"
                msg %= delta_end - delta_start
                self._write_debug(msg)
            if candidatedelta is not None:
                if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
                    if debug_search:
                        msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (GOOD)\n"
                        msg %= candidatedelta.deltalen
                        self._write_debug(msg)
                    nominateddeltas.append(candidatedelta)
                elif debug_search:
                    msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (BAD)\n"
                    msg %= candidatedelta.deltalen
                    self._write_debug(msg)
            elif debug_search:
                msg = b"DBG-DELTAS-SEARCH: NO-DELTA\n"
                self._write_debug(msg)
        if nominateddeltas:
            deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
        # feed the winning base back so the generator can refine around it
        if deltainfo is not None:
            candidaterevs = groups.send(deltainfo.base)
        else:
            candidaterevs = next(groups)

    if deltainfo is None:
        dbg_type = b"full"
        deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
    elif deltainfo.snapshotdepth:  # pytype: disable=attribute-error
        dbg_type = b"snapshot"
    else:
        dbg_type = b"delta"

    if gather_debug:
        end = util.timer()
        # the cache was "used" iff the very first attempt won outright
        used_cached = (
            cachedelta is not None
            and dbg_try_rounds == 1
            and dbg_try_count == 1
            and deltainfo.base == cachedelta[0]
        )
        dbg = {
            'duration': end - start,
            'revision': target_rev,
            'delta-base': deltainfo.base,  # pytype: disable=attribute-error
            'search_round_count': dbg_try_rounds,
            'using-cached-base': used_cached,
            'delta_try_count': dbg_try_count,
            'type': dbg_type,
            'p1-chain-len': p1_chain_len,
            'p2-chain-len': p2_chain_len,
        }
        if (
            deltainfo.snapshotdepth  # pytype: disable=attribute-error
            is not None
        ):
            dbg[
                'snapshot-depth'
            ] = deltainfo.snapshotdepth  # pytype: disable=attribute-error
        else:
            dbg['snapshot-depth'] = 0
        target_revlog = b"UNKNOWN"
        target_type = self.revlog.target[0]
        target_key = self.revlog.target[1]
        if target_type == KIND_CHANGELOG:
            target_revlog = b'CHANGELOG:'
        elif target_type == KIND_MANIFESTLOG:
            target_revlog = b'MANIFESTLOG:'
            if target_key:
                target_revlog += b'%s:' % target_key
        elif target_type == KIND_FILELOG:
            target_revlog = b'FILELOG:'
            if target_key:
                target_revlog += b'%s:' % target_key
        dbg['target-revlog'] = target_revlog

        if self._debug_info is not None:
            self._debug_info.append(dbg)

        if self._write_debug is not None:
            msg = (
                b"DBG-DELTAS:"
                b" %-12s"
                b" rev=%d:"
                b" delta-base=%d"
                b" is-cached=%d"
                b" - search-rounds=%d"
                b" try-count=%d"
                b" - delta-type=%-6s"
                b" snap-depth=%d"
                b" - p1-chain-length=%d"
                b" p2-chain-length=%d"
                b" - duration=%f"
                b"\n"
            )
            msg %= (
                dbg["target-revlog"],
                dbg["revision"],
                dbg["delta-base"],
                dbg["using-cached-base"],
                dbg["search_round_count"],
                dbg["delta_try_count"],
                dbg["type"],
                dbg["snapshot-depth"],
                dbg["p1-chain-len"],
                dbg["p2-chain-len"],
                dbg["duration"],
            )
            self._write_debug(msg)
    return deltainfo
1383
1403
1384
1404
def delta_compression(default_compression_header, deltainfo):
    """return (COMPRESSION_MODE, deltainfo)

    used by revlog v2+ format to dispatch between PLAIN and DEFAULT
    compression.
    """
    header, body = deltainfo.data
    if not header and not body:
        # nothing to store at all... declare it uncompressed
        return COMP_MODE_PLAIN, deltainfo
    if not header:
        first_byte = body[0:1]
        if first_byte == b'\0':
            return COMP_MODE_PLAIN, deltainfo
        if first_byte == default_compression_header:
            return COMP_MODE_DEFAULT, deltainfo
        # unknown leading byte: keep the inline compression marker
        return COMP_MODE_INLINE, deltainfo
    if header == b'u':
        # we have a more efficient way to declare uncompressed data:
        # drop the b'u' marker and flag the entry as PLAIN instead
        return COMP_MODE_PLAIN, drop_u_compression(deltainfo)
    return COMP_MODE_INLINE, deltainfo
General Comments 0
You need to be logged in to leave comments. Login now