##// END OF EJS Templates
configitems: use standard "dynamicdefault" approach in edge case...
Raphaël Gomès -
r51652:f0ae403b default
parent child Browse files
Show More
@@ -1,2984 +1,2972 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import functools
9 import functools
10 import re
10 import re
11
11
12 from . import (
12 from . import (
13 encoding,
13 encoding,
14 error,
14 error,
15 )
15 )
16
16
17
17
def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones

    :ui: the ui instance whose ``_knownconfig`` registry is extended
    :extname: name of the extension providing the items (used in warnings)
    :configtable: mapping of section name -> mapping of item name -> configitem
    """
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            # An extension redefining an already-known item is suspicious;
            # surface it through the devel-warning machinery instead of
            # silently clobbering the previous definition.
            msg = b"extension '%s' overwrites config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)
30
30
31
31
class configitem:
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        # stored as a list so callers can rely on ordering of the aliases
        self.alias = list(alias)
        self.generic = generic
        # higher priority wins when several generic items match a lookup
        self.priority = priority
        self.experimental = experimental
        # compiled pattern, only meaningful for generic items
        self._re = None
        if generic:
            self._re = re.compile(self.name)
62
62
63
63
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        # subset of registered items that are generic (regex-matched)
        self._generics = set()

    def update(self, other):
        """merge another itemregister into this one (items and generics)"""
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        """return the item registered for ``key``, or None

        Exact (non-generic) registrations win; otherwise generic items are
        tried in (priority, name) order.
        """
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expression. Having the match
            # rooted to the start of the string will produce less surprising
            # result for user writing simple regex for sub-attribute.
            #
            # For example using "color\..*" match produces an unsurprising
            # result, while using search could suddenly match apparently
            # unrelated configuration that happens to contains "color."
            # anywhere. This is a tradeoff where we favor requiring ".*" on
            # some match to avoid the need to prefix most pattern with "^".
            # The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None
103
103
104
104
# Mapping of section name -> itemregister holding Mercurial's core config
# items; populated by the coreconfigitem() calls below.
coreitems = {}
106
106
107
107
def _register(configtable, *args, **kwargs):
    """create a configitem from *args*/**kwargs* and store it in *configtable*

    Raises error.ProgrammingError if an item with the same section and name
    was already registered.
    """
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item
115
115
116
116
# special value for case where the default is derived from other values
dynamicdefault = object()
119
119
# Registering actual config items


def getitemregister(configtable):
    """return a registration function bound to *configtable*

    The returned callable has the same signature as configitem() minus the
    table argument.
    """
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f
128
128
129
129
# registration helper bound to the table of core config items
coreconfigitem = getitemregister(coreitems)
131
131
132
132
def _registerdiffopts(section, configprefix=b''):
    """register the standard diff options under *section*

    :section: config section to register the items into,
    :configprefix: optional bytes prefix prepended to each option name
                   (e.g. b'commit.interactive.').

    All options default to False except 'unified' (context line count),
    which defaults to None.
    """
    # data-driven registration; order matches the historical call sequence
    for name, default in [
        (b'nodates', False),
        (b'showfunc', False),
        (b'unified', None),
        (b'git', False),
        (b'ignorews', False),
        (b'ignorewsamount', False),
        (b'ignoreblanklines', False),
        (b'ignorewseol', False),
        (b'nobinary', False),
        (b'noprefix', False),
        (b'word-diff', False),
    ]:
        coreconfigitem(
            section,
            configprefix + name,
            default=default,
        )
189
189
190
190
coreconfigitem(
    b'alias',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'auth',
    b'cookiefile',
    default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks',
    b'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle',
    b'mainreporoot',
    default=b'',
)
coreconfigitem(
    b'censor',
    b'policy',
    default=b'abort',
    experimental=True,
)
coreconfigitem(
    b'chgserver',
    b'idletimeout',
    default=3600,
)
coreconfigitem(
    b'chgserver',
    b'skiphash',
    default=False,
)
coreconfigitem(
    b'cmdserver',
    b'log',
    default=None,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-files',
    default=7,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-size',
    default=b'1 MB',
)
coreconfigitem(
    b'cmdserver',
    b'max-repo-cache',
    default=0,
    experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'message-encodings',
    default=list,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'cmdserver',
    b'shutdown-on-interrupt',
    default=True,
)
coreconfigitem(
    b'color',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'color',
    b'mode',
    default=b'auto',
)
coreconfigitem(
    b'color',
    b'pagermode',
    default=dynamicdefault,
)
coreconfigitem(
    b'command-templates',
    b'graphnode',
    default=None,
    alias=[(b'ui', b'graphnodetemplate')],
)
coreconfigitem(
    b'command-templates',
    b'log',
    default=None,
    alias=[(b'ui', b'logtemplate')],
)
coreconfigitem(
    b'command-templates',
    b'mergemarker',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
    alias=[(b'ui', b'mergemarkertemplate')],
)
coreconfigitem(
    b'command-templates',
    b'pre-merge-tool-output',
    default=None,
    alias=[(b'ui', b'pre-merge-tool-output-template')],
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary',
    default=None,
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary.*',
    default=dynamicdefault,
    generic=True,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands',
    b'commit.post-status',
    default=False,
)
coreconfigitem(
    b'commands',
    b'grep.all-files',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'merge.require-rev',
    default=False,
)
coreconfigitem(
    b'commands',
    b'push.require-revs',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.confirm',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.explicit-re-merge',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.mark-check',
    default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands',
    b'show.aliasprefix',
    default=list,
)
coreconfigitem(
    b'commands',
    b'status.relative',
    default=False,
)
coreconfigitem(
    b'commands',
    b'status.skipstates',
    default=[],
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'status.terse',
    default=b'',
)
coreconfigitem(
    b'commands',
    b'status.verbose',
    default=False,
)
coreconfigitem(
    b'commands',
    b'update.check',
    default=None,
)
coreconfigitem(
    b'commands',
    b'update.requiredest',
    default=False,
)
coreconfigitem(
    b'committemplate',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'convert',
    b'bzr.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.cache',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.fuzz',
    default=60,
)
coreconfigitem(
    b'convert',
    b'cvsps.logencoding',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergefrom',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergeto',
    default=None,
)
coreconfigitem(
    b'convert',
    b'git.committeractions',
    default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert',
    b'git.extrakeys',
    default=list,
)
coreconfigitem(
    b'convert',
    b'git.findcopiesharder',
    default=False,
)
coreconfigitem(
    b'convert',
    b'git.remoteprefix',
    default=b'remote',
)
coreconfigitem(
    b'convert',
    b'git.renamelimit',
    default=400,
)
coreconfigitem(
    b'convert',
    b'git.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'git.similarity',
    default=50,
)
coreconfigitem(
    b'convert',
    b'git.skipsubmodules',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.clonebranches',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.ignoreerrors',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.preserve-hash',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.revs',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.saverev',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.sourcename',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.startrev',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.tagsbranch',
    default=b'default',
)
coreconfigitem(
    b'convert',
    b'hg.usebranchnames',
    default=True,
)
coreconfigitem(
    b'convert',
    b'ignoreancestorcheck',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'convert',
    b'localtimezone',
    default=False,
)
coreconfigitem(
    b'convert',
    b'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem(
    b'convert',
    b'p4.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'skiptags',
    default=False,
)
coreconfigitem(
    b'convert',
    b'svn.debugsvnlog',
    default=True,
)
coreconfigitem(
    b'convert',
    b'svn.trunk',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.tags',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.branches',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'svn.dangerous-set-commit-dates',
    default=False,
)
coreconfigitem(
    b'debug',
    b'dirstate.delaywrite',
    default=0,
)
coreconfigitem(
    b'debug',
    b'revlog.verifyposition.changelog',
    default=b'',
)
coreconfigitem(
    b'debug',
    b'revlog.debug-delta',
    default=False,
)
# display extra information about the bundling process
coreconfigitem(
    b'debug',
    b'bundling-stats',
    default=False,
)
# display extra information about the unbundling process
coreconfigitem(
    b'debug',
    b'unbundling-stats',
    default=False,
)
coreconfigitem(
    b'defaults',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'devel',
    b'all-warnings',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle2.debug',
    default=False,
)
# which kind of delta to put in the bundled changegroup. Possible value
# - '': use default behavior
# - p1: force to always use delta against p1
# - full: force to always use full content
coreconfigitem(
    b'devel',
    b'bundle.delta',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'cache-vfs',
    default=None,
)
coreconfigitem(
    b'devel',
    b'check-locks',
    default=False,
)
coreconfigitem(
    b'devel',
    b'check-relroot',
    default=False,
)
# Track copy information for all file, not just "added" one (very slow)
coreconfigitem(
    b'devel',
    b'copy-tracing.trace-all-files',
    default=False,
)
coreconfigitem(
    b'devel',
    b'default-date',
    default=None,
)
coreconfigitem(
    b'devel',
    b'deprec-warn',
    default=False,
)
# possible values:
# - auto (the default)
# - force-append
# - force-new
coreconfigitem(
    b'devel',
    b'dirstate.v2.data_update_mode',
    default="auto",
)
coreconfigitem(
    b'devel',
    b'disableloaddefaultcerts',
    default=False,
)
coreconfigitem(
    b'devel',
    b'warn-empty-changegroup',
    default=False,
)
coreconfigitem(
    b'devel',
    b'legacy.exchange',
    default=list,
)
# When True, revlogs use a special reference version of the nodemap, that is not
# performant but is "known" to behave properly.
coreconfigitem(
    b'devel',
    b'persistent-nodemap',
    default=False,
)
coreconfigitem(
    b'devel',
    b'servercafile',
    default=b'',
)
# This config option is intended for use in tests only. It is a giant
# footgun to kill security. Don't define it.
coreconfigitem(
    b'devel',
    b'server-insecure-exact-protocol',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverrequirecert',
    default=False,
)
# Makes the status algorithm wait for the existence of this file
# (or until a timeout of `devel.sync.status.pre-dirstate-write-file-timeout`
# seconds) before taking the lock and writing the dirstate.
# Status signals that it's ready to wait by creating a file
# with the same name + `.waiting`.
# Useful when testing race conditions.
coreconfigitem(
    b'devel',
    b'sync.status.pre-dirstate-write-file',
    default=None,
)
coreconfigitem(
    b'devel',
    b'sync.status.pre-dirstate-write-file-timeout',
    default=2,
)
coreconfigitem(
    b'devel',
    b'sync.dirstate.post-docket-read-file',
    default=None,
)
coreconfigitem(
    b'devel',
    b'sync.dirstate.post-docket-read-file-timeout',
    default=2,
)
coreconfigitem(
    b'devel',
    b'sync.dirstate.pre-read-file',
    default=None,
)
coreconfigitem(
    b'devel',
    b'sync.dirstate.pre-read-file-timeout',
    default=2,
)
coreconfigitem(
    b'devel',
    b'strip-obsmarkers',
    default=True,
)
coreconfigitem(
    b'devel',
    b'warn-config',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-default',
    default=None,
)
coreconfigitem(
    b'devel',
    b'user.obsmarker',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-unknown',
    default=None,
)
coreconfigitem(
    b'devel',
    b'debug.copies',
    default=False,
)
coreconfigitem(
    b'devel',
    b'copy-tracing.multi-thread',
    default=True,
)
coreconfigitem(
    b'devel',
    b'debug.extensions',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.repo-filters',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.peer-request',
    default=False,
)
# If discovery.exchange-heads is False, the discovery will not start with
# remote head fetching and local head querying.
coreconfigitem(
    b'devel',
    b'discovery.exchange-heads',
    default=True,
)
800 # If devel.debug.abort-update is True, then any merge with the working copy,
800 # If devel.debug.abort-update is True, then any merge with the working copy,
801 # e.g. [hg update], will be aborted after figuring out what needs to be done,
801 # e.g. [hg update], will be aborted after figuring out what needs to be done,
802 # but before spawning the parallel worker
802 # but before spawning the parallel worker
803 coreconfigitem(
803 coreconfigitem(
804 b'devel',
804 b'devel',
805 b'debug.abort-update',
805 b'debug.abort-update',
806 default=False,
806 default=False,
807 )
807 )
808 # If discovery.grow-sample is False, the sample size used in set discovery will
808 # If discovery.grow-sample is False, the sample size used in set discovery will
809 # not be increased through the process
809 # not be increased through the process
810 coreconfigitem(
810 coreconfigitem(
811 b'devel',
811 b'devel',
812 b'discovery.grow-sample',
812 b'discovery.grow-sample',
813 default=True,
813 default=True,
814 )
814 )
815 # When discovery.grow-sample.dynamic is True, the default, the sample size is
815 # When discovery.grow-sample.dynamic is True, the default, the sample size is
816 # adapted to the shape of the undecided set (it is set to the max of:
816 # adapted to the shape of the undecided set (it is set to the max of:
817 # <target-size>, len(roots(undecided)), len(heads(undecided)
817 # <target-size>, len(roots(undecided)), len(heads(undecided)
818 coreconfigitem(
818 coreconfigitem(
819 b'devel',
819 b'devel',
820 b'discovery.grow-sample.dynamic',
820 b'discovery.grow-sample.dynamic',
821 default=True,
821 default=True,
822 )
822 )
823 # discovery.grow-sample.rate control the rate at which the sample grow
823 # discovery.grow-sample.rate control the rate at which the sample grow
824 coreconfigitem(
824 coreconfigitem(
825 b'devel',
825 b'devel',
826 b'discovery.grow-sample.rate',
826 b'discovery.grow-sample.rate',
827 default=1.05,
827 default=1.05,
828 )
828 )
829 # If discovery.randomize is False, random sampling during discovery are
829 # If discovery.randomize is False, random sampling during discovery are
830 # deterministic. It is meant for integration tests.
830 # deterministic. It is meant for integration tests.
831 coreconfigitem(
831 coreconfigitem(
832 b'devel',
832 b'devel',
833 b'discovery.randomize',
833 b'discovery.randomize',
834 default=True,
834 default=True,
835 )
835 )
836 # Control the initial size of the discovery sample
836 # Control the initial size of the discovery sample
837 coreconfigitem(
837 coreconfigitem(
838 b'devel',
838 b'devel',
839 b'discovery.sample-size',
839 b'discovery.sample-size',
840 default=200,
840 default=200,
841 )
841 )
842 # Control the initial size of the discovery for initial change
842 # Control the initial size of the discovery for initial change
843 coreconfigitem(
843 coreconfigitem(
844 b'devel',
844 b'devel',
845 b'discovery.sample-size.initial',
845 b'discovery.sample-size.initial',
846 default=100,
846 default=100,
847 )
847 )
848 _registerdiffopts(section=b'diff')
848 _registerdiffopts(section=b'diff')
849 coreconfigitem(
849 coreconfigitem(
850 b'diff',
850 b'diff',
851 b'merge',
851 b'merge',
852 default=False,
852 default=False,
853 experimental=True,
853 experimental=True,
854 )
854 )
855 coreconfigitem(
855 coreconfigitem(
856 b'email',
856 b'email',
857 b'bcc',
857 b'bcc',
858 default=None,
858 default=None,
859 )
859 )
860 coreconfigitem(
860 coreconfigitem(
861 b'email',
861 b'email',
862 b'cc',
862 b'cc',
863 default=None,
863 default=None,
864 )
864 )
865 coreconfigitem(
865 coreconfigitem(
866 b'email',
866 b'email',
867 b'charsets',
867 b'charsets',
868 default=list,
868 default=list,
869 )
869 )
870 coreconfigitem(
870 coreconfigitem(
871 b'email',
871 b'email',
872 b'from',
872 b'from',
873 default=None,
873 default=None,
874 )
874 )
875 coreconfigitem(
875 coreconfigitem(
876 b'email',
876 b'email',
877 b'method',
877 b'method',
878 default=b'smtp',
878 default=b'smtp',
879 )
879 )
880 coreconfigitem(
880 coreconfigitem(
881 b'email',
881 b'email',
882 b'reply-to',
882 b'reply-to',
883 default=None,
883 default=None,
884 )
884 )
885 coreconfigitem(
885 coreconfigitem(
886 b'email',
886 b'email',
887 b'to',
887 b'to',
888 default=None,
888 default=None,
889 )
889 )
890 coreconfigitem(
890 coreconfigitem(
891 b'experimental',
891 b'experimental',
892 b'archivemetatemplate',
892 b'archivemetatemplate',
893 default=dynamicdefault,
893 default=dynamicdefault,
894 )
894 )
895 coreconfigitem(
895 coreconfigitem(
896 b'experimental',
896 b'experimental',
897 b'auto-publish',
897 b'auto-publish',
898 default=b'publish',
898 default=b'publish',
899 )
899 )
900 coreconfigitem(
900 coreconfigitem(
901 b'experimental',
901 b'experimental',
902 b'bundle-phases',
902 b'bundle-phases',
903 default=False,
903 default=False,
904 )
904 )
905 coreconfigitem(
905 coreconfigitem(
906 b'experimental',
906 b'experimental',
907 b'bundle2-advertise',
907 b'bundle2-advertise',
908 default=True,
908 default=True,
909 )
909 )
910 coreconfigitem(
910 coreconfigitem(
911 b'experimental',
911 b'experimental',
912 b'bundle2-output-capture',
912 b'bundle2-output-capture',
913 default=False,
913 default=False,
914 )
914 )
915 coreconfigitem(
915 coreconfigitem(
916 b'experimental',
916 b'experimental',
917 b'bundle2.pushback',
917 b'bundle2.pushback',
918 default=False,
918 default=False,
919 )
919 )
920 coreconfigitem(
920 coreconfigitem(
921 b'experimental',
921 b'experimental',
922 b'bundle2lazylocking',
922 b'bundle2lazylocking',
923 default=False,
923 default=False,
924 )
924 )
925 coreconfigitem(
925 coreconfigitem(
926 b'experimental',
926 b'experimental',
927 b'bundlecomplevel',
927 b'bundlecomplevel',
928 default=None,
928 default=None,
929 )
929 )
930 coreconfigitem(
930 coreconfigitem(
931 b'experimental',
931 b'experimental',
932 b'bundlecomplevel.bzip2',
932 b'bundlecomplevel.bzip2',
933 default=None,
933 default=None,
934 )
934 )
935 coreconfigitem(
935 coreconfigitem(
936 b'experimental',
936 b'experimental',
937 b'bundlecomplevel.gzip',
937 b'bundlecomplevel.gzip',
938 default=None,
938 default=None,
939 )
939 )
940 coreconfigitem(
940 coreconfigitem(
941 b'experimental',
941 b'experimental',
942 b'bundlecomplevel.none',
942 b'bundlecomplevel.none',
943 default=None,
943 default=None,
944 )
944 )
945 coreconfigitem(
945 coreconfigitem(
946 b'experimental',
946 b'experimental',
947 b'bundlecomplevel.zstd',
947 b'bundlecomplevel.zstd',
948 default=None,
948 default=None,
949 )
949 )
950 coreconfigitem(
950 coreconfigitem(
951 b'experimental',
951 b'experimental',
952 b'bundlecompthreads',
952 b'bundlecompthreads',
953 default=None,
953 default=None,
954 )
954 )
955 coreconfigitem(
955 coreconfigitem(
956 b'experimental',
956 b'experimental',
957 b'bundlecompthreads.bzip2',
957 b'bundlecompthreads.bzip2',
958 default=None,
958 default=None,
959 )
959 )
960 coreconfigitem(
960 coreconfigitem(
961 b'experimental',
961 b'experimental',
962 b'bundlecompthreads.gzip',
962 b'bundlecompthreads.gzip',
963 default=None,
963 default=None,
964 )
964 )
965 coreconfigitem(
965 coreconfigitem(
966 b'experimental',
966 b'experimental',
967 b'bundlecompthreads.none',
967 b'bundlecompthreads.none',
968 default=None,
968 default=None,
969 )
969 )
970 coreconfigitem(
970 coreconfigitem(
971 b'experimental',
971 b'experimental',
972 b'bundlecompthreads.zstd',
972 b'bundlecompthreads.zstd',
973 default=None,
973 default=None,
974 )
974 )
975 coreconfigitem(
975 coreconfigitem(
976 b'experimental',
976 b'experimental',
977 b'changegroup3',
977 b'changegroup3',
978 default=True,
978 default=True,
979 )
979 )
980 coreconfigitem(
980 coreconfigitem(
981 b'experimental',
981 b'experimental',
982 b'changegroup4',
982 b'changegroup4',
983 default=False,
983 default=False,
984 )
984 )
985
985
986 # might remove rank configuration once the computation has no impact
986 # might remove rank configuration once the computation has no impact
987 coreconfigitem(
987 coreconfigitem(
988 b'experimental',
988 b'experimental',
989 b'changelog-v2.compute-rank',
989 b'changelog-v2.compute-rank',
990 default=True,
990 default=True,
991 )
991 )
992 coreconfigitem(
992 coreconfigitem(
993 b'experimental',
993 b'experimental',
994 b'cleanup-as-archived',
994 b'cleanup-as-archived',
995 default=False,
995 default=False,
996 )
996 )
997 coreconfigitem(
997 coreconfigitem(
998 b'experimental',
998 b'experimental',
999 b'clientcompressionengines',
999 b'clientcompressionengines',
1000 default=list,
1000 default=list,
1001 )
1001 )
1002 coreconfigitem(
1002 coreconfigitem(
1003 b'experimental',
1003 b'experimental',
1004 b'copytrace',
1004 b'copytrace',
1005 default=b'on',
1005 default=b'on',
1006 )
1006 )
1007 coreconfigitem(
1007 coreconfigitem(
1008 b'experimental',
1008 b'experimental',
1009 b'copytrace.movecandidateslimit',
1009 b'copytrace.movecandidateslimit',
1010 default=100,
1010 default=100,
1011 )
1011 )
1012 coreconfigitem(
1012 coreconfigitem(
1013 b'experimental',
1013 b'experimental',
1014 b'copytrace.sourcecommitlimit',
1014 b'copytrace.sourcecommitlimit',
1015 default=100,
1015 default=100,
1016 )
1016 )
1017 coreconfigitem(
1017 coreconfigitem(
1018 b'experimental',
1018 b'experimental',
1019 b'copies.read-from',
1019 b'copies.read-from',
1020 default=b"filelog-only",
1020 default=b"filelog-only",
1021 )
1021 )
1022 coreconfigitem(
1022 coreconfigitem(
1023 b'experimental',
1023 b'experimental',
1024 b'copies.write-to',
1024 b'copies.write-to',
1025 default=b'filelog-only',
1025 default=b'filelog-only',
1026 )
1026 )
1027 coreconfigitem(
1027 coreconfigitem(
1028 b'experimental',
1028 b'experimental',
1029 b'crecordtest',
1029 b'crecordtest',
1030 default=None,
1030 default=None,
1031 )
1031 )
1032 coreconfigitem(
1032 coreconfigitem(
1033 b'experimental',
1033 b'experimental',
1034 b'directaccess',
1034 b'directaccess',
1035 default=False,
1035 default=False,
1036 )
1036 )
1037 coreconfigitem(
1037 coreconfigitem(
1038 b'experimental',
1038 b'experimental',
1039 b'directaccess.revnums',
1039 b'directaccess.revnums',
1040 default=False,
1040 default=False,
1041 )
1041 )
1042 coreconfigitem(
1042 coreconfigitem(
1043 b'experimental',
1043 b'experimental',
1044 b'editortmpinhg',
1044 b'editortmpinhg',
1045 default=False,
1045 default=False,
1046 )
1046 )
1047 coreconfigitem(
1047 coreconfigitem(
1048 b'experimental',
1048 b'experimental',
1049 b'evolution',
1049 b'evolution',
1050 default=list,
1050 default=list,
1051 )
1051 )
1052 coreconfigitem(
1052 coreconfigitem(
1053 b'experimental',
1053 b'experimental',
1054 b'evolution.allowdivergence',
1054 b'evolution.allowdivergence',
1055 default=False,
1055 default=False,
1056 alias=[(b'experimental', b'allowdivergence')],
1056 alias=[(b'experimental', b'allowdivergence')],
1057 )
1057 )
1058 coreconfigitem(
1058 coreconfigitem(
1059 b'experimental',
1059 b'experimental',
1060 b'evolution.allowunstable',
1060 b'evolution.allowunstable',
1061 default=None,
1061 default=None,
1062 )
1062 )
1063 coreconfigitem(
1063 coreconfigitem(
1064 b'experimental',
1064 b'experimental',
1065 b'evolution.createmarkers',
1065 b'evolution.createmarkers',
1066 default=None,
1066 default=None,
1067 )
1067 )
1068 coreconfigitem(
1068 coreconfigitem(
1069 b'experimental',
1069 b'experimental',
1070 b'evolution.effect-flags',
1070 b'evolution.effect-flags',
1071 default=True,
1071 default=True,
1072 alias=[(b'experimental', b'effect-flags')],
1072 alias=[(b'experimental', b'effect-flags')],
1073 )
1073 )
1074 coreconfigitem(
1074 coreconfigitem(
1075 b'experimental',
1075 b'experimental',
1076 b'evolution.exchange',
1076 b'evolution.exchange',
1077 default=None,
1077 default=None,
1078 )
1078 )
1079 coreconfigitem(
1079 coreconfigitem(
1080 b'experimental',
1080 b'experimental',
1081 b'evolution.bundle-obsmarker',
1081 b'evolution.bundle-obsmarker',
1082 default=False,
1082 default=False,
1083 )
1083 )
1084 coreconfigitem(
1084 coreconfigitem(
1085 b'experimental',
1085 b'experimental',
1086 b'evolution.bundle-obsmarker:mandatory',
1086 b'evolution.bundle-obsmarker:mandatory',
1087 default=True,
1087 default=True,
1088 )
1088 )
1089 coreconfigitem(
1089 coreconfigitem(
1090 b'experimental',
1090 b'experimental',
1091 b'log.topo',
1091 b'log.topo',
1092 default=False,
1092 default=False,
1093 )
1093 )
1094 coreconfigitem(
1094 coreconfigitem(
1095 b'experimental',
1095 b'experimental',
1096 b'evolution.report-instabilities',
1096 b'evolution.report-instabilities',
1097 default=True,
1097 default=True,
1098 )
1098 )
1099 coreconfigitem(
1099 coreconfigitem(
1100 b'experimental',
1100 b'experimental',
1101 b'evolution.track-operation',
1101 b'evolution.track-operation',
1102 default=True,
1102 default=True,
1103 )
1103 )
1104 # repo-level config to exclude a revset visibility
1104 # repo-level config to exclude a revset visibility
1105 #
1105 #
1106 # The target use case is to use `share` to expose different subset of the same
1106 # The target use case is to use `share` to expose different subset of the same
1107 # repository, especially server side. See also `server.view`.
1107 # repository, especially server side. See also `server.view`.
1108 coreconfigitem(
1108 coreconfigitem(
1109 b'experimental',
1109 b'experimental',
1110 b'extra-filter-revs',
1110 b'extra-filter-revs',
1111 default=None,
1111 default=None,
1112 )
1112 )
1113 coreconfigitem(
1113 coreconfigitem(
1114 b'experimental',
1114 b'experimental',
1115 b'maxdeltachainspan',
1115 b'maxdeltachainspan',
1116 default=-1,
1116 default=-1,
1117 )
1117 )
1118 # tracks files which were undeleted (merge might delete them but we explicitly
1118 # tracks files which were undeleted (merge might delete them but we explicitly
1119 # kept/undeleted them) and creates new filenodes for them
1119 # kept/undeleted them) and creates new filenodes for them
1120 coreconfigitem(
1120 coreconfigitem(
1121 b'experimental',
1121 b'experimental',
1122 b'merge-track-salvaged',
1122 b'merge-track-salvaged',
1123 default=False,
1123 default=False,
1124 )
1124 )
1125 coreconfigitem(
1125 coreconfigitem(
1126 b'experimental',
1126 b'experimental',
1127 b'mmapindexthreshold',
1127 b'mmapindexthreshold',
1128 default=None,
1128 default=None,
1129 )
1129 )
1130 coreconfigitem(
1130 coreconfigitem(
1131 b'experimental',
1131 b'experimental',
1132 b'narrow',
1132 b'narrow',
1133 default=False,
1133 default=False,
1134 )
1134 )
1135 coreconfigitem(
1135 coreconfigitem(
1136 b'experimental',
1136 b'experimental',
1137 b'nonnormalparanoidcheck',
1137 b'nonnormalparanoidcheck',
1138 default=False,
1138 default=False,
1139 )
1139 )
1140 coreconfigitem(
1140 coreconfigitem(
1141 b'experimental',
1141 b'experimental',
1142 b'exportableenviron',
1142 b'exportableenviron',
1143 default=list,
1143 default=list,
1144 )
1144 )
1145 coreconfigitem(
1145 coreconfigitem(
1146 b'experimental',
1146 b'experimental',
1147 b'extendedheader.index',
1147 b'extendedheader.index',
1148 default=None,
1148 default=None,
1149 )
1149 )
1150 coreconfigitem(
1150 coreconfigitem(
1151 b'experimental',
1151 b'experimental',
1152 b'extendedheader.similarity',
1152 b'extendedheader.similarity',
1153 default=False,
1153 default=False,
1154 )
1154 )
1155 coreconfigitem(
1155 coreconfigitem(
1156 b'experimental',
1156 b'experimental',
1157 b'graphshorten',
1157 b'graphshorten',
1158 default=False,
1158 default=False,
1159 )
1159 )
1160 coreconfigitem(
1160 coreconfigitem(
1161 b'experimental',
1161 b'experimental',
1162 b'graphstyle.parent',
1162 b'graphstyle.parent',
1163 default=dynamicdefault,
1163 default=dynamicdefault,
1164 )
1164 )
1165 coreconfigitem(
1165 coreconfigitem(
1166 b'experimental',
1166 b'experimental',
1167 b'graphstyle.missing',
1167 b'graphstyle.missing',
1168 default=dynamicdefault,
1168 default=dynamicdefault,
1169 )
1169 )
1170 coreconfigitem(
1170 coreconfigitem(
1171 b'experimental',
1171 b'experimental',
1172 b'graphstyle.grandparent',
1172 b'graphstyle.grandparent',
1173 default=dynamicdefault,
1173 default=dynamicdefault,
1174 )
1174 )
1175 coreconfigitem(
1175 coreconfigitem(
1176 b'experimental',
1176 b'experimental',
1177 b'hook-track-tags',
1177 b'hook-track-tags',
1178 default=False,
1178 default=False,
1179 )
1179 )
1180 coreconfigitem(
1180 coreconfigitem(
1181 b'experimental',
1181 b'experimental',
1182 b'httppostargs',
1182 b'httppostargs',
1183 default=False,
1183 default=False,
1184 )
1184 )
1185 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1185 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1186 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1186 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1187
1187
1188 coreconfigitem(
1188 coreconfigitem(
1189 b'experimental',
1189 b'experimental',
1190 b'obsmarkers-exchange-debug',
1190 b'obsmarkers-exchange-debug',
1191 default=False,
1191 default=False,
1192 )
1192 )
1193 coreconfigitem(
1193 coreconfigitem(
1194 b'experimental',
1194 b'experimental',
1195 b'remotenames',
1195 b'remotenames',
1196 default=False,
1196 default=False,
1197 )
1197 )
1198 coreconfigitem(
1198 coreconfigitem(
1199 b'experimental',
1199 b'experimental',
1200 b'removeemptydirs',
1200 b'removeemptydirs',
1201 default=True,
1201 default=True,
1202 )
1202 )
1203 coreconfigitem(
1203 coreconfigitem(
1204 b'experimental',
1204 b'experimental',
1205 b'revert.interactive.select-to-keep',
1205 b'revert.interactive.select-to-keep',
1206 default=False,
1206 default=False,
1207 )
1207 )
1208 coreconfigitem(
1208 coreconfigitem(
1209 b'experimental',
1209 b'experimental',
1210 b'revisions.prefixhexnode',
1210 b'revisions.prefixhexnode',
1211 default=False,
1211 default=False,
1212 )
1212 )
1213 # "out of experimental" todo list.
1213 # "out of experimental" todo list.
1214 #
1214 #
1215 # * include management of a persistent nodemap in the main docket
1215 # * include management of a persistent nodemap in the main docket
1216 # * enforce a "no-truncate" policy for mmap safety
1216 # * enforce a "no-truncate" policy for mmap safety
1217 # - for censoring operation
1217 # - for censoring operation
1218 # - for stripping operation
1218 # - for stripping operation
1219 # - for rollback operation
1219 # - for rollback operation
1220 # * proper streaming (race free) of the docket file
1220 # * proper streaming (race free) of the docket file
1221 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1221 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1222 # * Exchange-wise, we will also need to do something more efficient than
1222 # * Exchange-wise, we will also need to do something more efficient than
1223 # keeping references to the affected revlogs, especially memory-wise when
1223 # keeping references to the affected revlogs, especially memory-wise when
1224 # rewriting sidedata.
1224 # rewriting sidedata.
1225 # * introduce a proper solution to reduce the number of filelog related files.
1225 # * introduce a proper solution to reduce the number of filelog related files.
1226 # * use caching for reading sidedata (similar to what we do for data).
1226 # * use caching for reading sidedata (similar to what we do for data).
1227 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1227 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1228 # * Improvement to consider
1228 # * Improvement to consider
1229 # - avoid compression header in chunk using the default compression?
1229 # - avoid compression header in chunk using the default compression?
1230 # - forbid "inline" compression mode entirely?
1230 # - forbid "inline" compression mode entirely?
1231 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1231 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1232 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1232 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1233 # - keep track of chain base or size (probably not that useful anymore)
1233 # - keep track of chain base or size (probably not that useful anymore)
1234 coreconfigitem(
1234 coreconfigitem(
1235 b'experimental',
1235 b'experimental',
1236 b'revlogv2',
1236 b'revlogv2',
1237 default=None,
1237 default=None,
1238 )
1238 )
1239 coreconfigitem(
1239 coreconfigitem(
1240 b'experimental',
1240 b'experimental',
1241 b'revisions.disambiguatewithin',
1241 b'revisions.disambiguatewithin',
1242 default=None,
1242 default=None,
1243 )
1243 )
1244 coreconfigitem(
1244 coreconfigitem(
1245 b'experimental',
1245 b'experimental',
1246 b'rust.index',
1246 b'rust.index',
1247 default=False,
1247 default=False,
1248 )
1248 )
1249 coreconfigitem(
1249 coreconfigitem(
1250 b'experimental',
1250 b'experimental',
1251 b'server.allow-hidden-access',
1251 b'server.allow-hidden-access',
1252 default=list,
1252 default=list,
1253 )
1253 )
1254 coreconfigitem(
1254 coreconfigitem(
1255 b'experimental',
1255 b'experimental',
1256 b'server.filesdata.recommended-batch-size',
1256 b'server.filesdata.recommended-batch-size',
1257 default=50000,
1257 default=50000,
1258 )
1258 )
1259 coreconfigitem(
1259 coreconfigitem(
1260 b'experimental',
1260 b'experimental',
1261 b'server.manifestdata.recommended-batch-size',
1261 b'server.manifestdata.recommended-batch-size',
1262 default=100000,
1262 default=100000,
1263 )
1263 )
1264 coreconfigitem(
1264 coreconfigitem(
1265 b'experimental',
1265 b'experimental',
1266 b'server.stream-narrow-clones',
1266 b'server.stream-narrow-clones',
1267 default=False,
1267 default=False,
1268 )
1268 )
1269 coreconfigitem(
1269 coreconfigitem(
1270 b'experimental',
1270 b'experimental',
1271 b'single-head-per-branch',
1271 b'single-head-per-branch',
1272 default=False,
1272 default=False,
1273 )
1273 )
1274 coreconfigitem(
1274 coreconfigitem(
1275 b'experimental',
1275 b'experimental',
1276 b'single-head-per-branch:account-closed-heads',
1276 b'single-head-per-branch:account-closed-heads',
1277 default=False,
1277 default=False,
1278 )
1278 )
1279 coreconfigitem(
1279 coreconfigitem(
1280 b'experimental',
1280 b'experimental',
1281 b'single-head-per-branch:public-changes-only',
1281 b'single-head-per-branch:public-changes-only',
1282 default=False,
1282 default=False,
1283 )
1283 )
1284 coreconfigitem(
1284 coreconfigitem(
1285 b'experimental',
1285 b'experimental',
1286 b'sparse-read',
1286 b'sparse-read',
1287 default=False,
1287 default=False,
1288 )
1288 )
1289 coreconfigitem(
1289 coreconfigitem(
1290 b'experimental',
1290 b'experimental',
1291 b'sparse-read.density-threshold',
1291 b'sparse-read.density-threshold',
1292 default=0.50,
1292 default=0.50,
1293 )
1293 )
1294 coreconfigitem(
1294 coreconfigitem(
1295 b'experimental',
1295 b'experimental',
1296 b'sparse-read.min-gap-size',
1296 b'sparse-read.min-gap-size',
1297 default=b'65K',
1297 default=b'65K',
1298 )
1298 )
1299 coreconfigitem(
1299 coreconfigitem(
1300 b'experimental',
1300 b'experimental',
1301 b'stream-v3',
1301 b'stream-v3',
1302 default=False,
1302 default=False,
1303 )
1303 )
1304 coreconfigitem(
1304 coreconfigitem(
1305 b'experimental',
1305 b'experimental',
1306 b'treemanifest',
1306 b'treemanifest',
1307 default=False,
1307 default=False,
1308 )
1308 )
1309 coreconfigitem(
1309 coreconfigitem(
1310 b'experimental',
1310 b'experimental',
1311 b'update.atomic-file',
1311 b'update.atomic-file',
1312 default=False,
1312 default=False,
1313 )
1313 )
1314 coreconfigitem(
1314 coreconfigitem(
1315 b'experimental',
1315 b'experimental',
1316 b'web.full-garbage-collection-rate',
1316 b'web.full-garbage-collection-rate',
1317 default=1, # still forcing a full collection on each request
1317 default=1, # still forcing a full collection on each request
1318 )
1318 )
1319 coreconfigitem(
1319 coreconfigitem(
1320 b'experimental',
1320 b'experimental',
1321 b'worker.wdir-get-thread-safe',
1321 b'worker.wdir-get-thread-safe',
1322 default=False,
1322 default=False,
1323 )
1323 )
1324 coreconfigitem(
1324 coreconfigitem(
1325 b'experimental',
1325 b'experimental',
1326 b'worker.repository-upgrade',
1326 b'worker.repository-upgrade',
1327 default=False,
1327 default=False,
1328 )
1328 )
1329 coreconfigitem(
1329 coreconfigitem(
1330 b'experimental',
1330 b'experimental',
1331 b'xdiff',
1331 b'xdiff',
1332 default=False,
1332 default=False,
1333 )
1333 )
1334 coreconfigitem(
1334 coreconfigitem(
1335 b'extensions',
1335 b'extensions',
1336 b'[^:]*',
1336 b'[^:]*',
1337 default=None,
1337 default=None,
1338 generic=True,
1338 generic=True,
1339 )
1339 )
1340 coreconfigitem(
1340 coreconfigitem(
1341 b'extensions',
1341 b'extensions',
1342 b'[^:]*:required',
1342 b'[^:]*:required',
1343 default=False,
1343 default=False,
1344 generic=True,
1344 generic=True,
1345 )
1345 )
1346 coreconfigitem(
1346 coreconfigitem(
1347 b'extdata',
1347 b'extdata',
1348 b'.*',
1348 b'.*',
1349 default=None,
1349 default=None,
1350 generic=True,
1350 generic=True,
1351 )
1351 )
1352 coreconfigitem(
1352 coreconfigitem(
1353 b'format',
1353 b'format',
1354 b'bookmarks-in-store',
1354 b'bookmarks-in-store',
1355 default=False,
1355 default=False,
1356 )
1356 )
1357 coreconfigitem(
1357 coreconfigitem(
1358 b'format',
1358 b'format',
1359 b'chunkcachesize',
1359 b'chunkcachesize',
1360 default=None,
1360 default=None,
1361 experimental=True,
1361 experimental=True,
1362 )
1362 )
1363 coreconfigitem(
1363 coreconfigitem(
1364 # Enable this dirstate format *when creating a new repository*.
1364 # Enable this dirstate format *when creating a new repository*.
1365 # Which format to use for existing repos is controlled by .hg/requires
1365 # Which format to use for existing repos is controlled by .hg/requires
1366 b'format',
1366 b'format',
1367 b'use-dirstate-v2',
1367 b'use-dirstate-v2',
1368 default=False,
1368 default=False,
1369 experimental=True,
1369 experimental=True,
1370 alias=[(b'format', b'exp-rc-dirstate-v2')],
1370 alias=[(b'format', b'exp-rc-dirstate-v2')],
1371 )
1371 )
1372 coreconfigitem(
1372 coreconfigitem(
1373 b'format',
1373 b'format',
1374 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
1374 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
1375 default=False,
1375 default=False,
1376 experimental=True,
1376 experimental=True,
1377 )
1377 )
1378 coreconfigitem(
1378 coreconfigitem(
1379 b'format',
1379 b'format',
1380 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet',
1380 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet',
1381 default=False,
1381 default=False,
1382 experimental=True,
1382 experimental=True,
1383 )
1383 )
1384 coreconfigitem(
1384 coreconfigitem(
1385 b'format',
1385 b'format',
1386 b'use-dirstate-tracked-hint',
1386 b'use-dirstate-tracked-hint',
1387 default=False,
1387 default=False,
1388 experimental=True,
1388 experimental=True,
1389 )
1389 )
1390 coreconfigitem(
1390 coreconfigitem(
1391 b'format',
1391 b'format',
1392 b'use-dirstate-tracked-hint.version',
1392 b'use-dirstate-tracked-hint.version',
1393 default=1,
1393 default=1,
1394 experimental=True,
1394 experimental=True,
1395 )
1395 )
1396 coreconfigitem(
1396 coreconfigitem(
1397 b'format',
1397 b'format',
1398 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
1398 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
1399 default=False,
1399 default=False,
1400 experimental=True,
1400 experimental=True,
1401 )
1401 )
1402 coreconfigitem(
1402 coreconfigitem(
1403 b'format',
1403 b'format',
1404 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet',
1404 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet',
1405 default=False,
1405 default=False,
1406 experimental=True,
1406 experimental=True,
1407 )
1407 )
1408 coreconfigitem(
1408 coreconfigitem(
1409 b'format',
1409 b'format',
1410 b'dotencode',
1410 b'dotencode',
1411 default=True,
1411 default=True,
1412 )
1412 )
1413 coreconfigitem(
1413 coreconfigitem(
1414 b'format',
1414 b'format',
1415 b'generaldelta',
1415 b'generaldelta',
1416 default=False,
1416 default=False,
1417 experimental=True,
1417 experimental=True,
1418 )
1418 )
1419 coreconfigitem(
1419 coreconfigitem(
1420 b'format',
1420 b'format',
1421 b'manifestcachesize',
1421 b'manifestcachesize',
1422 default=None,
1422 default=None,
1423 experimental=True,
1423 experimental=True,
1424 )
1424 )
1425 coreconfigitem(
1425 coreconfigitem(
1426 b'format',
1426 b'format',
1427 b'maxchainlen',
1427 b'maxchainlen',
1428 default=dynamicdefault,
1428 default=dynamicdefault,
1429 experimental=True,
1429 experimental=True,
1430 )
1430 )
1431 coreconfigitem(
1431 coreconfigitem(
1432 b'format',
1432 b'format',
1433 b'obsstore-version',
1433 b'obsstore-version',
1434 default=None,
1434 default=None,
1435 )
1435 )
1436 coreconfigitem(
1436 coreconfigitem(
1437 b'format',
1437 b'format',
1438 b'sparse-revlog',
1438 b'sparse-revlog',
1439 default=True,
1439 default=True,
1440 )
1440 )
1441 coreconfigitem(
1441 coreconfigitem(
1442 b'format',
1442 b'format',
1443 b'revlog-compression',
1443 b'revlog-compression',
1444 default=lambda: [b'zstd', b'zlib'],
1444 default=lambda: [b'zstd', b'zlib'],
1445 alias=[(b'experimental', b'format.compression')],
1445 alias=[(b'experimental', b'format.compression')],
1446 )
1446 )
1447 # Experimental TODOs:
1447 # Experimental TODOs:
1448 #
1448 #
1449 # * Same as for revlogv2 (but for the reduction of the number of files)
1449 # * Same as for revlogv2 (but for the reduction of the number of files)
1450 # * Actually computing the rank of changesets
1450 # * Actually computing the rank of changesets
1451 # * Improvement to investigate
1451 # * Improvement to investigate
1452 # - storing .hgtags fnode
1452 # - storing .hgtags fnode
1453 # - storing branch related identifier
1453 # - storing branch related identifier
1454
1454
1455 coreconfigitem(
1455 coreconfigitem(
1456 b'format',
1456 b'format',
1457 b'exp-use-changelog-v2',
1457 b'exp-use-changelog-v2',
1458 default=None,
1458 default=None,
1459 experimental=True,
1459 experimental=True,
1460 )
1460 )
1461 coreconfigitem(
1461 coreconfigitem(
1462 b'format',
1462 b'format',
1463 b'usefncache',
1463 b'usefncache',
1464 default=True,
1464 default=True,
1465 )
1465 )
1466 coreconfigitem(
1466 coreconfigitem(
1467 b'format',
1467 b'format',
1468 b'usegeneraldelta',
1468 b'usegeneraldelta',
1469 default=True,
1469 default=True,
1470 )
1470 )
1471 coreconfigitem(
1471 coreconfigitem(
1472 b'format',
1472 b'format',
1473 b'usestore',
1473 b'usestore',
1474 default=True,
1474 default=True,
1475 )
1475 )
1476
1477
1478 def _persistent_nodemap_default():
1479 """compute `use-persistent-nodemap` default value
1480
1481 The feature is disabled unless a fast implementation is available.
1482 """
1483 from . import policy
1484
1485 return policy.importrust('revlog') is not None
1486
1487
1488 coreconfigitem(
1476 coreconfigitem(
1489 b'format',
1477 b'format',
1490 b'use-persistent-nodemap',
1478 b'use-persistent-nodemap',
1491 default=_persistent_nodemap_default,
1479 default=dynamicdefault,
1492 )
1480 )
1493 coreconfigitem(
1481 coreconfigitem(
1494 b'format',
1482 b'format',
1495 b'exp-use-copies-side-data-changeset',
1483 b'exp-use-copies-side-data-changeset',
1496 default=False,
1484 default=False,
1497 experimental=True,
1485 experimental=True,
1498 )
1486 )
1499 coreconfigitem(
1487 coreconfigitem(
1500 b'format',
1488 b'format',
1501 b'use-share-safe',
1489 b'use-share-safe',
1502 default=True,
1490 default=True,
1503 )
1491 )
1504 coreconfigitem(
1492 coreconfigitem(
1505 b'format',
1493 b'format',
1506 b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
1494 b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
1507 default=False,
1495 default=False,
1508 experimental=True,
1496 experimental=True,
1509 )
1497 )
1510 coreconfigitem(
1498 coreconfigitem(
1511 b'format',
1499 b'format',
1512 b'use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet',
1500 b'use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet',
1513 default=False,
1501 default=False,
1514 experimental=True,
1502 experimental=True,
1515 )
1503 )
1516
1504
1517 # Moving this on by default means we are confident about the scaling of phases.
1505 # Moving this on by default means we are confident about the scaling of phases.
1518 # This is not garanteed to be the case at the time this message is written.
1506 # This is not garanteed to be the case at the time this message is written.
1519 coreconfigitem(
1507 coreconfigitem(
1520 b'format',
1508 b'format',
1521 b'use-internal-phase',
1509 b'use-internal-phase',
1522 default=False,
1510 default=False,
1523 experimental=True,
1511 experimental=True,
1524 )
1512 )
1525 # The interaction between the archived phase and obsolescence markers needs to
1513 # The interaction between the archived phase and obsolescence markers needs to
1526 # be sorted out before wider usage of this are to be considered.
1514 # be sorted out before wider usage of this are to be considered.
1527 #
1515 #
1528 # At the time this message is written, behavior when archiving obsolete
1516 # At the time this message is written, behavior when archiving obsolete
1529 # changeset differ significantly from stripping. As part of stripping, we also
1517 # changeset differ significantly from stripping. As part of stripping, we also
1530 # remove the obsolescence marker associated to the stripped changesets,
1518 # remove the obsolescence marker associated to the stripped changesets,
1531 # revealing the precedecessors changesets when applicable. When archiving, we
1519 # revealing the precedecessors changesets when applicable. When archiving, we
1532 # don't touch the obsolescence markers, keeping everything hidden. This can
1520 # don't touch the obsolescence markers, keeping everything hidden. This can
1533 # result in quite confusing situation for people combining exchanging draft
1521 # result in quite confusing situation for people combining exchanging draft
1534 # with the archived phases. As some markers needed by others may be skipped
1522 # with the archived phases. As some markers needed by others may be skipped
1535 # during exchange.
1523 # during exchange.
1536 coreconfigitem(
1524 coreconfigitem(
1537 b'format',
1525 b'format',
1538 b'exp-archived-phase',
1526 b'exp-archived-phase',
1539 default=False,
1527 default=False,
1540 experimental=True,
1528 experimental=True,
1541 )
1529 )
1542 coreconfigitem(
1530 coreconfigitem(
1543 b'shelve',
1531 b'shelve',
1544 b'store',
1532 b'store',
1545 default=b'internal',
1533 default=b'internal',
1546 experimental=True,
1534 experimental=True,
1547 )
1535 )
1548 coreconfigitem(
1536 coreconfigitem(
1549 b'fsmonitor',
1537 b'fsmonitor',
1550 b'warn_when_unused',
1538 b'warn_when_unused',
1551 default=True,
1539 default=True,
1552 )
1540 )
1553 coreconfigitem(
1541 coreconfigitem(
1554 b'fsmonitor',
1542 b'fsmonitor',
1555 b'warn_update_file_count',
1543 b'warn_update_file_count',
1556 default=50000,
1544 default=50000,
1557 )
1545 )
1558 coreconfigitem(
1546 coreconfigitem(
1559 b'fsmonitor',
1547 b'fsmonitor',
1560 b'warn_update_file_count_rust',
1548 b'warn_update_file_count_rust',
1561 default=400000,
1549 default=400000,
1562 )
1550 )
1563 coreconfigitem(
1551 coreconfigitem(
1564 b'help',
1552 b'help',
1565 br'hidden-command\..*',
1553 br'hidden-command\..*',
1566 default=False,
1554 default=False,
1567 generic=True,
1555 generic=True,
1568 )
1556 )
1569 coreconfigitem(
1557 coreconfigitem(
1570 b'help',
1558 b'help',
1571 br'hidden-topic\..*',
1559 br'hidden-topic\..*',
1572 default=False,
1560 default=False,
1573 generic=True,
1561 generic=True,
1574 )
1562 )
1575 coreconfigitem(
1563 coreconfigitem(
1576 b'hooks',
1564 b'hooks',
1577 b'[^:]*',
1565 b'[^:]*',
1578 default=dynamicdefault,
1566 default=dynamicdefault,
1579 generic=True,
1567 generic=True,
1580 )
1568 )
1581 coreconfigitem(
1569 coreconfigitem(
1582 b'hooks',
1570 b'hooks',
1583 b'.*:run-with-plain',
1571 b'.*:run-with-plain',
1584 default=True,
1572 default=True,
1585 generic=True,
1573 generic=True,
1586 )
1574 )
1587 coreconfigitem(
1575 coreconfigitem(
1588 b'hgweb-paths',
1576 b'hgweb-paths',
1589 b'.*',
1577 b'.*',
1590 default=list,
1578 default=list,
1591 generic=True,
1579 generic=True,
1592 )
1580 )
1593 coreconfigitem(
1581 coreconfigitem(
1594 b'hostfingerprints',
1582 b'hostfingerprints',
1595 b'.*',
1583 b'.*',
1596 default=list,
1584 default=list,
1597 generic=True,
1585 generic=True,
1598 )
1586 )
1599 coreconfigitem(
1587 coreconfigitem(
1600 b'hostsecurity',
1588 b'hostsecurity',
1601 b'ciphers',
1589 b'ciphers',
1602 default=None,
1590 default=None,
1603 )
1591 )
1604 coreconfigitem(
1592 coreconfigitem(
1605 b'hostsecurity',
1593 b'hostsecurity',
1606 b'minimumprotocol',
1594 b'minimumprotocol',
1607 default=dynamicdefault,
1595 default=dynamicdefault,
1608 )
1596 )
1609 coreconfigitem(
1597 coreconfigitem(
1610 b'hostsecurity',
1598 b'hostsecurity',
1611 b'.*:minimumprotocol$',
1599 b'.*:minimumprotocol$',
1612 default=dynamicdefault,
1600 default=dynamicdefault,
1613 generic=True,
1601 generic=True,
1614 )
1602 )
1615 coreconfigitem(
1603 coreconfigitem(
1616 b'hostsecurity',
1604 b'hostsecurity',
1617 b'.*:ciphers$',
1605 b'.*:ciphers$',
1618 default=dynamicdefault,
1606 default=dynamicdefault,
1619 generic=True,
1607 generic=True,
1620 )
1608 )
1621 coreconfigitem(
1609 coreconfigitem(
1622 b'hostsecurity',
1610 b'hostsecurity',
1623 b'.*:fingerprints$',
1611 b'.*:fingerprints$',
1624 default=list,
1612 default=list,
1625 generic=True,
1613 generic=True,
1626 )
1614 )
1627 coreconfigitem(
1615 coreconfigitem(
1628 b'hostsecurity',
1616 b'hostsecurity',
1629 b'.*:verifycertsfile$',
1617 b'.*:verifycertsfile$',
1630 default=None,
1618 default=None,
1631 generic=True,
1619 generic=True,
1632 )
1620 )
1633
1621
1634 coreconfigitem(
1622 coreconfigitem(
1635 b'http_proxy',
1623 b'http_proxy',
1636 b'always',
1624 b'always',
1637 default=False,
1625 default=False,
1638 )
1626 )
1639 coreconfigitem(
1627 coreconfigitem(
1640 b'http_proxy',
1628 b'http_proxy',
1641 b'host',
1629 b'host',
1642 default=None,
1630 default=None,
1643 )
1631 )
1644 coreconfigitem(
1632 coreconfigitem(
1645 b'http_proxy',
1633 b'http_proxy',
1646 b'no',
1634 b'no',
1647 default=list,
1635 default=list,
1648 )
1636 )
1649 coreconfigitem(
1637 coreconfigitem(
1650 b'http_proxy',
1638 b'http_proxy',
1651 b'passwd',
1639 b'passwd',
1652 default=None,
1640 default=None,
1653 )
1641 )
1654 coreconfigitem(
1642 coreconfigitem(
1655 b'http_proxy',
1643 b'http_proxy',
1656 b'user',
1644 b'user',
1657 default=None,
1645 default=None,
1658 )
1646 )
1659
1647
1660 coreconfigitem(
1648 coreconfigitem(
1661 b'http',
1649 b'http',
1662 b'timeout',
1650 b'timeout',
1663 default=None,
1651 default=None,
1664 )
1652 )
1665
1653
1666 coreconfigitem(
1654 coreconfigitem(
1667 b'logtoprocess',
1655 b'logtoprocess',
1668 b'commandexception',
1656 b'commandexception',
1669 default=None,
1657 default=None,
1670 )
1658 )
1671 coreconfigitem(
1659 coreconfigitem(
1672 b'logtoprocess',
1660 b'logtoprocess',
1673 b'commandfinish',
1661 b'commandfinish',
1674 default=None,
1662 default=None,
1675 )
1663 )
1676 coreconfigitem(
1664 coreconfigitem(
1677 b'logtoprocess',
1665 b'logtoprocess',
1678 b'command',
1666 b'command',
1679 default=None,
1667 default=None,
1680 )
1668 )
1681 coreconfigitem(
1669 coreconfigitem(
1682 b'logtoprocess',
1670 b'logtoprocess',
1683 b'develwarn',
1671 b'develwarn',
1684 default=None,
1672 default=None,
1685 )
1673 )
1686 coreconfigitem(
1674 coreconfigitem(
1687 b'logtoprocess',
1675 b'logtoprocess',
1688 b'uiblocked',
1676 b'uiblocked',
1689 default=None,
1677 default=None,
1690 )
1678 )
1691 coreconfigitem(
1679 coreconfigitem(
1692 b'merge',
1680 b'merge',
1693 b'checkunknown',
1681 b'checkunknown',
1694 default=b'abort',
1682 default=b'abort',
1695 )
1683 )
1696 coreconfigitem(
1684 coreconfigitem(
1697 b'merge',
1685 b'merge',
1698 b'checkignored',
1686 b'checkignored',
1699 default=b'abort',
1687 default=b'abort',
1700 )
1688 )
1701 coreconfigitem(
1689 coreconfigitem(
1702 b'experimental',
1690 b'experimental',
1703 b'merge.checkpathconflicts',
1691 b'merge.checkpathconflicts',
1704 default=False,
1692 default=False,
1705 )
1693 )
1706 coreconfigitem(
1694 coreconfigitem(
1707 b'merge',
1695 b'merge',
1708 b'followcopies',
1696 b'followcopies',
1709 default=True,
1697 default=True,
1710 )
1698 )
1711 coreconfigitem(
1699 coreconfigitem(
1712 b'merge',
1700 b'merge',
1713 b'on-failure',
1701 b'on-failure',
1714 default=b'continue',
1702 default=b'continue',
1715 )
1703 )
1716 coreconfigitem(
1704 coreconfigitem(
1717 b'merge',
1705 b'merge',
1718 b'preferancestor',
1706 b'preferancestor',
1719 default=lambda: [b'*'],
1707 default=lambda: [b'*'],
1720 experimental=True,
1708 experimental=True,
1721 )
1709 )
1722 coreconfigitem(
1710 coreconfigitem(
1723 b'merge',
1711 b'merge',
1724 b'strict-capability-check',
1712 b'strict-capability-check',
1725 default=False,
1713 default=False,
1726 )
1714 )
1727 coreconfigitem(
1715 coreconfigitem(
1728 b'merge',
1716 b'merge',
1729 b'disable-partial-tools',
1717 b'disable-partial-tools',
1730 default=False,
1718 default=False,
1731 experimental=True,
1719 experimental=True,
1732 )
1720 )
1733 coreconfigitem(
1721 coreconfigitem(
1734 b'partial-merge-tools',
1722 b'partial-merge-tools',
1735 b'.*',
1723 b'.*',
1736 default=None,
1724 default=None,
1737 generic=True,
1725 generic=True,
1738 experimental=True,
1726 experimental=True,
1739 )
1727 )
1740 coreconfigitem(
1728 coreconfigitem(
1741 b'partial-merge-tools',
1729 b'partial-merge-tools',
1742 br'.*\.patterns',
1730 br'.*\.patterns',
1743 default=dynamicdefault,
1731 default=dynamicdefault,
1744 generic=True,
1732 generic=True,
1745 priority=-1,
1733 priority=-1,
1746 experimental=True,
1734 experimental=True,
1747 )
1735 )
1748 coreconfigitem(
1736 coreconfigitem(
1749 b'partial-merge-tools',
1737 b'partial-merge-tools',
1750 br'.*\.executable$',
1738 br'.*\.executable$',
1751 default=dynamicdefault,
1739 default=dynamicdefault,
1752 generic=True,
1740 generic=True,
1753 priority=-1,
1741 priority=-1,
1754 experimental=True,
1742 experimental=True,
1755 )
1743 )
1756 coreconfigitem(
1744 coreconfigitem(
1757 b'partial-merge-tools',
1745 b'partial-merge-tools',
1758 br'.*\.order',
1746 br'.*\.order',
1759 default=0,
1747 default=0,
1760 generic=True,
1748 generic=True,
1761 priority=-1,
1749 priority=-1,
1762 experimental=True,
1750 experimental=True,
1763 )
1751 )
1764 coreconfigitem(
1752 coreconfigitem(
1765 b'partial-merge-tools',
1753 b'partial-merge-tools',
1766 br'.*\.args',
1754 br'.*\.args',
1767 default=b"$local $base $other",
1755 default=b"$local $base $other",
1768 generic=True,
1756 generic=True,
1769 priority=-1,
1757 priority=-1,
1770 experimental=True,
1758 experimental=True,
1771 )
1759 )
1772 coreconfigitem(
1760 coreconfigitem(
1773 b'partial-merge-tools',
1761 b'partial-merge-tools',
1774 br'.*\.disable',
1762 br'.*\.disable',
1775 default=False,
1763 default=False,
1776 generic=True,
1764 generic=True,
1777 priority=-1,
1765 priority=-1,
1778 experimental=True,
1766 experimental=True,
1779 )
1767 )
1780 coreconfigitem(
1768 coreconfigitem(
1781 b'merge-tools',
1769 b'merge-tools',
1782 b'.*',
1770 b'.*',
1783 default=None,
1771 default=None,
1784 generic=True,
1772 generic=True,
1785 )
1773 )
1786 coreconfigitem(
1774 coreconfigitem(
1787 b'merge-tools',
1775 b'merge-tools',
1788 br'.*\.args$',
1776 br'.*\.args$',
1789 default=b"$local $base $other",
1777 default=b"$local $base $other",
1790 generic=True,
1778 generic=True,
1791 priority=-1,
1779 priority=-1,
1792 )
1780 )
1793 coreconfigitem(
1781 coreconfigitem(
1794 b'merge-tools',
1782 b'merge-tools',
1795 br'.*\.binary$',
1783 br'.*\.binary$',
1796 default=False,
1784 default=False,
1797 generic=True,
1785 generic=True,
1798 priority=-1,
1786 priority=-1,
1799 )
1787 )
1800 coreconfigitem(
1788 coreconfigitem(
1801 b'merge-tools',
1789 b'merge-tools',
1802 br'.*\.check$',
1790 br'.*\.check$',
1803 default=list,
1791 default=list,
1804 generic=True,
1792 generic=True,
1805 priority=-1,
1793 priority=-1,
1806 )
1794 )
1807 coreconfigitem(
1795 coreconfigitem(
1808 b'merge-tools',
1796 b'merge-tools',
1809 br'.*\.checkchanged$',
1797 br'.*\.checkchanged$',
1810 default=False,
1798 default=False,
1811 generic=True,
1799 generic=True,
1812 priority=-1,
1800 priority=-1,
1813 )
1801 )
1814 coreconfigitem(
1802 coreconfigitem(
1815 b'merge-tools',
1803 b'merge-tools',
1816 br'.*\.executable$',
1804 br'.*\.executable$',
1817 default=dynamicdefault,
1805 default=dynamicdefault,
1818 generic=True,
1806 generic=True,
1819 priority=-1,
1807 priority=-1,
1820 )
1808 )
1821 coreconfigitem(
1809 coreconfigitem(
1822 b'merge-tools',
1810 b'merge-tools',
1823 br'.*\.fixeol$',
1811 br'.*\.fixeol$',
1824 default=False,
1812 default=False,
1825 generic=True,
1813 generic=True,
1826 priority=-1,
1814 priority=-1,
1827 )
1815 )
1828 coreconfigitem(
1816 coreconfigitem(
1829 b'merge-tools',
1817 b'merge-tools',
1830 br'.*\.gui$',
1818 br'.*\.gui$',
1831 default=False,
1819 default=False,
1832 generic=True,
1820 generic=True,
1833 priority=-1,
1821 priority=-1,
1834 )
1822 )
1835 coreconfigitem(
1823 coreconfigitem(
1836 b'merge-tools',
1824 b'merge-tools',
1837 br'.*\.mergemarkers$',
1825 br'.*\.mergemarkers$',
1838 default=b'basic',
1826 default=b'basic',
1839 generic=True,
1827 generic=True,
1840 priority=-1,
1828 priority=-1,
1841 )
1829 )
1842 coreconfigitem(
1830 coreconfigitem(
1843 b'merge-tools',
1831 b'merge-tools',
1844 br'.*\.mergemarkertemplate$',
1832 br'.*\.mergemarkertemplate$',
1845 default=dynamicdefault, # take from command-templates.mergemarker
1833 default=dynamicdefault, # take from command-templates.mergemarker
1846 generic=True,
1834 generic=True,
1847 priority=-1,
1835 priority=-1,
1848 )
1836 )
1849 coreconfigitem(
1837 coreconfigitem(
1850 b'merge-tools',
1838 b'merge-tools',
1851 br'.*\.priority$',
1839 br'.*\.priority$',
1852 default=0,
1840 default=0,
1853 generic=True,
1841 generic=True,
1854 priority=-1,
1842 priority=-1,
1855 )
1843 )
1856 coreconfigitem(
1844 coreconfigitem(
1857 b'merge-tools',
1845 b'merge-tools',
1858 br'.*\.premerge$',
1846 br'.*\.premerge$',
1859 default=dynamicdefault,
1847 default=dynamicdefault,
1860 generic=True,
1848 generic=True,
1861 priority=-1,
1849 priority=-1,
1862 )
1850 )
1863 coreconfigitem(
1851 coreconfigitem(
1864 b'merge-tools',
1852 b'merge-tools',
1865 br'.*\.regappend$',
1853 br'.*\.regappend$',
1866 default=b"",
1854 default=b"",
1867 generic=True,
1855 generic=True,
1868 priority=-1,
1856 priority=-1,
1869 )
1857 )
1870 coreconfigitem(
1858 coreconfigitem(
1871 b'merge-tools',
1859 b'merge-tools',
1872 br'.*\.symlink$',
1860 br'.*\.symlink$',
1873 default=False,
1861 default=False,
1874 generic=True,
1862 generic=True,
1875 priority=-1,
1863 priority=-1,
1876 )
1864 )
1877 coreconfigitem(
1865 coreconfigitem(
1878 b'pager',
1866 b'pager',
1879 b'attend-.*',
1867 b'attend-.*',
1880 default=dynamicdefault,
1868 default=dynamicdefault,
1881 generic=True,
1869 generic=True,
1882 )
1870 )
1883 coreconfigitem(
1871 coreconfigitem(
1884 b'pager',
1872 b'pager',
1885 b'ignore',
1873 b'ignore',
1886 default=list,
1874 default=list,
1887 )
1875 )
1888 coreconfigitem(
1876 coreconfigitem(
1889 b'pager',
1877 b'pager',
1890 b'pager',
1878 b'pager',
1891 default=dynamicdefault,
1879 default=dynamicdefault,
1892 )
1880 )
1893 coreconfigitem(
1881 coreconfigitem(
1894 b'patch',
1882 b'patch',
1895 b'eol',
1883 b'eol',
1896 default=b'strict',
1884 default=b'strict',
1897 )
1885 )
1898 coreconfigitem(
1886 coreconfigitem(
1899 b'patch',
1887 b'patch',
1900 b'fuzz',
1888 b'fuzz',
1901 default=2,
1889 default=2,
1902 )
1890 )
1903 coreconfigitem(
1891 coreconfigitem(
1904 b'paths',
1892 b'paths',
1905 b'default',
1893 b'default',
1906 default=None,
1894 default=None,
1907 )
1895 )
1908 coreconfigitem(
1896 coreconfigitem(
1909 b'paths',
1897 b'paths',
1910 b'default-push',
1898 b'default-push',
1911 default=None,
1899 default=None,
1912 )
1900 )
1913 coreconfigitem(
1901 coreconfigitem(
1914 b'paths',
1902 b'paths',
1915 b'[^:]*',
1903 b'[^:]*',
1916 default=None,
1904 default=None,
1917 generic=True,
1905 generic=True,
1918 )
1906 )
1919 coreconfigitem(
1907 coreconfigitem(
1920 b'paths',
1908 b'paths',
1921 b'.*:bookmarks.mode',
1909 b'.*:bookmarks.mode',
1922 default='default',
1910 default='default',
1923 generic=True,
1911 generic=True,
1924 )
1912 )
1925 coreconfigitem(
1913 coreconfigitem(
1926 b'paths',
1914 b'paths',
1927 b'.*:multi-urls',
1915 b'.*:multi-urls',
1928 default=False,
1916 default=False,
1929 generic=True,
1917 generic=True,
1930 )
1918 )
1931 coreconfigitem(
1919 coreconfigitem(
1932 b'paths',
1920 b'paths',
1933 b'.*:pushrev',
1921 b'.*:pushrev',
1934 default=None,
1922 default=None,
1935 generic=True,
1923 generic=True,
1936 )
1924 )
1937 coreconfigitem(
1925 coreconfigitem(
1938 b'paths',
1926 b'paths',
1939 b'.*:pushurl',
1927 b'.*:pushurl',
1940 default=None,
1928 default=None,
1941 generic=True,
1929 generic=True,
1942 )
1930 )
1943 coreconfigitem(
1931 coreconfigitem(
1944 b'paths',
1932 b'paths',
1945 b'.*:pulled-delta-reuse-policy',
1933 b'.*:pulled-delta-reuse-policy',
1946 default=None,
1934 default=None,
1947 generic=True,
1935 generic=True,
1948 )
1936 )
1949 coreconfigitem(
1937 coreconfigitem(
1950 b'phases',
1938 b'phases',
1951 b'checksubrepos',
1939 b'checksubrepos',
1952 default=b'follow',
1940 default=b'follow',
1953 )
1941 )
1954 coreconfigitem(
1942 coreconfigitem(
1955 b'phases',
1943 b'phases',
1956 b'new-commit',
1944 b'new-commit',
1957 default=b'draft',
1945 default=b'draft',
1958 )
1946 )
1959 coreconfigitem(
1947 coreconfigitem(
1960 b'phases',
1948 b'phases',
1961 b'publish',
1949 b'publish',
1962 default=True,
1950 default=True,
1963 )
1951 )
1964 coreconfigitem(
1952 coreconfigitem(
1965 b'profiling',
1953 b'profiling',
1966 b'enabled',
1954 b'enabled',
1967 default=False,
1955 default=False,
1968 )
1956 )
1969 coreconfigitem(
1957 coreconfigitem(
1970 b'profiling',
1958 b'profiling',
1971 b'format',
1959 b'format',
1972 default=b'text',
1960 default=b'text',
1973 )
1961 )
1974 coreconfigitem(
1962 coreconfigitem(
1975 b'profiling',
1963 b'profiling',
1976 b'freq',
1964 b'freq',
1977 default=1000,
1965 default=1000,
1978 )
1966 )
1979 coreconfigitem(
1967 coreconfigitem(
1980 b'profiling',
1968 b'profiling',
1981 b'limit',
1969 b'limit',
1982 default=30,
1970 default=30,
1983 )
1971 )
1984 coreconfigitem(
1972 coreconfigitem(
1985 b'profiling',
1973 b'profiling',
1986 b'nested',
1974 b'nested',
1987 default=0,
1975 default=0,
1988 )
1976 )
1989 coreconfigitem(
1977 coreconfigitem(
1990 b'profiling',
1978 b'profiling',
1991 b'output',
1979 b'output',
1992 default=None,
1980 default=None,
1993 )
1981 )
1994 coreconfigitem(
1982 coreconfigitem(
1995 b'profiling',
1983 b'profiling',
1996 b'showmax',
1984 b'showmax',
1997 default=0.999,
1985 default=0.999,
1998 )
1986 )
1999 coreconfigitem(
1987 coreconfigitem(
2000 b'profiling',
1988 b'profiling',
2001 b'showmin',
1989 b'showmin',
2002 default=dynamicdefault,
1990 default=dynamicdefault,
2003 )
1991 )
2004 coreconfigitem(
1992 coreconfigitem(
2005 b'profiling',
1993 b'profiling',
2006 b'showtime',
1994 b'showtime',
2007 default=True,
1995 default=True,
2008 )
1996 )
2009 coreconfigitem(
1997 coreconfigitem(
2010 b'profiling',
1998 b'profiling',
2011 b'sort',
1999 b'sort',
2012 default=b'inlinetime',
2000 default=b'inlinetime',
2013 )
2001 )
2014 coreconfigitem(
2002 coreconfigitem(
2015 b'profiling',
2003 b'profiling',
2016 b'statformat',
2004 b'statformat',
2017 default=b'hotpath',
2005 default=b'hotpath',
2018 )
2006 )
2019 coreconfigitem(
2007 coreconfigitem(
2020 b'profiling',
2008 b'profiling',
2021 b'time-track',
2009 b'time-track',
2022 default=dynamicdefault,
2010 default=dynamicdefault,
2023 )
2011 )
2024 coreconfigitem(
2012 coreconfigitem(
2025 b'profiling',
2013 b'profiling',
2026 b'type',
2014 b'type',
2027 default=b'stat',
2015 default=b'stat',
2028 )
2016 )
2029 coreconfigitem(
2017 coreconfigitem(
2030 b'progress',
2018 b'progress',
2031 b'assume-tty',
2019 b'assume-tty',
2032 default=False,
2020 default=False,
2033 )
2021 )
2034 coreconfigitem(
2022 coreconfigitem(
2035 b'progress',
2023 b'progress',
2036 b'changedelay',
2024 b'changedelay',
2037 default=1,
2025 default=1,
2038 )
2026 )
2039 coreconfigitem(
2027 coreconfigitem(
2040 b'progress',
2028 b'progress',
2041 b'clear-complete',
2029 b'clear-complete',
2042 default=True,
2030 default=True,
2043 )
2031 )
2044 coreconfigitem(
2032 coreconfigitem(
2045 b'progress',
2033 b'progress',
2046 b'debug',
2034 b'debug',
2047 default=False,
2035 default=False,
2048 )
2036 )
2049 coreconfigitem(
2037 coreconfigitem(
2050 b'progress',
2038 b'progress',
2051 b'delay',
2039 b'delay',
2052 default=3,
2040 default=3,
2053 )
2041 )
2054 coreconfigitem(
2042 coreconfigitem(
2055 b'progress',
2043 b'progress',
2056 b'disable',
2044 b'disable',
2057 default=False,
2045 default=False,
2058 )
2046 )
2059 coreconfigitem(
2047 coreconfigitem(
2060 b'progress',
2048 b'progress',
2061 b'estimateinterval',
2049 b'estimateinterval',
2062 default=60.0,
2050 default=60.0,
2063 )
2051 )
2064 coreconfigitem(
2052 coreconfigitem(
2065 b'progress',
2053 b'progress',
2066 b'format',
2054 b'format',
2067 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
2055 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
2068 )
2056 )
2069 coreconfigitem(
2057 coreconfigitem(
2070 b'progress',
2058 b'progress',
2071 b'refresh',
2059 b'refresh',
2072 default=0.1,
2060 default=0.1,
2073 )
2061 )
2074 coreconfigitem(
2062 coreconfigitem(
2075 b'progress',
2063 b'progress',
2076 b'width',
2064 b'width',
2077 default=dynamicdefault,
2065 default=dynamicdefault,
2078 )
2066 )
2079 coreconfigitem(
2067 coreconfigitem(
2080 b'pull',
2068 b'pull',
2081 b'confirm',
2069 b'confirm',
2082 default=False,
2070 default=False,
2083 )
2071 )
2084 coreconfigitem(
2072 coreconfigitem(
2085 b'push',
2073 b'push',
2086 b'pushvars.server',
2074 b'pushvars.server',
2087 default=False,
2075 default=False,
2088 )
2076 )
2089 coreconfigitem(
2077 coreconfigitem(
2090 b'rewrite',
2078 b'rewrite',
2091 b'backup-bundle',
2079 b'backup-bundle',
2092 default=True,
2080 default=True,
2093 alias=[(b'ui', b'history-editing-backup')],
2081 alias=[(b'ui', b'history-editing-backup')],
2094 )
2082 )
2095 coreconfigitem(
2083 coreconfigitem(
2096 b'rewrite',
2084 b'rewrite',
2097 b'update-timestamp',
2085 b'update-timestamp',
2098 default=False,
2086 default=False,
2099 )
2087 )
2100 coreconfigitem(
2088 coreconfigitem(
2101 b'rewrite',
2089 b'rewrite',
2102 b'empty-successor',
2090 b'empty-successor',
2103 default=b'skip',
2091 default=b'skip',
2104 experimental=True,
2092 experimental=True,
2105 )
2093 )
2106 # experimental as long as format.use-dirstate-v2 is.
2094 # experimental as long as format.use-dirstate-v2 is.
2107 coreconfigitem(
2095 coreconfigitem(
2108 b'storage',
2096 b'storage',
2109 b'dirstate-v2.slow-path',
2097 b'dirstate-v2.slow-path',
2110 default=b"abort",
2098 default=b"abort",
2111 experimental=True,
2099 experimental=True,
2112 )
2100 )
2113 coreconfigitem(
2101 coreconfigitem(
2114 b'storage',
2102 b'storage',
2115 b'new-repo-backend',
2103 b'new-repo-backend',
2116 default=b'revlogv1',
2104 default=b'revlogv1',
2117 experimental=True,
2105 experimental=True,
2118 )
2106 )
2119 coreconfigitem(
2107 coreconfigitem(
2120 b'storage',
2108 b'storage',
2121 b'revlog.optimize-delta-parent-choice',
2109 b'revlog.optimize-delta-parent-choice',
2122 default=True,
2110 default=True,
2123 alias=[(b'format', b'aggressivemergedeltas')],
2111 alias=[(b'format', b'aggressivemergedeltas')],
2124 )
2112 )
2125 coreconfigitem(
2113 coreconfigitem(
2126 b'storage',
2114 b'storage',
2127 b'revlog.delta-parent-search.candidate-group-chunk-size',
2115 b'revlog.delta-parent-search.candidate-group-chunk-size',
2128 default=20,
2116 default=20,
2129 )
2117 )
2130 coreconfigitem(
2118 coreconfigitem(
2131 b'storage',
2119 b'storage',
2132 b'revlog.issue6528.fix-incoming',
2120 b'revlog.issue6528.fix-incoming',
2133 default=True,
2121 default=True,
2134 )
2122 )
2135 # experimental as long as rust is experimental (or a C version is implemented)
2123 # experimental as long as rust is experimental (or a C version is implemented)
2136 coreconfigitem(
2124 coreconfigitem(
2137 b'storage',
2125 b'storage',
2138 b'revlog.persistent-nodemap.mmap',
2126 b'revlog.persistent-nodemap.mmap',
2139 default=True,
2127 default=True,
2140 )
2128 )
2141 # experimental as long as format.use-persistent-nodemap is.
2129 # experimental as long as format.use-persistent-nodemap is.
2142 coreconfigitem(
2130 coreconfigitem(
2143 b'storage',
2131 b'storage',
2144 b'revlog.persistent-nodemap.slow-path',
2132 b'revlog.persistent-nodemap.slow-path',
2145 default=b"abort",
2133 default=b"abort",
2146 )
2134 )
2147
2135
2148 coreconfigitem(
2136 coreconfigitem(
2149 b'storage',
2137 b'storage',
2150 b'revlog.reuse-external-delta',
2138 b'revlog.reuse-external-delta',
2151 default=True,
2139 default=True,
2152 )
2140 )
2153 # This option is True unless `format.generaldelta` is set.
2141 # This option is True unless `format.generaldelta` is set.
2154 coreconfigitem(
2142 coreconfigitem(
2155 b'storage',
2143 b'storage',
2156 b'revlog.reuse-external-delta-parent',
2144 b'revlog.reuse-external-delta-parent',
2157 default=None,
2145 default=None,
2158 )
2146 )
2159 coreconfigitem(
2147 coreconfigitem(
2160 b'storage',
2148 b'storage',
2161 b'revlog.zlib.level',
2149 b'revlog.zlib.level',
2162 default=None,
2150 default=None,
2163 )
2151 )
2164 coreconfigitem(
2152 coreconfigitem(
2165 b'storage',
2153 b'storage',
2166 b'revlog.zstd.level',
2154 b'revlog.zstd.level',
2167 default=None,
2155 default=None,
2168 )
2156 )
2169 coreconfigitem(
2157 coreconfigitem(
2170 b'server',
2158 b'server',
2171 b'bookmarks-pushkey-compat',
2159 b'bookmarks-pushkey-compat',
2172 default=True,
2160 default=True,
2173 )
2161 )
2174 coreconfigitem(
2162 coreconfigitem(
2175 b'server',
2163 b'server',
2176 b'bundle1',
2164 b'bundle1',
2177 default=True,
2165 default=True,
2178 )
2166 )
2179 coreconfigitem(
2167 coreconfigitem(
2180 b'server',
2168 b'server',
2181 b'bundle1gd',
2169 b'bundle1gd',
2182 default=None,
2170 default=None,
2183 )
2171 )
2184 coreconfigitem(
2172 coreconfigitem(
2185 b'server',
2173 b'server',
2186 b'bundle1.pull',
2174 b'bundle1.pull',
2187 default=None,
2175 default=None,
2188 )
2176 )
2189 coreconfigitem(
2177 coreconfigitem(
2190 b'server',
2178 b'server',
2191 b'bundle1gd.pull',
2179 b'bundle1gd.pull',
2192 default=None,
2180 default=None,
2193 )
2181 )
2194 coreconfigitem(
2182 coreconfigitem(
2195 b'server',
2183 b'server',
2196 b'bundle1.push',
2184 b'bundle1.push',
2197 default=None,
2185 default=None,
2198 )
2186 )
2199 coreconfigitem(
2187 coreconfigitem(
2200 b'server',
2188 b'server',
2201 b'bundle1gd.push',
2189 b'bundle1gd.push',
2202 default=None,
2190 default=None,
2203 )
2191 )
2204 coreconfigitem(
2192 coreconfigitem(
2205 b'server',
2193 b'server',
2206 b'bundle2.stream',
2194 b'bundle2.stream',
2207 default=True,
2195 default=True,
2208 alias=[(b'experimental', b'bundle2.stream')],
2196 alias=[(b'experimental', b'bundle2.stream')],
2209 )
2197 )
2210 coreconfigitem(
2198 coreconfigitem(
2211 b'server',
2199 b'server',
2212 b'compressionengines',
2200 b'compressionengines',
2213 default=list,
2201 default=list,
2214 )
2202 )
2215 coreconfigitem(
2203 coreconfigitem(
2216 b'server',
2204 b'server',
2217 b'concurrent-push-mode',
2205 b'concurrent-push-mode',
2218 default=b'check-related',
2206 default=b'check-related',
2219 )
2207 )
2220 coreconfigitem(
2208 coreconfigitem(
2221 b'server',
2209 b'server',
2222 b'disablefullbundle',
2210 b'disablefullbundle',
2223 default=False,
2211 default=False,
2224 )
2212 )
2225 coreconfigitem(
2213 coreconfigitem(
2226 b'server',
2214 b'server',
2227 b'maxhttpheaderlen',
2215 b'maxhttpheaderlen',
2228 default=1024,
2216 default=1024,
2229 )
2217 )
2230 coreconfigitem(
2218 coreconfigitem(
2231 b'server',
2219 b'server',
2232 b'pullbundle',
2220 b'pullbundle',
2233 default=True,
2221 default=True,
2234 )
2222 )
2235 coreconfigitem(
2223 coreconfigitem(
2236 b'server',
2224 b'server',
2237 b'preferuncompressed',
2225 b'preferuncompressed',
2238 default=False,
2226 default=False,
2239 )
2227 )
2240 coreconfigitem(
2228 coreconfigitem(
2241 b'server',
2229 b'server',
2242 b'streamunbundle',
2230 b'streamunbundle',
2243 default=False,
2231 default=False,
2244 )
2232 )
2245 coreconfigitem(
2233 coreconfigitem(
2246 b'server',
2234 b'server',
2247 b'uncompressed',
2235 b'uncompressed',
2248 default=True,
2236 default=True,
2249 )
2237 )
2250 coreconfigitem(
2238 coreconfigitem(
2251 b'server',
2239 b'server',
2252 b'uncompressedallowsecret',
2240 b'uncompressedallowsecret',
2253 default=False,
2241 default=False,
2254 )
2242 )
2255 coreconfigitem(
2243 coreconfigitem(
2256 b'server',
2244 b'server',
2257 b'view',
2245 b'view',
2258 default=b'served',
2246 default=b'served',
2259 )
2247 )
2260 coreconfigitem(
2248 coreconfigitem(
2261 b'server',
2249 b'server',
2262 b'validate',
2250 b'validate',
2263 default=False,
2251 default=False,
2264 )
2252 )
2265 coreconfigitem(
2253 coreconfigitem(
2266 b'server',
2254 b'server',
2267 b'zliblevel',
2255 b'zliblevel',
2268 default=-1,
2256 default=-1,
2269 )
2257 )
2270 coreconfigitem(
2258 coreconfigitem(
2271 b'server',
2259 b'server',
2272 b'zstdlevel',
2260 b'zstdlevel',
2273 default=3,
2261 default=3,
2274 )
2262 )
2275 coreconfigitem(
2263 coreconfigitem(
2276 b'share',
2264 b'share',
2277 b'pool',
2265 b'pool',
2278 default=None,
2266 default=None,
2279 )
2267 )
2280 coreconfigitem(
2268 coreconfigitem(
2281 b'share',
2269 b'share',
2282 b'poolnaming',
2270 b'poolnaming',
2283 default=b'identity',
2271 default=b'identity',
2284 )
2272 )
2285 coreconfigitem(
2273 coreconfigitem(
2286 b'share',
2274 b'share',
2287 b'safe-mismatch.source-not-safe',
2275 b'safe-mismatch.source-not-safe',
2288 default=b'abort',
2276 default=b'abort',
2289 )
2277 )
2290 coreconfigitem(
2278 coreconfigitem(
2291 b'share',
2279 b'share',
2292 b'safe-mismatch.source-safe',
2280 b'safe-mismatch.source-safe',
2293 default=b'abort',
2281 default=b'abort',
2294 )
2282 )
2295 coreconfigitem(
2283 coreconfigitem(
2296 b'share',
2284 b'share',
2297 b'safe-mismatch.source-not-safe.warn',
2285 b'safe-mismatch.source-not-safe.warn',
2298 default=True,
2286 default=True,
2299 )
2287 )
2300 coreconfigitem(
2288 coreconfigitem(
2301 b'share',
2289 b'share',
2302 b'safe-mismatch.source-safe.warn',
2290 b'safe-mismatch.source-safe.warn',
2303 default=True,
2291 default=True,
2304 )
2292 )
2305 coreconfigitem(
2293 coreconfigitem(
2306 b'share',
2294 b'share',
2307 b'safe-mismatch.source-not-safe:verbose-upgrade',
2295 b'safe-mismatch.source-not-safe:verbose-upgrade',
2308 default=True,
2296 default=True,
2309 )
2297 )
2310 coreconfigitem(
2298 coreconfigitem(
2311 b'share',
2299 b'share',
2312 b'safe-mismatch.source-safe:verbose-upgrade',
2300 b'safe-mismatch.source-safe:verbose-upgrade',
2313 default=True,
2301 default=True,
2314 )
2302 )
2315 coreconfigitem(
2303 coreconfigitem(
2316 b'shelve',
2304 b'shelve',
2317 b'maxbackups',
2305 b'maxbackups',
2318 default=10,
2306 default=10,
2319 )
2307 )
2320 coreconfigitem(
2308 coreconfigitem(
2321 b'smtp',
2309 b'smtp',
2322 b'host',
2310 b'host',
2323 default=None,
2311 default=None,
2324 )
2312 )
2325 coreconfigitem(
2313 coreconfigitem(
2326 b'smtp',
2314 b'smtp',
2327 b'local_hostname',
2315 b'local_hostname',
2328 default=None,
2316 default=None,
2329 )
2317 )
2330 coreconfigitem(
2318 coreconfigitem(
2331 b'smtp',
2319 b'smtp',
2332 b'password',
2320 b'password',
2333 default=None,
2321 default=None,
2334 )
2322 )
2335 coreconfigitem(
2323 coreconfigitem(
2336 b'smtp',
2324 b'smtp',
2337 b'port',
2325 b'port',
2338 default=dynamicdefault,
2326 default=dynamicdefault,
2339 )
2327 )
2340 coreconfigitem(
2328 coreconfigitem(
2341 b'smtp',
2329 b'smtp',
2342 b'tls',
2330 b'tls',
2343 default=b'none',
2331 default=b'none',
2344 )
2332 )
2345 coreconfigitem(
2333 coreconfigitem(
2346 b'smtp',
2334 b'smtp',
2347 b'username',
2335 b'username',
2348 default=None,
2336 default=None,
2349 )
2337 )
2350 coreconfigitem(
2338 coreconfigitem(
2351 b'sparse',
2339 b'sparse',
2352 b'missingwarning',
2340 b'missingwarning',
2353 default=True,
2341 default=True,
2354 experimental=True,
2342 experimental=True,
2355 )
2343 )
2356 coreconfigitem(
2344 coreconfigitem(
2357 b'subrepos',
2345 b'subrepos',
2358 b'allowed',
2346 b'allowed',
2359 default=dynamicdefault, # to make backporting simpler
2347 default=dynamicdefault, # to make backporting simpler
2360 )
2348 )
2361 coreconfigitem(
2349 coreconfigitem(
2362 b'subrepos',
2350 b'subrepos',
2363 b'hg:allowed',
2351 b'hg:allowed',
2364 default=dynamicdefault,
2352 default=dynamicdefault,
2365 )
2353 )
2366 coreconfigitem(
2354 coreconfigitem(
2367 b'subrepos',
2355 b'subrepos',
2368 b'git:allowed',
2356 b'git:allowed',
2369 default=dynamicdefault,
2357 default=dynamicdefault,
2370 )
2358 )
2371 coreconfigitem(
2359 coreconfigitem(
2372 b'subrepos',
2360 b'subrepos',
2373 b'svn:allowed',
2361 b'svn:allowed',
2374 default=dynamicdefault,
2362 default=dynamicdefault,
2375 )
2363 )
2376 coreconfigitem(
2364 coreconfigitem(
2377 b'templates',
2365 b'templates',
2378 b'.*',
2366 b'.*',
2379 default=None,
2367 default=None,
2380 generic=True,
2368 generic=True,
2381 )
2369 )
2382 coreconfigitem(
2370 coreconfigitem(
2383 b'templateconfig',
2371 b'templateconfig',
2384 b'.*',
2372 b'.*',
2385 default=dynamicdefault,
2373 default=dynamicdefault,
2386 generic=True,
2374 generic=True,
2387 )
2375 )
2388 coreconfigitem(
2376 coreconfigitem(
2389 b'trusted',
2377 b'trusted',
2390 b'groups',
2378 b'groups',
2391 default=list,
2379 default=list,
2392 )
2380 )
2393 coreconfigitem(
2381 coreconfigitem(
2394 b'trusted',
2382 b'trusted',
2395 b'users',
2383 b'users',
2396 default=list,
2384 default=list,
2397 )
2385 )
2398 coreconfigitem(
2386 coreconfigitem(
2399 b'ui',
2387 b'ui',
2400 b'_usedassubrepo',
2388 b'_usedassubrepo',
2401 default=False,
2389 default=False,
2402 )
2390 )
2403 coreconfigitem(
2391 coreconfigitem(
2404 b'ui',
2392 b'ui',
2405 b'allowemptycommit',
2393 b'allowemptycommit',
2406 default=False,
2394 default=False,
2407 )
2395 )
2408 coreconfigitem(
2396 coreconfigitem(
2409 b'ui',
2397 b'ui',
2410 b'archivemeta',
2398 b'archivemeta',
2411 default=True,
2399 default=True,
2412 )
2400 )
2413 coreconfigitem(
2401 coreconfigitem(
2414 b'ui',
2402 b'ui',
2415 b'askusername',
2403 b'askusername',
2416 default=False,
2404 default=False,
2417 )
2405 )
2418 coreconfigitem(
2406 coreconfigitem(
2419 b'ui',
2407 b'ui',
2420 b'available-memory',
2408 b'available-memory',
2421 default=None,
2409 default=None,
2422 )
2410 )
2423
2411
2424 coreconfigitem(
2412 coreconfigitem(
2425 b'ui',
2413 b'ui',
2426 b'clonebundlefallback',
2414 b'clonebundlefallback',
2427 default=False,
2415 default=False,
2428 )
2416 )
2429 coreconfigitem(
2417 coreconfigitem(
2430 b'ui',
2418 b'ui',
2431 b'clonebundleprefers',
2419 b'clonebundleprefers',
2432 default=list,
2420 default=list,
2433 )
2421 )
2434 coreconfigitem(
2422 coreconfigitem(
2435 b'ui',
2423 b'ui',
2436 b'clonebundles',
2424 b'clonebundles',
2437 default=True,
2425 default=True,
2438 )
2426 )
2439 coreconfigitem(
2427 coreconfigitem(
2440 b'ui',
2428 b'ui',
2441 b'color',
2429 b'color',
2442 default=b'auto',
2430 default=b'auto',
2443 )
2431 )
2444 coreconfigitem(
2432 coreconfigitem(
2445 b'ui',
2433 b'ui',
2446 b'commitsubrepos',
2434 b'commitsubrepos',
2447 default=False,
2435 default=False,
2448 )
2436 )
2449 coreconfigitem(
2437 coreconfigitem(
2450 b'ui',
2438 b'ui',
2451 b'debug',
2439 b'debug',
2452 default=False,
2440 default=False,
2453 )
2441 )
2454 coreconfigitem(
2442 coreconfigitem(
2455 b'ui',
2443 b'ui',
2456 b'debugger',
2444 b'debugger',
2457 default=None,
2445 default=None,
2458 )
2446 )
2459 coreconfigitem(
2447 coreconfigitem(
2460 b'ui',
2448 b'ui',
2461 b'editor',
2449 b'editor',
2462 default=dynamicdefault,
2450 default=dynamicdefault,
2463 )
2451 )
2464 coreconfigitem(
2452 coreconfigitem(
2465 b'ui',
2453 b'ui',
2466 b'detailed-exit-code',
2454 b'detailed-exit-code',
2467 default=False,
2455 default=False,
2468 experimental=True,
2456 experimental=True,
2469 )
2457 )
2470 coreconfigitem(
2458 coreconfigitem(
2471 b'ui',
2459 b'ui',
2472 b'fallbackencoding',
2460 b'fallbackencoding',
2473 default=None,
2461 default=None,
2474 )
2462 )
2475 coreconfigitem(
2463 coreconfigitem(
2476 b'ui',
2464 b'ui',
2477 b'forcecwd',
2465 b'forcecwd',
2478 default=None,
2466 default=None,
2479 )
2467 )
2480 coreconfigitem(
2468 coreconfigitem(
2481 b'ui',
2469 b'ui',
2482 b'forcemerge',
2470 b'forcemerge',
2483 default=None,
2471 default=None,
2484 )
2472 )
2485 coreconfigitem(
2473 coreconfigitem(
2486 b'ui',
2474 b'ui',
2487 b'formatdebug',
2475 b'formatdebug',
2488 default=False,
2476 default=False,
2489 )
2477 )
2490 coreconfigitem(
2478 coreconfigitem(
2491 b'ui',
2479 b'ui',
2492 b'formatjson',
2480 b'formatjson',
2493 default=False,
2481 default=False,
2494 )
2482 )
2495 coreconfigitem(
2483 coreconfigitem(
2496 b'ui',
2484 b'ui',
2497 b'formatted',
2485 b'formatted',
2498 default=None,
2486 default=None,
2499 )
2487 )
2500 coreconfigitem(
2488 coreconfigitem(
2501 b'ui',
2489 b'ui',
2502 b'interactive',
2490 b'interactive',
2503 default=None,
2491 default=None,
2504 )
2492 )
2505 coreconfigitem(
2493 coreconfigitem(
2506 b'ui',
2494 b'ui',
2507 b'interface',
2495 b'interface',
2508 default=None,
2496 default=None,
2509 )
2497 )
2510 coreconfigitem(
2498 coreconfigitem(
2511 b'ui',
2499 b'ui',
2512 b'interface.chunkselector',
2500 b'interface.chunkselector',
2513 default=None,
2501 default=None,
2514 )
2502 )
2515 coreconfigitem(
2503 coreconfigitem(
2516 b'ui',
2504 b'ui',
2517 b'large-file-limit',
2505 b'large-file-limit',
2518 default=10 * (2 ** 20),
2506 default=10 * (2 ** 20),
2519 )
2507 )
2520 coreconfigitem(
2508 coreconfigitem(
2521 b'ui',
2509 b'ui',
2522 b'logblockedtimes',
2510 b'logblockedtimes',
2523 default=False,
2511 default=False,
2524 )
2512 )
2525 coreconfigitem(
2513 coreconfigitem(
2526 b'ui',
2514 b'ui',
2527 b'merge',
2515 b'merge',
2528 default=None,
2516 default=None,
2529 )
2517 )
2530 coreconfigitem(
2518 coreconfigitem(
2531 b'ui',
2519 b'ui',
2532 b'mergemarkers',
2520 b'mergemarkers',
2533 default=b'basic',
2521 default=b'basic',
2534 )
2522 )
2535 coreconfigitem(
2523 coreconfigitem(
2536 b'ui',
2524 b'ui',
2537 b'message-output',
2525 b'message-output',
2538 default=b'stdio',
2526 default=b'stdio',
2539 )
2527 )
2540 coreconfigitem(
2528 coreconfigitem(
2541 b'ui',
2529 b'ui',
2542 b'nontty',
2530 b'nontty',
2543 default=False,
2531 default=False,
2544 )
2532 )
2545 coreconfigitem(
2533 coreconfigitem(
2546 b'ui',
2534 b'ui',
2547 b'origbackuppath',
2535 b'origbackuppath',
2548 default=None,
2536 default=None,
2549 )
2537 )
2550 coreconfigitem(
2538 coreconfigitem(
2551 b'ui',
2539 b'ui',
2552 b'paginate',
2540 b'paginate',
2553 default=True,
2541 default=True,
2554 )
2542 )
2555 coreconfigitem(
2543 coreconfigitem(
2556 b'ui',
2544 b'ui',
2557 b'patch',
2545 b'patch',
2558 default=None,
2546 default=None,
2559 )
2547 )
2560 coreconfigitem(
2548 coreconfigitem(
2561 b'ui',
2549 b'ui',
2562 b'portablefilenames',
2550 b'portablefilenames',
2563 default=b'warn',
2551 default=b'warn',
2564 )
2552 )
2565 coreconfigitem(
2553 coreconfigitem(
2566 b'ui',
2554 b'ui',
2567 b'promptecho',
2555 b'promptecho',
2568 default=False,
2556 default=False,
2569 )
2557 )
2570 coreconfigitem(
2558 coreconfigitem(
2571 b'ui',
2559 b'ui',
2572 b'quiet',
2560 b'quiet',
2573 default=False,
2561 default=False,
2574 )
2562 )
2575 coreconfigitem(
2563 coreconfigitem(
2576 b'ui',
2564 b'ui',
2577 b'quietbookmarkmove',
2565 b'quietbookmarkmove',
2578 default=False,
2566 default=False,
2579 )
2567 )
2580 coreconfigitem(
2568 coreconfigitem(
2581 b'ui',
2569 b'ui',
2582 b'relative-paths',
2570 b'relative-paths',
2583 default=b'legacy',
2571 default=b'legacy',
2584 )
2572 )
2585 coreconfigitem(
2573 coreconfigitem(
2586 b'ui',
2574 b'ui',
2587 b'remotecmd',
2575 b'remotecmd',
2588 default=b'hg',
2576 default=b'hg',
2589 )
2577 )
2590 coreconfigitem(
2578 coreconfigitem(
2591 b'ui',
2579 b'ui',
2592 b'report_untrusted',
2580 b'report_untrusted',
2593 default=True,
2581 default=True,
2594 )
2582 )
2595 coreconfigitem(
2583 coreconfigitem(
2596 b'ui',
2584 b'ui',
2597 b'rollback',
2585 b'rollback',
2598 default=True,
2586 default=True,
2599 )
2587 )
2600 coreconfigitem(
2588 coreconfigitem(
2601 b'ui',
2589 b'ui',
2602 b'signal-safe-lock',
2590 b'signal-safe-lock',
2603 default=True,
2591 default=True,
2604 )
2592 )
2605 coreconfigitem(
2593 coreconfigitem(
2606 b'ui',
2594 b'ui',
2607 b'slash',
2595 b'slash',
2608 default=False,
2596 default=False,
2609 )
2597 )
2610 coreconfigitem(
2598 coreconfigitem(
2611 b'ui',
2599 b'ui',
2612 b'ssh',
2600 b'ssh',
2613 default=b'ssh',
2601 default=b'ssh',
2614 )
2602 )
2615 coreconfigitem(
2603 coreconfigitem(
2616 b'ui',
2604 b'ui',
2617 b'ssherrorhint',
2605 b'ssherrorhint',
2618 default=None,
2606 default=None,
2619 )
2607 )
2620 coreconfigitem(
2608 coreconfigitem(
2621 b'ui',
2609 b'ui',
2622 b'statuscopies',
2610 b'statuscopies',
2623 default=False,
2611 default=False,
2624 )
2612 )
2625 coreconfigitem(
2613 coreconfigitem(
2626 b'ui',
2614 b'ui',
2627 b'strict',
2615 b'strict',
2628 default=False,
2616 default=False,
2629 )
2617 )
2630 coreconfigitem(
2618 coreconfigitem(
2631 b'ui',
2619 b'ui',
2632 b'style',
2620 b'style',
2633 default=b'',
2621 default=b'',
2634 )
2622 )
2635 coreconfigitem(
2623 coreconfigitem(
2636 b'ui',
2624 b'ui',
2637 b'supportcontact',
2625 b'supportcontact',
2638 default=None,
2626 default=None,
2639 )
2627 )
2640 coreconfigitem(
2628 coreconfigitem(
2641 b'ui',
2629 b'ui',
2642 b'textwidth',
2630 b'textwidth',
2643 default=78,
2631 default=78,
2644 )
2632 )
2645 coreconfigitem(
2633 coreconfigitem(
2646 b'ui',
2634 b'ui',
2647 b'timeout',
2635 b'timeout',
2648 default=b'600',
2636 default=b'600',
2649 )
2637 )
2650 coreconfigitem(
2638 coreconfigitem(
2651 b'ui',
2639 b'ui',
2652 b'timeout.warn',
2640 b'timeout.warn',
2653 default=0,
2641 default=0,
2654 )
2642 )
2655 coreconfigitem(
2643 coreconfigitem(
2656 b'ui',
2644 b'ui',
2657 b'timestamp-output',
2645 b'timestamp-output',
2658 default=False,
2646 default=False,
2659 )
2647 )
2660 coreconfigitem(
2648 coreconfigitem(
2661 b'ui',
2649 b'ui',
2662 b'traceback',
2650 b'traceback',
2663 default=False,
2651 default=False,
2664 )
2652 )
2665 coreconfigitem(
2653 coreconfigitem(
2666 b'ui',
2654 b'ui',
2667 b'tweakdefaults',
2655 b'tweakdefaults',
2668 default=False,
2656 default=False,
2669 )
2657 )
2670 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2658 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2671 coreconfigitem(
2659 coreconfigitem(
2672 b'ui',
2660 b'ui',
2673 b'verbose',
2661 b'verbose',
2674 default=False,
2662 default=False,
2675 )
2663 )
2676 coreconfigitem(
2664 coreconfigitem(
2677 b'verify',
2665 b'verify',
2678 b'skipflags',
2666 b'skipflags',
2679 default=0,
2667 default=0,
2680 )
2668 )
2681 coreconfigitem(
2669 coreconfigitem(
2682 b'web',
2670 b'web',
2683 b'allowbz2',
2671 b'allowbz2',
2684 default=False,
2672 default=False,
2685 )
2673 )
2686 coreconfigitem(
2674 coreconfigitem(
2687 b'web',
2675 b'web',
2688 b'allowgz',
2676 b'allowgz',
2689 default=False,
2677 default=False,
2690 )
2678 )
2691 coreconfigitem(
2679 coreconfigitem(
2692 b'web',
2680 b'web',
2693 b'allow-pull',
2681 b'allow-pull',
2694 alias=[(b'web', b'allowpull')],
2682 alias=[(b'web', b'allowpull')],
2695 default=True,
2683 default=True,
2696 )
2684 )
2697 coreconfigitem(
2685 coreconfigitem(
2698 b'web',
2686 b'web',
2699 b'allow-push',
2687 b'allow-push',
2700 alias=[(b'web', b'allow_push')],
2688 alias=[(b'web', b'allow_push')],
2701 default=list,
2689 default=list,
2702 )
2690 )
2703 coreconfigitem(
2691 coreconfigitem(
2704 b'web',
2692 b'web',
2705 b'allowzip',
2693 b'allowzip',
2706 default=False,
2694 default=False,
2707 )
2695 )
2708 coreconfigitem(
2696 coreconfigitem(
2709 b'web',
2697 b'web',
2710 b'archivesubrepos',
2698 b'archivesubrepos',
2711 default=False,
2699 default=False,
2712 )
2700 )
2713 coreconfigitem(
2701 coreconfigitem(
2714 b'web',
2702 b'web',
2715 b'cache',
2703 b'cache',
2716 default=True,
2704 default=True,
2717 )
2705 )
2718 coreconfigitem(
2706 coreconfigitem(
2719 b'web',
2707 b'web',
2720 b'comparisoncontext',
2708 b'comparisoncontext',
2721 default=5,
2709 default=5,
2722 )
2710 )
2723 coreconfigitem(
2711 coreconfigitem(
2724 b'web',
2712 b'web',
2725 b'contact',
2713 b'contact',
2726 default=None,
2714 default=None,
2727 )
2715 )
2728 coreconfigitem(
2716 coreconfigitem(
2729 b'web',
2717 b'web',
2730 b'deny_push',
2718 b'deny_push',
2731 default=list,
2719 default=list,
2732 )
2720 )
2733 coreconfigitem(
2721 coreconfigitem(
2734 b'web',
2722 b'web',
2735 b'guessmime',
2723 b'guessmime',
2736 default=False,
2724 default=False,
2737 )
2725 )
2738 coreconfigitem(
2726 coreconfigitem(
2739 b'web',
2727 b'web',
2740 b'hidden',
2728 b'hidden',
2741 default=False,
2729 default=False,
2742 )
2730 )
2743 coreconfigitem(
2731 coreconfigitem(
2744 b'web',
2732 b'web',
2745 b'labels',
2733 b'labels',
2746 default=list,
2734 default=list,
2747 )
2735 )
2748 coreconfigitem(
2736 coreconfigitem(
2749 b'web',
2737 b'web',
2750 b'logoimg',
2738 b'logoimg',
2751 default=b'hglogo.png',
2739 default=b'hglogo.png',
2752 )
2740 )
2753 coreconfigitem(
2741 coreconfigitem(
2754 b'web',
2742 b'web',
2755 b'logourl',
2743 b'logourl',
2756 default=b'https://mercurial-scm.org/',
2744 default=b'https://mercurial-scm.org/',
2757 )
2745 )
2758 coreconfigitem(
2746 coreconfigitem(
2759 b'web',
2747 b'web',
2760 b'accesslog',
2748 b'accesslog',
2761 default=b'-',
2749 default=b'-',
2762 )
2750 )
2763 coreconfigitem(
2751 coreconfigitem(
2764 b'web',
2752 b'web',
2765 b'address',
2753 b'address',
2766 default=b'',
2754 default=b'',
2767 )
2755 )
2768 coreconfigitem(
2756 coreconfigitem(
2769 b'web',
2757 b'web',
2770 b'allow-archive',
2758 b'allow-archive',
2771 alias=[(b'web', b'allow_archive')],
2759 alias=[(b'web', b'allow_archive')],
2772 default=list,
2760 default=list,
2773 )
2761 )
2774 coreconfigitem(
2762 coreconfigitem(
2775 b'web',
2763 b'web',
2776 b'allow_read',
2764 b'allow_read',
2777 default=list,
2765 default=list,
2778 )
2766 )
2779 coreconfigitem(
2767 coreconfigitem(
2780 b'web',
2768 b'web',
2781 b'baseurl',
2769 b'baseurl',
2782 default=None,
2770 default=None,
2783 )
2771 )
2784 coreconfigitem(
2772 coreconfigitem(
2785 b'web',
2773 b'web',
2786 b'cacerts',
2774 b'cacerts',
2787 default=None,
2775 default=None,
2788 )
2776 )
2789 coreconfigitem(
2777 coreconfigitem(
2790 b'web',
2778 b'web',
2791 b'certificate',
2779 b'certificate',
2792 default=None,
2780 default=None,
2793 )
2781 )
2794 coreconfigitem(
2782 coreconfigitem(
2795 b'web',
2783 b'web',
2796 b'collapse',
2784 b'collapse',
2797 default=False,
2785 default=False,
2798 )
2786 )
2799 coreconfigitem(
2787 coreconfigitem(
2800 b'web',
2788 b'web',
2801 b'csp',
2789 b'csp',
2802 default=None,
2790 default=None,
2803 )
2791 )
2804 coreconfigitem(
2792 coreconfigitem(
2805 b'web',
2793 b'web',
2806 b'deny_read',
2794 b'deny_read',
2807 default=list,
2795 default=list,
2808 )
2796 )
2809 coreconfigitem(
2797 coreconfigitem(
2810 b'web',
2798 b'web',
2811 b'descend',
2799 b'descend',
2812 default=True,
2800 default=True,
2813 )
2801 )
2814 coreconfigitem(
2802 coreconfigitem(
2815 b'web',
2803 b'web',
2816 b'description',
2804 b'description',
2817 default=b"",
2805 default=b"",
2818 )
2806 )
2819 coreconfigitem(
2807 coreconfigitem(
2820 b'web',
2808 b'web',
2821 b'encoding',
2809 b'encoding',
2822 default=lambda: encoding.encoding,
2810 default=lambda: encoding.encoding,
2823 )
2811 )
2824 coreconfigitem(
2812 coreconfigitem(
2825 b'web',
2813 b'web',
2826 b'errorlog',
2814 b'errorlog',
2827 default=b'-',
2815 default=b'-',
2828 )
2816 )
2829 coreconfigitem(
2817 coreconfigitem(
2830 b'web',
2818 b'web',
2831 b'ipv6',
2819 b'ipv6',
2832 default=False,
2820 default=False,
2833 )
2821 )
2834 coreconfigitem(
2822 coreconfigitem(
2835 b'web',
2823 b'web',
2836 b'maxchanges',
2824 b'maxchanges',
2837 default=10,
2825 default=10,
2838 )
2826 )
2839 coreconfigitem(
2827 coreconfigitem(
2840 b'web',
2828 b'web',
2841 b'maxfiles',
2829 b'maxfiles',
2842 default=10,
2830 default=10,
2843 )
2831 )
2844 coreconfigitem(
2832 coreconfigitem(
2845 b'web',
2833 b'web',
2846 b'maxshortchanges',
2834 b'maxshortchanges',
2847 default=60,
2835 default=60,
2848 )
2836 )
2849 coreconfigitem(
2837 coreconfigitem(
2850 b'web',
2838 b'web',
2851 b'motd',
2839 b'motd',
2852 default=b'',
2840 default=b'',
2853 )
2841 )
2854 coreconfigitem(
2842 coreconfigitem(
2855 b'web',
2843 b'web',
2856 b'name',
2844 b'name',
2857 default=dynamicdefault,
2845 default=dynamicdefault,
2858 )
2846 )
2859 coreconfigitem(
2847 coreconfigitem(
2860 b'web',
2848 b'web',
2861 b'port',
2849 b'port',
2862 default=8000,
2850 default=8000,
2863 )
2851 )
2864 coreconfigitem(
2852 coreconfigitem(
2865 b'web',
2853 b'web',
2866 b'prefix',
2854 b'prefix',
2867 default=b'',
2855 default=b'',
2868 )
2856 )
2869 coreconfigitem(
2857 coreconfigitem(
2870 b'web',
2858 b'web',
2871 b'push_ssl',
2859 b'push_ssl',
2872 default=True,
2860 default=True,
2873 )
2861 )
2874 coreconfigitem(
2862 coreconfigitem(
2875 b'web',
2863 b'web',
2876 b'refreshinterval',
2864 b'refreshinterval',
2877 default=20,
2865 default=20,
2878 )
2866 )
2879 coreconfigitem(
2867 coreconfigitem(
2880 b'web',
2868 b'web',
2881 b'server-header',
2869 b'server-header',
2882 default=None,
2870 default=None,
2883 )
2871 )
2884 coreconfigitem(
2872 coreconfigitem(
2885 b'web',
2873 b'web',
2886 b'static',
2874 b'static',
2887 default=None,
2875 default=None,
2888 )
2876 )
2889 coreconfigitem(
2877 coreconfigitem(
2890 b'web',
2878 b'web',
2891 b'staticurl',
2879 b'staticurl',
2892 default=None,
2880 default=None,
2893 )
2881 )
2894 coreconfigitem(
2882 coreconfigitem(
2895 b'web',
2883 b'web',
2896 b'stripes',
2884 b'stripes',
2897 default=1,
2885 default=1,
2898 )
2886 )
2899 coreconfigitem(
2887 coreconfigitem(
2900 b'web',
2888 b'web',
2901 b'style',
2889 b'style',
2902 default=b'paper',
2890 default=b'paper',
2903 )
2891 )
2904 coreconfigitem(
2892 coreconfigitem(
2905 b'web',
2893 b'web',
2906 b'templates',
2894 b'templates',
2907 default=None,
2895 default=None,
2908 )
2896 )
2909 coreconfigitem(
2897 coreconfigitem(
2910 b'web',
2898 b'web',
2911 b'view',
2899 b'view',
2912 default=b'served',
2900 default=b'served',
2913 experimental=True,
2901 experimental=True,
2914 )
2902 )
2915 coreconfigitem(
2903 coreconfigitem(
2916 b'worker',
2904 b'worker',
2917 b'backgroundclose',
2905 b'backgroundclose',
2918 default=dynamicdefault,
2906 default=dynamicdefault,
2919 )
2907 )
2920 # Windows defaults to a limit of 512 open files. A buffer of 128
2908 # Windows defaults to a limit of 512 open files. A buffer of 128
2921 # should give us enough headway.
2909 # should give us enough headway.
2922 coreconfigitem(
2910 coreconfigitem(
2923 b'worker',
2911 b'worker',
2924 b'backgroundclosemaxqueue',
2912 b'backgroundclosemaxqueue',
2925 default=384,
2913 default=384,
2926 )
2914 )
2927 coreconfigitem(
2915 coreconfigitem(
2928 b'worker',
2916 b'worker',
2929 b'backgroundcloseminfilecount',
2917 b'backgroundcloseminfilecount',
2930 default=2048,
2918 default=2048,
2931 )
2919 )
2932 coreconfigitem(
2920 coreconfigitem(
2933 b'worker',
2921 b'worker',
2934 b'backgroundclosethreadcount',
2922 b'backgroundclosethreadcount',
2935 default=4,
2923 default=4,
2936 )
2924 )
2937 coreconfigitem(
2925 coreconfigitem(
2938 b'worker',
2926 b'worker',
2939 b'enabled',
2927 b'enabled',
2940 default=True,
2928 default=True,
2941 )
2929 )
2942 coreconfigitem(
2930 coreconfigitem(
2943 b'worker',
2931 b'worker',
2944 b'numcpus',
2932 b'numcpus',
2945 default=None,
2933 default=None,
2946 )
2934 )
2947
2935
2948 # Rebase related configuration moved to core because other extension are doing
2936 # Rebase related configuration moved to core because other extension are doing
2949 # strange things. For example, shelve import the extensions to reuse some bit
2937 # strange things. For example, shelve import the extensions to reuse some bit
2950 # without formally loading it.
2938 # without formally loading it.
2951 coreconfigitem(
2939 coreconfigitem(
2952 b'commands',
2940 b'commands',
2953 b'rebase.requiredest',
2941 b'rebase.requiredest',
2954 default=False,
2942 default=False,
2955 )
2943 )
2956 coreconfigitem(
2944 coreconfigitem(
2957 b'experimental',
2945 b'experimental',
2958 b'rebaseskipobsolete',
2946 b'rebaseskipobsolete',
2959 default=True,
2947 default=True,
2960 )
2948 )
2961 coreconfigitem(
2949 coreconfigitem(
2962 b'rebase',
2950 b'rebase',
2963 b'singletransaction',
2951 b'singletransaction',
2964 default=False,
2952 default=False,
2965 )
2953 )
2966 coreconfigitem(
2954 coreconfigitem(
2967 b'rebase',
2955 b'rebase',
2968 b'experimental.inmemory',
2956 b'experimental.inmemory',
2969 default=False,
2957 default=False,
2970 )
2958 )
2971
2959
2972 # This setting controls creation of a rebase_source extra field
2960 # This setting controls creation of a rebase_source extra field
2973 # during rebase. When False, no such field is created. This is
2961 # during rebase. When False, no such field is created. This is
2974 # useful eg for incrementally converting changesets and then
2962 # useful eg for incrementally converting changesets and then
2975 # rebasing them onto an existing repo.
2963 # rebasing them onto an existing repo.
2976 # WARNING: this is an advanced setting reserved for people who know
2964 # WARNING: this is an advanced setting reserved for people who know
2977 # exactly what they are doing. Misuse of this setting can easily
2965 # exactly what they are doing. Misuse of this setting can easily
2978 # result in obsmarker cycles and a vivid headache.
2966 # result in obsmarker cycles and a vivid headache.
2979 coreconfigitem(
2967 coreconfigitem(
2980 b'rebase',
2968 b'rebase',
2981 b'store-source',
2969 b'store-source',
2982 default=True,
2970 default=True,
2983 experimental=True,
2971 experimental=True,
2984 )
2972 )
@@ -1,4038 +1,4043 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 # coding: utf-8
2 # coding: utf-8
3 #
3 #
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import functools
10 import functools
11 import os
11 import os
12 import random
12 import random
13 import re
13 import re
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from concurrent import futures
18 from concurrent import futures
19 from typing import (
19 from typing import (
20 Optional,
20 Optional,
21 )
21 )
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 bin,
25 bin,
26 hex,
26 hex,
27 nullrev,
27 nullrev,
28 sha1nodeconstants,
28 sha1nodeconstants,
29 short,
29 short,
30 )
30 )
31 from .pycompat import (
31 from .pycompat import (
32 delattr,
32 delattr,
33 getattr,
33 getattr,
34 )
34 )
35 from . import (
35 from . import (
36 bookmarks,
36 bookmarks,
37 branchmap,
37 branchmap,
38 bundle2,
38 bundle2,
39 bundlecaches,
39 bundlecaches,
40 changegroup,
40 changegroup,
41 color,
41 color,
42 commit,
42 commit,
43 context,
43 context,
44 dirstate,
44 dirstate,
45 discovery,
45 discovery,
46 encoding,
46 encoding,
47 error,
47 error,
48 exchange,
48 exchange,
49 extensions,
49 extensions,
50 filelog,
50 filelog,
51 hook,
51 hook,
52 lock as lockmod,
52 lock as lockmod,
53 match as matchmod,
53 match as matchmod,
54 mergestate as mergestatemod,
54 mergestate as mergestatemod,
55 mergeutil,
55 mergeutil,
56 namespaces,
56 namespaces,
57 narrowspec,
57 narrowspec,
58 obsolete,
58 obsolete,
59 pathutil,
59 pathutil,
60 phases,
60 phases,
61 policy,
61 pushkey,
62 pushkey,
62 pycompat,
63 pycompat,
63 rcutil,
64 rcutil,
64 repoview,
65 repoview,
65 requirements as requirementsmod,
66 requirements as requirementsmod,
66 revlog,
67 revlog,
67 revset,
68 revset,
68 revsetlang,
69 revsetlang,
69 scmutil,
70 scmutil,
70 sparse,
71 sparse,
71 store as storemod,
72 store as storemod,
72 subrepoutil,
73 subrepoutil,
73 tags as tagsmod,
74 tags as tagsmod,
74 transaction,
75 transaction,
75 txnutil,
76 txnutil,
76 util,
77 util,
77 vfs as vfsmod,
78 vfs as vfsmod,
78 wireprototypes,
79 wireprototypes,
79 )
80 )
80
81
81 from .interfaces import (
82 from .interfaces import (
82 repository,
83 repository,
83 util as interfaceutil,
84 util as interfaceutil,
84 )
85 )
85
86
86 from .utils import (
87 from .utils import (
87 hashutil,
88 hashutil,
88 procutil,
89 procutil,
89 stringutil,
90 stringutil,
90 urlutil,
91 urlutil,
91 )
92 )
92
93
93 from .revlogutils import (
94 from .revlogutils import (
94 concurrency_checker as revlogchecker,
95 concurrency_checker as revlogchecker,
95 constants as revlogconst,
96 constants as revlogconst,
96 sidedata as sidedatamod,
97 sidedata as sidedatamod,
97 )
98 )
98
99
99 release = lockmod.release
100 release = lockmod.release
100 urlerr = util.urlerr
101 urlerr = util.urlerr
101 urlreq = util.urlreq
102 urlreq = util.urlreq
102
103
103 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
104 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
104 b"^((dirstate|narrowspec.dirstate).*|branch$)"
105 b"^((dirstate|narrowspec.dirstate).*|branch$)"
105 )
106 )
106
107
107 # set of (path, vfs-location) tuples. vfs-location is:
108 # set of (path, vfs-location) tuples. vfs-location is:
108 # - 'plain for vfs relative paths
109 # - 'plain for vfs relative paths
109 # - '' for svfs relative paths
110 # - '' for svfs relative paths
110 _cachedfiles = set()
111 _cachedfiles = set()
111
112
112
113
113 class _basefilecache(scmutil.filecache):
114 class _basefilecache(scmutil.filecache):
114 """All filecache usage on repo are done for logic that should be unfiltered"""
115 """All filecache usage on repo are done for logic that should be unfiltered"""
115
116
116 def __get__(self, repo, type=None):
117 def __get__(self, repo, type=None):
117 if repo is None:
118 if repo is None:
118 return self
119 return self
119 # proxy to unfiltered __dict__ since filtered repo has no entry
120 # proxy to unfiltered __dict__ since filtered repo has no entry
120 unfi = repo.unfiltered()
121 unfi = repo.unfiltered()
121 try:
122 try:
122 return unfi.__dict__[self.sname]
123 return unfi.__dict__[self.sname]
123 except KeyError:
124 except KeyError:
124 pass
125 pass
125 return super(_basefilecache, self).__get__(unfi, type)
126 return super(_basefilecache, self).__get__(unfi, type)
126
127
127 def set(self, repo, value):
128 def set(self, repo, value):
128 return super(_basefilecache, self).set(repo.unfiltered(), value)
129 return super(_basefilecache, self).set(repo.unfiltered(), value)
129
130
130
131
131 class repofilecache(_basefilecache):
132 class repofilecache(_basefilecache):
132 """filecache for files in .hg but outside of .hg/store"""
133 """filecache for files in .hg but outside of .hg/store"""
133
134
134 def __init__(self, *paths):
135 def __init__(self, *paths):
135 super(repofilecache, self).__init__(*paths)
136 super(repofilecache, self).__init__(*paths)
136 for path in paths:
137 for path in paths:
137 _cachedfiles.add((path, b'plain'))
138 _cachedfiles.add((path, b'plain'))
138
139
139 def join(self, obj, fname):
140 def join(self, obj, fname):
140 return obj.vfs.join(fname)
141 return obj.vfs.join(fname)
141
142
142
143
143 class storecache(_basefilecache):
144 class storecache(_basefilecache):
144 """filecache for files in the store"""
145 """filecache for files in the store"""
145
146
146 def __init__(self, *paths):
147 def __init__(self, *paths):
147 super(storecache, self).__init__(*paths)
148 super(storecache, self).__init__(*paths)
148 for path in paths:
149 for path in paths:
149 _cachedfiles.add((path, b''))
150 _cachedfiles.add((path, b''))
150
151
151 def join(self, obj, fname):
152 def join(self, obj, fname):
152 return obj.sjoin(fname)
153 return obj.sjoin(fname)
153
154
154
155
155 class changelogcache(storecache):
156 class changelogcache(storecache):
156 """filecache for the changelog"""
157 """filecache for the changelog"""
157
158
158 def __init__(self):
159 def __init__(self):
159 super(changelogcache, self).__init__()
160 super(changelogcache, self).__init__()
160 _cachedfiles.add((b'00changelog.i', b''))
161 _cachedfiles.add((b'00changelog.i', b''))
161 _cachedfiles.add((b'00changelog.n', b''))
162 _cachedfiles.add((b'00changelog.n', b''))
162
163
163 def tracked_paths(self, obj):
164 def tracked_paths(self, obj):
164 paths = [self.join(obj, b'00changelog.i')]
165 paths = [self.join(obj, b'00changelog.i')]
165 if obj.store.opener.options.get(b'persistent-nodemap', False):
166 if obj.store.opener.options.get(b'persistent-nodemap', False):
166 paths.append(self.join(obj, b'00changelog.n'))
167 paths.append(self.join(obj, b'00changelog.n'))
167 return paths
168 return paths
168
169
169
170
class manifestlogcache(storecache):
    """filecache dedicated to the manifestlog files"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        # track both the revlog index and the (optional) persistent nodemap
        _cachedfiles.update(
            [(b'00manifest.i', b''), (b'00manifest.n', b'')]
        )

    def tracked_paths(self, obj):
        # the ``.n`` nodemap file only exists when persistent-nodemap is on
        names = [b'00manifest.i']
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            names.append(b'00manifest.n')
        return [self.join(obj, name) for name in names]
183
184
184
185
class mixedrepostorecache(_basefilecache):
    """filecache for a mix files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of (path, location) pairs
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        # dispatch on the recorded location: b'plain' lives under .hg/,
        # b'' lives under .hg/store/; anything else is a programming error
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        if location == b'':
            return obj.sjoin(fname)
        raise error.ProgrammingError(b'unexpected location: %s' % location)
204
205
205
206
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    # filecache entries always live on the unfiltered repository
    entry = repo.unfiltered()._filecache.get(name, None)
    if entry:
        return entry.obj, True
    return None, False
215
216
216
217
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # accessed through a filtered view: defer to the attribute
            # cached (or computed on demand) on the unfiltered repository
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
225
226
226
227
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # Store the computed value straight into the instance __dict__ with
        # object.__setattr__ so any __setattr__ override on ``obj`` is
        # bypassed.
        object.__setattr__(obj, self.name, value)
232
233
233
234
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    # a cached property lives directly in the instance dictionary of the
    # unfiltered repository
    return name in repo.unfiltered().__dict__
237
238
238
239
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""

    @functools.wraps(orig)
    def inner(repo, *args, **kwargs):
        # swap the (possibly filtered) repo for its unfiltered counterpart
        return orig(repo.unfiltered(), *args, **kwargs)

    return inner
247
248
248
249
# Capabilities advertised by a modern local peer; each entry matches a
# wire-protocol command implemented by ``localpeer`` below.
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
# Legacy peers additionally support the pre-getbundle 'changegroupsubset'
# command (see ``locallegacypeer``).
legacycaps = moderncaps.union({b'changegroupsubset'})
258
259
259
260
@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor:
    """Command executor that calls methods on a local peer directly.

    Results are wrapped in already-resolved futures so the executor can be
    used interchangeably with remote (asynchronous) executors.
    """

    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        # enforce the executor life cycle: no calls after send or close
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )
        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        method = getattr(self._peer, pycompat.sysstr(command))

        fut = futures.Future()
        try:
            res = method(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(fut, sys.exc_info()[1:])
        else:
            fut.set_result(res)
        return fut

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
304
305
305
306
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None, path=None, remotehidden=False):
        # The peer never exposes ``repo`` directly: it wraps a filtered
        # view (``served`` or, when remotehidden, ``served.hidden``).
        super(localpeer, self).__init__(
            repo.ui, path=path, remotehidden=remotehidden
        )

        if caps is None:
            caps = moderncaps.copy()
        if remotehidden:
            self._repo = repo.filtered(b'served.hidden')
        else:
            self._repo = repo.filtered(b'served')
        if repo._wanted_sidedata:
            # advertise which sidedata categories this repo wants to receive
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        """Return the URL of the wrapped repository."""
        return self._repo.url()

    def local(self):
        """Return the underlying local repository object."""
        return self._repo

    def canpush(self):
        """A local peer can always be pushed to."""
        return True

    def close(self):
        """Close the wrapped repository."""
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        """Delegate to the wrapped repository's branchmap."""
        return self._repo.branchmap()

    def capabilities(self):
        """Return the (possibly restricted) capability set computed at init."""
        return self._caps

    def get_cached_bundle_inline(self, path):
        # not needed with local peer
        raise NotImplementedError

    def clonebundles(self):
        """Return the clone-bundles manifest of the wrapped repository."""
        return bundlecaches.get_manifest(self._repo)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        """Build a bundle via exchange.getbundlechunks and return it as an
        unbundler object (bundle2 or changegroup '01' depending on what the
        caller's ``bundlecaps`` requested)."""
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        """Delegate to the wrapped repository's heads."""
        return self._repo.heads()

    def known(self, nodes):
        """Delegate to the wrapped repository's known-nodes query."""
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        """Delegate to the wrapped repository's pushkey listing."""
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        """Delegate symbol lookup to the wrapped repository."""
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        """Delegate to the wrapped repository's pushkey."""
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        """Stream clones are not supported against a local peer."""
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # translate a push race into the wire-level response error
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        """Return an executor that invokes commands on this peer directly."""
        return localcommandexecutor(self)

    # End of peer interface.
460
460
461
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo, path=None, remotehidden=False):
        super(locallegacypeer, self).__init__(
            repo, caps=legacycaps, path=path, remotehidden=remotehidden
        )

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        """Delegate the legacy 'between' command to the wrapped repo."""
        return self._repo.between(pairs)

    def branches(self, nodes):
        """Delegate the legacy 'branches' command to the wrapped repo."""
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        """Build a '01' changegroup rooted at ``nodes`` up to all heads."""
        out = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, out, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        """Build a '01' changegroup between ``bases`` and ``heads``."""
        out = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, out, b'01', source)

    # End of baselegacywirecommands interface.
492
493
493
494
# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# Each function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
502
503
503
504
def _getsharedvfs(hgvfs, requirements):
    """Return a vfs rooted at the share source of a shared repository.

    ``hgvfs`` points at ``.hg/`` of the current (shared) repository and
    ``requirements`` is that repository's requirement set.

    Raises ``error.RepoError`` when the recorded source directory no longer
    exists.
    """
    # ``.hg/sharedpath`` records where the source repository lives: an
    # absolute path for the ``shared`` requirement, a path relative to
    # ``.hg/`` for ``relshared``.
    source_path = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        source_path = util.normpath(hgvfs.join(source_path))

    source_vfs = vfsmod.vfs(source_path, realpath=True)
    if not source_vfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % source_vfs.base
        )
    return source_vfs
527
528
528
529
529 def _readrequires(vfs, allowmissing):
530 def _readrequires(vfs, allowmissing):
530 """reads the require file present at root of this vfs
531 """reads the require file present at root of this vfs
531 and return a set of requirements
532 and return a set of requirements
532
533
533 If allowmissing is True, we suppress FileNotFoundError if raised"""
534 If allowmissing is True, we suppress FileNotFoundError if raised"""
534 # requires file contains a newline-delimited list of
535 # requires file contains a newline-delimited list of
535 # features/capabilities the opener (us) must have in order to use
536 # features/capabilities the opener (us) must have in order to use
536 # the repository. This file was introduced in Mercurial 0.9.2,
537 # the repository. This file was introduced in Mercurial 0.9.2,
537 # which means very old repositories may not have one. We assume
538 # which means very old repositories may not have one. We assume
538 # a missing file translates to no requirements.
539 # a missing file translates to no requirements.
539 read = vfs.tryread if allowmissing else vfs.read
540 read = vfs.tryread if allowmissing else vfs.read
540 return set(read(b'requires').splitlines())
541 return set(read(b'requires').splitlines())
541
542
542
543
543 def makelocalrepository(baseui, path: bytes, intents=None):
544 def makelocalrepository(baseui, path: bytes, intents=None):
544 """Create a local repository object.
545 """Create a local repository object.
545
546
546 Given arguments needed to construct a local repository, this function
547 Given arguments needed to construct a local repository, this function
547 performs various early repository loading functionality (such as
548 performs various early repository loading functionality (such as
548 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
549 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
549 the repository can be opened, derives a type suitable for representing
550 the repository can be opened, derives a type suitable for representing
550 that repository, and returns an instance of it.
551 that repository, and returns an instance of it.
551
552
552 The returned object conforms to the ``repository.completelocalrepository``
553 The returned object conforms to the ``repository.completelocalrepository``
553 interface.
554 interface.
554
555
555 The repository type is derived by calling a series of factory functions
556 The repository type is derived by calling a series of factory functions
556 for each aspect/interface of the final repository. These are defined by
557 for each aspect/interface of the final repository. These are defined by
557 ``REPO_INTERFACES``.
558 ``REPO_INTERFACES``.
558
559
559 Each factory function is called to produce a type implementing a specific
560 Each factory function is called to produce a type implementing a specific
560 interface. The cumulative list of returned types will be combined into a
561 interface. The cumulative list of returned types will be combined into a
561 new type and that type will be instantiated to represent the local
562 new type and that type will be instantiated to represent the local
562 repository.
563 repository.
563
564
564 The factory functions each receive various state that may be consulted
565 The factory functions each receive various state that may be consulted
565 as part of deriving a type.
566 as part of deriving a type.
566
567
567 Extensions should wrap these factory functions to customize repository type
568 Extensions should wrap these factory functions to customize repository type
568 creation. Note that an extension's wrapped function may be called even if
569 creation. Note that an extension's wrapped function may be called even if
569 that extension is not loaded for the repo being constructed. Extensions
570 that extension is not loaded for the repo being constructed. Extensions
570 should check if their ``__name__`` appears in the
571 should check if their ``__name__`` appears in the
571 ``extensionmodulenames`` set passed to the factory function and no-op if
572 ``extensionmodulenames`` set passed to the factory function and no-op if
572 not.
573 not.
573 """
574 """
574 ui = baseui.copy()
575 ui = baseui.copy()
575 # Prevent copying repo configuration.
576 # Prevent copying repo configuration.
576 ui.copy = baseui.copy
577 ui.copy = baseui.copy
577
578
578 # Working directory VFS rooted at repository root.
579 # Working directory VFS rooted at repository root.
579 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
580 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
580
581
581 # Main VFS for .hg/ directory.
582 # Main VFS for .hg/ directory.
582 hgpath = wdirvfs.join(b'.hg')
583 hgpath = wdirvfs.join(b'.hg')
583 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
584 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
584 # Whether this repository is shared one or not
585 # Whether this repository is shared one or not
585 shared = False
586 shared = False
586 # If this repository is shared, vfs pointing to shared repo
587 # If this repository is shared, vfs pointing to shared repo
587 sharedvfs = None
588 sharedvfs = None
588
589
589 # The .hg/ path should exist and should be a directory. All other
590 # The .hg/ path should exist and should be a directory. All other
590 # cases are errors.
591 # cases are errors.
591 if not hgvfs.isdir():
592 if not hgvfs.isdir():
592 try:
593 try:
593 hgvfs.stat()
594 hgvfs.stat()
594 except FileNotFoundError:
595 except FileNotFoundError:
595 pass
596 pass
596 except ValueError as e:
597 except ValueError as e:
597 # Can be raised on Python 3.8 when path is invalid.
598 # Can be raised on Python 3.8 when path is invalid.
598 raise error.Abort(
599 raise error.Abort(
599 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
600 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
600 )
601 )
601
602
602 raise error.RepoError(_(b'repository %s not found') % path)
603 raise error.RepoError(_(b'repository %s not found') % path)
603
604
604 requirements = _readrequires(hgvfs, True)
605 requirements = _readrequires(hgvfs, True)
605 shared = (
606 shared = (
606 requirementsmod.SHARED_REQUIREMENT in requirements
607 requirementsmod.SHARED_REQUIREMENT in requirements
607 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
608 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
608 )
609 )
609 storevfs = None
610 storevfs = None
610 if shared:
611 if shared:
611 # This is a shared repo
612 # This is a shared repo
612 sharedvfs = _getsharedvfs(hgvfs, requirements)
613 sharedvfs = _getsharedvfs(hgvfs, requirements)
613 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
614 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
614 else:
615 else:
615 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
616 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
616
617
617 # if .hg/requires contains the sharesafe requirement, it means
618 # if .hg/requires contains the sharesafe requirement, it means
618 # there exists a `.hg/store/requires` too and we should read it
619 # there exists a `.hg/store/requires` too and we should read it
619 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
620 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
620 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
621 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
621 # is not present, refer checkrequirementscompat() for that
622 # is not present, refer checkrequirementscompat() for that
622 #
623 #
623 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
624 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
624 # repository was shared the old way. We check the share source .hg/requires
625 # repository was shared the old way. We check the share source .hg/requires
625 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
626 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
626 # to be reshared
627 # to be reshared
627 hint = _(b"see `hg help config.format.use-share-safe` for more information")
628 hint = _(b"see `hg help config.format.use-share-safe` for more information")
628 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
629 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
629 if (
630 if (
630 shared
631 shared
631 and requirementsmod.SHARESAFE_REQUIREMENT
632 and requirementsmod.SHARESAFE_REQUIREMENT
632 not in _readrequires(sharedvfs, True)
633 not in _readrequires(sharedvfs, True)
633 ):
634 ):
634 mismatch_warn = ui.configbool(
635 mismatch_warn = ui.configbool(
635 b'share', b'safe-mismatch.source-not-safe.warn'
636 b'share', b'safe-mismatch.source-not-safe.warn'
636 )
637 )
637 mismatch_config = ui.config(
638 mismatch_config = ui.config(
638 b'share', b'safe-mismatch.source-not-safe'
639 b'share', b'safe-mismatch.source-not-safe'
639 )
640 )
640 mismatch_verbose_upgrade = ui.configbool(
641 mismatch_verbose_upgrade = ui.configbool(
641 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
642 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
642 )
643 )
643 if mismatch_config in (
644 if mismatch_config in (
644 b'downgrade-allow',
645 b'downgrade-allow',
645 b'allow',
646 b'allow',
646 b'downgrade-abort',
647 b'downgrade-abort',
647 ):
648 ):
648 # prevent cyclic import localrepo -> upgrade -> localrepo
649 # prevent cyclic import localrepo -> upgrade -> localrepo
649 from . import upgrade
650 from . import upgrade
650
651
651 upgrade.downgrade_share_to_non_safe(
652 upgrade.downgrade_share_to_non_safe(
652 ui,
653 ui,
653 hgvfs,
654 hgvfs,
654 sharedvfs,
655 sharedvfs,
655 requirements,
656 requirements,
656 mismatch_config,
657 mismatch_config,
657 mismatch_warn,
658 mismatch_warn,
658 mismatch_verbose_upgrade,
659 mismatch_verbose_upgrade,
659 )
660 )
660 elif mismatch_config == b'abort':
661 elif mismatch_config == b'abort':
661 raise error.Abort(
662 raise error.Abort(
662 _(b"share source does not support share-safe requirement"),
663 _(b"share source does not support share-safe requirement"),
663 hint=hint,
664 hint=hint,
664 )
665 )
665 else:
666 else:
666 raise error.Abort(
667 raise error.Abort(
667 _(
668 _(
668 b"share-safe mismatch with source.\nUnrecognized"
669 b"share-safe mismatch with source.\nUnrecognized"
669 b" value '%s' of `share.safe-mismatch.source-not-safe`"
670 b" value '%s' of `share.safe-mismatch.source-not-safe`"
670 b" set."
671 b" set."
671 )
672 )
672 % mismatch_config,
673 % mismatch_config,
673 hint=hint,
674 hint=hint,
674 )
675 )
675 else:
676 else:
676 requirements |= _readrequires(storevfs, False)
677 requirements |= _readrequires(storevfs, False)
677 elif shared:
678 elif shared:
678 sourcerequires = _readrequires(sharedvfs, False)
679 sourcerequires = _readrequires(sharedvfs, False)
679 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
680 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
680 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
681 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
681 mismatch_warn = ui.configbool(
682 mismatch_warn = ui.configbool(
682 b'share', b'safe-mismatch.source-safe.warn'
683 b'share', b'safe-mismatch.source-safe.warn'
683 )
684 )
684 mismatch_verbose_upgrade = ui.configbool(
685 mismatch_verbose_upgrade = ui.configbool(
685 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
686 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
686 )
687 )
687 if mismatch_config in (
688 if mismatch_config in (
688 b'upgrade-allow',
689 b'upgrade-allow',
689 b'allow',
690 b'allow',
690 b'upgrade-abort',
691 b'upgrade-abort',
691 ):
692 ):
692 # prevent cyclic import localrepo -> upgrade -> localrepo
693 # prevent cyclic import localrepo -> upgrade -> localrepo
693 from . import upgrade
694 from . import upgrade
694
695
695 upgrade.upgrade_share_to_safe(
696 upgrade.upgrade_share_to_safe(
696 ui,
697 ui,
697 hgvfs,
698 hgvfs,
698 storevfs,
699 storevfs,
699 requirements,
700 requirements,
700 mismatch_config,
701 mismatch_config,
701 mismatch_warn,
702 mismatch_warn,
702 mismatch_verbose_upgrade,
703 mismatch_verbose_upgrade,
703 )
704 )
704 elif mismatch_config == b'abort':
705 elif mismatch_config == b'abort':
705 raise error.Abort(
706 raise error.Abort(
706 _(
707 _(
707 b'version mismatch: source uses share-safe'
708 b'version mismatch: source uses share-safe'
708 b' functionality while the current share does not'
709 b' functionality while the current share does not'
709 ),
710 ),
710 hint=hint,
711 hint=hint,
711 )
712 )
712 else:
713 else:
713 raise error.Abort(
714 raise error.Abort(
714 _(
715 _(
715 b"share-safe mismatch with source.\nUnrecognized"
716 b"share-safe mismatch with source.\nUnrecognized"
716 b" value '%s' of `share.safe-mismatch.source-safe` set."
717 b" value '%s' of `share.safe-mismatch.source-safe` set."
717 )
718 )
718 % mismatch_config,
719 % mismatch_config,
719 hint=hint,
720 hint=hint,
720 )
721 )
721
722
722 # The .hg/hgrc file may load extensions or contain config options
723 # The .hg/hgrc file may load extensions or contain config options
723 # that influence repository construction. Attempt to load it and
724 # that influence repository construction. Attempt to load it and
724 # process any new extensions that it may have pulled in.
725 # process any new extensions that it may have pulled in.
725 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
726 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
726 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
727 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
727 extensions.loadall(ui)
728 extensions.loadall(ui)
728 extensions.populateui(ui)
729 extensions.populateui(ui)
729
730
730 # Set of module names of extensions loaded for this repository.
731 # Set of module names of extensions loaded for this repository.
731 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
732 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
732
733
733 supportedrequirements = gathersupportedrequirements(ui)
734 supportedrequirements = gathersupportedrequirements(ui)
734
735
735 # We first validate the requirements are known.
736 # We first validate the requirements are known.
736 ensurerequirementsrecognized(requirements, supportedrequirements)
737 ensurerequirementsrecognized(requirements, supportedrequirements)
737
738
738 # Then we validate that the known set is reasonable to use together.
739 # Then we validate that the known set is reasonable to use together.
739 ensurerequirementscompatible(ui, requirements)
740 ensurerequirementscompatible(ui, requirements)
740
741
741 # TODO there are unhandled edge cases related to opening repositories with
742 # TODO there are unhandled edge cases related to opening repositories with
742 # shared storage. If storage is shared, we should also test for requirements
743 # shared storage. If storage is shared, we should also test for requirements
743 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
744 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
744 # that repo, as that repo may load extensions needed to open it. This is a
745 # that repo, as that repo may load extensions needed to open it. This is a
745 # bit complicated because we don't want the other hgrc to overwrite settings
746 # bit complicated because we don't want the other hgrc to overwrite settings
746 # in this hgrc.
747 # in this hgrc.
747 #
748 #
748 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
749 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
749 # file when sharing repos. But if a requirement is added after the share is
750 # file when sharing repos. But if a requirement is added after the share is
750 # performed, thereby introducing a new requirement for the opener, we may
751 # performed, thereby introducing a new requirement for the opener, we may
751 # will not see that and could encounter a run-time error interacting with
752 # will not see that and could encounter a run-time error interacting with
752 # that shared store since it has an unknown-to-us requirement.
753 # that shared store since it has an unknown-to-us requirement.
753
754
754 # At this point, we know we should be capable of opening the repository.
755 # At this point, we know we should be capable of opening the repository.
755 # Now get on with doing that.
756 # Now get on with doing that.
756
757
757 features = set()
758 features = set()
758
759
759 # The "store" part of the repository holds versioned data. How it is
760 # The "store" part of the repository holds versioned data. How it is
760 # accessed is determined by various requirements. If `shared` or
761 # accessed is determined by various requirements. If `shared` or
761 # `relshared` requirements are present, this indicates current repository
762 # `relshared` requirements are present, this indicates current repository
762 # is a share and store exists in path mentioned in `.hg/sharedpath`
763 # is a share and store exists in path mentioned in `.hg/sharedpath`
763 if shared:
764 if shared:
764 storebasepath = sharedvfs.base
765 storebasepath = sharedvfs.base
765 cachepath = sharedvfs.join(b'cache')
766 cachepath = sharedvfs.join(b'cache')
766 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
767 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
767 else:
768 else:
768 storebasepath = hgvfs.base
769 storebasepath = hgvfs.base
769 cachepath = hgvfs.join(b'cache')
770 cachepath = hgvfs.join(b'cache')
770 wcachepath = hgvfs.join(b'wcache')
771 wcachepath = hgvfs.join(b'wcache')
771
772
772 # The store has changed over time and the exact layout is dictated by
773 # The store has changed over time and the exact layout is dictated by
773 # requirements. The store interface abstracts differences across all
774 # requirements. The store interface abstracts differences across all
774 # of them.
775 # of them.
775 store = makestore(
776 store = makestore(
776 requirements,
777 requirements,
777 storebasepath,
778 storebasepath,
778 lambda base: vfsmod.vfs(base, cacheaudited=True),
779 lambda base: vfsmod.vfs(base, cacheaudited=True),
779 )
780 )
780 hgvfs.createmode = store.createmode
781 hgvfs.createmode = store.createmode
781
782
782 storevfs = store.vfs
783 storevfs = store.vfs
783 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
784 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
784
785
785 if (
786 if (
786 requirementsmod.REVLOGV2_REQUIREMENT in requirements
787 requirementsmod.REVLOGV2_REQUIREMENT in requirements
787 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
788 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
788 ):
789 ):
789 features.add(repository.REPO_FEATURE_SIDE_DATA)
790 features.add(repository.REPO_FEATURE_SIDE_DATA)
790 # the revlogv2 docket introduced race condition that we need to fix
791 # the revlogv2 docket introduced race condition that we need to fix
791 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
792 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
792
793
793 # The cache vfs is used to manage cache files.
794 # The cache vfs is used to manage cache files.
794 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
795 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
795 cachevfs.createmode = store.createmode
796 cachevfs.createmode = store.createmode
796 # The cache vfs is used to manage cache files related to the working copy
797 # The cache vfs is used to manage cache files related to the working copy
797 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
798 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
798 wcachevfs.createmode = store.createmode
799 wcachevfs.createmode = store.createmode
799
800
800 # Now resolve the type for the repository object. We do this by repeatedly
801 # Now resolve the type for the repository object. We do this by repeatedly
801 # calling a factory function to produces types for specific aspects of the
802 # calling a factory function to produces types for specific aspects of the
802 # repo's operation. The aggregate returned types are used as base classes
803 # repo's operation. The aggregate returned types are used as base classes
803 # for a dynamically-derived type, which will represent our new repository.
804 # for a dynamically-derived type, which will represent our new repository.
804
805
805 bases = []
806 bases = []
806 extrastate = {}
807 extrastate = {}
807
808
808 for iface, fn in REPO_INTERFACES:
809 for iface, fn in REPO_INTERFACES:
809 # We pass all potentially useful state to give extensions tons of
810 # We pass all potentially useful state to give extensions tons of
810 # flexibility.
811 # flexibility.
811 typ = fn()(
812 typ = fn()(
812 ui=ui,
813 ui=ui,
813 intents=intents,
814 intents=intents,
814 requirements=requirements,
815 requirements=requirements,
815 features=features,
816 features=features,
816 wdirvfs=wdirvfs,
817 wdirvfs=wdirvfs,
817 hgvfs=hgvfs,
818 hgvfs=hgvfs,
818 store=store,
819 store=store,
819 storevfs=storevfs,
820 storevfs=storevfs,
820 storeoptions=storevfs.options,
821 storeoptions=storevfs.options,
821 cachevfs=cachevfs,
822 cachevfs=cachevfs,
822 wcachevfs=wcachevfs,
823 wcachevfs=wcachevfs,
823 extensionmodulenames=extensionmodulenames,
824 extensionmodulenames=extensionmodulenames,
824 extrastate=extrastate,
825 extrastate=extrastate,
825 baseclasses=bases,
826 baseclasses=bases,
826 )
827 )
827
828
828 if not isinstance(typ, type):
829 if not isinstance(typ, type):
829 raise error.ProgrammingError(
830 raise error.ProgrammingError(
830 b'unable to construct type for %s' % iface
831 b'unable to construct type for %s' % iface
831 )
832 )
832
833
833 bases.append(typ)
834 bases.append(typ)
834
835
835 # type() allows you to use characters in type names that wouldn't be
836 # type() allows you to use characters in type names that wouldn't be
836 # recognized as Python symbols in source code. We abuse that to add
837 # recognized as Python symbols in source code. We abuse that to add
837 # rich information about our constructed repo.
838 # rich information about our constructed repo.
838 name = pycompat.sysstr(
839 name = pycompat.sysstr(
839 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
840 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
840 )
841 )
841
842
842 cls = type(name, tuple(bases), {})
843 cls = type(name, tuple(bases), {})
843
844
844 return cls(
845 return cls(
845 baseui=baseui,
846 baseui=baseui,
846 ui=ui,
847 ui=ui,
847 origroot=path,
848 origroot=path,
848 wdirvfs=wdirvfs,
849 wdirvfs=wdirvfs,
849 hgvfs=hgvfs,
850 hgvfs=hgvfs,
850 requirements=requirements,
851 requirements=requirements,
851 supportedrequirements=supportedrequirements,
852 supportedrequirements=supportedrequirements,
852 sharedpath=storebasepath,
853 sharedpath=storebasepath,
853 store=store,
854 store=store,
854 cachevfs=cachevfs,
855 cachevfs=cachevfs,
855 wcachevfs=wcachevfs,
856 wcachevfs=wcachevfs,
856 features=features,
857 features=features,
857 intents=intents,
858 intents=intents,
858 )
859 )
859
860
860
861
def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is vfs object pointing to source repo if the current one is a
    shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    # Build the ordered list of (path, root) config files to attempt.
    # The shared source's hgrc, when applicable, is read first so the
    # local repo's own files can override its settings.
    candidates = []
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        candidates.append((sharedvfs.join(b'hgrc'), sharedvfs.base))
    candidates.append((hgvfs.join(b'hgrc'), wdirvfs.base))
    candidates.append((hgvfs.join(b'hgrc-not-shared'), wdirvfs.base))

    loaded = False
    for path, root in candidates:
        try:
            ui.readconfig(path, root=root)
            loaded = True
        except IOError:
            # A missing/unreadable config file is not an error; simply
            # move on to the next candidate.
            pass

    return loaded
907
908
908
909
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Maps a repository requirement to the list of extension names that
    # must be enabled automatically when that requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement in requirements:
            for name in names:
                # Only auto-enable when the user hasn't configured the
                # extension themselves (even to disable it).
                if not ui.hasconfig(b'extensions', name):
                    ui.setconfig(b'extensions', name, b'', source=b'autoload')
935
936
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Seed with every requirement this module supports natively.
    supported = set(localrepository._basesupported)

    # Run ``featuresetupfuncs`` hooks, but only those registered by an
    # extension that is actually enabled on this ui instance.
    enabledmodules = {m.__name__ for n, m in extensions.extensions(ui)}
    for setupfn in featuresetupfuncs:
        if setupfn.__module__ in enabledmodules:
            setupfn(ui, supported)

    # Derive requirements from compression engines that are usable for
    # revlog storage (available and exposing a revlog header).
    for enginename in util.compengines:
        engine = util.compengines[enginename]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % enginename)
        if engine.name() == b'zstd':
            supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported
958
959
959
960
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError``
    if any requirement in that set is not recognized by currently loaded
    code, or if the requires file appears corrupt.
    """
    unknown = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        # A requirement written by Mercurial always starts with an
        # alphanumeric character; anything else means the file is corrupt.
        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        unknown.add(requirement)

    if unknown:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(unknown)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
989
990
990
991
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    needssparse = requirementsmod.SPARSE_REQUIREMENT in requirements
    if needssparse and not sparse.enabled:
        # The repo requires the sparse feature but the extension that
        # provides it is not loaded; refuse to open.
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )
1015
1016
1016
1017
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    # Guard-clause form: fall back through progressively older layouts.
    if requirementsmod.STORE_REQUIREMENT not in requirements:
        # Oldest layout: no dedicated store directory.
        return storemod.basicstore(path, vfstype)

    if requirementsmod.FNCACHE_REQUIREMENT not in requirements:
        # Store directory present but no fncache: encoded filenames only.
        return storemod.encodedstore(path, vfstype)

    # Modern layout: fncache, optionally with dotencode filename encoding.
    dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
    return storemod.fncachestore(path, vfstype, dotencode)
1027
1028
1028
1029
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    hasmodernrevlog = (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    )
    if hasmodernrevlog:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:
        # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        if writecopiesto in (b'changeset-only', b'compatibility'):
            options[b'copies-storage'] = b'extra'

    return options
1066
1067
1067
1068
1068 def resolverevlogstorevfsoptions(ui, requirements, features):
1069 def resolverevlogstorevfsoptions(ui, requirements, features):
1069 """Resolve opener options specific to revlogs."""
1070 """Resolve opener options specific to revlogs."""
1070
1071
1071 options = {}
1072 options = {}
1072 options[b'flagprocessors'] = {}
1073 options[b'flagprocessors'] = {}
1073
1074
1074 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1075 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1075 options[b'revlogv1'] = True
1076 options[b'revlogv1'] = True
1076 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1077 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1077 options[b'revlogv2'] = True
1078 options[b'revlogv2'] = True
1078 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1079 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1079 options[b'changelogv2'] = True
1080 options[b'changelogv2'] = True
1080 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1081 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1081 options[b'changelogv2.compute-rank'] = cmp_rank
1082 options[b'changelogv2.compute-rank'] = cmp_rank
1082
1083
1083 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1084 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1084 options[b'generaldelta'] = True
1085 options[b'generaldelta'] = True
1085
1086
1086 # experimental config: format.chunkcachesize
1087 # experimental config: format.chunkcachesize
1087 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1088 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1088 if chunkcachesize is not None:
1089 if chunkcachesize is not None:
1089 options[b'chunkcachesize'] = chunkcachesize
1090 options[b'chunkcachesize'] = chunkcachesize
1090
1091
1091 deltabothparents = ui.configbool(
1092 deltabothparents = ui.configbool(
1092 b'storage', b'revlog.optimize-delta-parent-choice'
1093 b'storage', b'revlog.optimize-delta-parent-choice'
1093 )
1094 )
1094 options[b'deltabothparents'] = deltabothparents
1095 options[b'deltabothparents'] = deltabothparents
1095 dps_cgds = ui.configint(
1096 dps_cgds = ui.configint(
1096 b'storage',
1097 b'storage',
1097 b'revlog.delta-parent-search.candidate-group-chunk-size',
1098 b'revlog.delta-parent-search.candidate-group-chunk-size',
1098 )
1099 )
1099 options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
1100 options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
1100 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1101 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1101
1102
1102 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1103 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1103 options[b'issue6528.fix-incoming'] = issue6528
1104 options[b'issue6528.fix-incoming'] = issue6528
1104
1105
1105 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1106 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1106 lazydeltabase = False
1107 lazydeltabase = False
1107 if lazydelta:
1108 if lazydelta:
1108 lazydeltabase = ui.configbool(
1109 lazydeltabase = ui.configbool(
1109 b'storage', b'revlog.reuse-external-delta-parent'
1110 b'storage', b'revlog.reuse-external-delta-parent'
1110 )
1111 )
1111 if lazydeltabase is None:
1112 if lazydeltabase is None:
1112 lazydeltabase = not scmutil.gddeltaconfig(ui)
1113 lazydeltabase = not scmutil.gddeltaconfig(ui)
1113 options[b'lazydelta'] = lazydelta
1114 options[b'lazydelta'] = lazydelta
1114 options[b'lazydeltabase'] = lazydeltabase
1115 options[b'lazydeltabase'] = lazydeltabase
1115
1116
1116 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1117 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1117 if 0 <= chainspan:
1118 if 0 <= chainspan:
1118 options[b'maxdeltachainspan'] = chainspan
1119 options[b'maxdeltachainspan'] = chainspan
1119
1120
1120 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1121 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1121 if mmapindexthreshold is not None:
1122 if mmapindexthreshold is not None:
1122 options[b'mmapindexthreshold'] = mmapindexthreshold
1123 options[b'mmapindexthreshold'] = mmapindexthreshold
1123
1124
1124 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1125 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1125 srdensitythres = float(
1126 srdensitythres = float(
1126 ui.config(b'experimental', b'sparse-read.density-threshold')
1127 ui.config(b'experimental', b'sparse-read.density-threshold')
1127 )
1128 )
1128 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1129 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1129 options[b'with-sparse-read'] = withsparseread
1130 options[b'with-sparse-read'] = withsparseread
1130 options[b'sparse-read-density-threshold'] = srdensitythres
1131 options[b'sparse-read-density-threshold'] = srdensitythres
1131 options[b'sparse-read-min-gap-size'] = srmingapsize
1132 options[b'sparse-read-min-gap-size'] = srmingapsize
1132
1133
1133 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1134 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1134 options[b'sparse-revlog'] = sparserevlog
1135 options[b'sparse-revlog'] = sparserevlog
1135 if sparserevlog:
1136 if sparserevlog:
1136 options[b'generaldelta'] = True
1137 options[b'generaldelta'] = True
1137
1138
1138 maxchainlen = None
1139 maxchainlen = None
1139 if sparserevlog:
1140 if sparserevlog:
1140 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1141 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1141 # experimental config: format.maxchainlen
1142 # experimental config: format.maxchainlen
1142 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1143 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1143 if maxchainlen is not None:
1144 if maxchainlen is not None:
1144 options[b'maxchainlen'] = maxchainlen
1145 options[b'maxchainlen'] = maxchainlen
1145
1146
1146 for r in requirements:
1147 for r in requirements:
1147 # we allow multiple compression engine requirement to co-exist because
1148 # we allow multiple compression engine requirement to co-exist because
1148 # strickly speaking, revlog seems to support mixed compression style.
1149 # strickly speaking, revlog seems to support mixed compression style.
1149 #
1150 #
1150 # The compression used for new entries will be "the last one"
1151 # The compression used for new entries will be "the last one"
1151 prefix = r.startswith
1152 prefix = r.startswith
1152 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1153 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1153 options[b'compengine'] = r.split(b'-', 2)[2]
1154 options[b'compengine'] = r.split(b'-', 2)[2]
1154
1155
1155 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1156 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1156 if options[b'zlib.level'] is not None:
1157 if options[b'zlib.level'] is not None:
1157 if not (0 <= options[b'zlib.level'] <= 9):
1158 if not (0 <= options[b'zlib.level'] <= 9):
1158 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1159 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1159 raise error.Abort(msg % options[b'zlib.level'])
1160 raise error.Abort(msg % options[b'zlib.level'])
1160 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1161 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1161 if options[b'zstd.level'] is not None:
1162 if options[b'zstd.level'] is not None:
1162 if not (0 <= options[b'zstd.level'] <= 22):
1163 if not (0 <= options[b'zstd.level'] <= 22):
1163 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1164 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1164 raise error.Abort(msg % options[b'zstd.level'])
1165 raise error.Abort(msg % options[b'zstd.level'])
1165
1166
1166 if requirementsmod.NARROW_REQUIREMENT in requirements:
1167 if requirementsmod.NARROW_REQUIREMENT in requirements:
1167 options[b'enableellipsis'] = True
1168 options[b'enableellipsis'] = True
1168
1169
1169 if ui.configbool(b'experimental', b'rust.index'):
1170 if ui.configbool(b'experimental', b'rust.index'):
1170 options[b'rust.index'] = True
1171 options[b'rust.index'] = True
1171 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1172 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1172 slow_path = ui.config(
1173 slow_path = ui.config(
1173 b'storage', b'revlog.persistent-nodemap.slow-path'
1174 b'storage', b'revlog.persistent-nodemap.slow-path'
1174 )
1175 )
1175 if slow_path not in (b'allow', b'warn', b'abort'):
1176 if slow_path not in (b'allow', b'warn', b'abort'):
1176 default = ui.config_default(
1177 default = ui.config_default(
1177 b'storage', b'revlog.persistent-nodemap.slow-path'
1178 b'storage', b'revlog.persistent-nodemap.slow-path'
1178 )
1179 )
1179 msg = _(
1180 msg = _(
1180 b'unknown value for config '
1181 b'unknown value for config '
1181 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1182 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1182 )
1183 )
1183 ui.warn(msg % slow_path)
1184 ui.warn(msg % slow_path)
1184 if not ui.quiet:
1185 if not ui.quiet:
1185 ui.warn(_(b'falling back to default value: %s\n') % default)
1186 ui.warn(_(b'falling back to default value: %s\n') % default)
1186 slow_path = default
1187 slow_path = default
1187
1188
1188 msg = _(
1189 msg = _(
1189 b"accessing `persistent-nodemap` repository without associated "
1190 b"accessing `persistent-nodemap` repository without associated "
1190 b"fast implementation."
1191 b"fast implementation."
1191 )
1192 )
1192 hint = _(
1193 hint = _(
1193 b"check `hg help config.format.use-persistent-nodemap` "
1194 b"check `hg help config.format.use-persistent-nodemap` "
1194 b"for details"
1195 b"for details"
1195 )
1196 )
1196 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1197 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1197 if slow_path == b'warn':
1198 if slow_path == b'warn':
1198 msg = b"warning: " + msg + b'\n'
1199 msg = b"warning: " + msg + b'\n'
1199 ui.warn(msg)
1200 ui.warn(msg)
1200 if not ui.quiet:
1201 if not ui.quiet:
1201 hint = b'(' + hint + b')\n'
1202 hint = b'(' + hint + b')\n'
1202 ui.warn(hint)
1203 ui.warn(hint)
1203 if slow_path == b'abort':
1204 if slow_path == b'abort':
1204 raise error.Abort(msg, hint=hint)
1205 raise error.Abort(msg, hint=hint)
1205 options[b'persistent-nodemap'] = True
1206 options[b'persistent-nodemap'] = True
1206 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1207 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1207 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1208 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1208 if slow_path not in (b'allow', b'warn', b'abort'):
1209 if slow_path not in (b'allow', b'warn', b'abort'):
1209 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1210 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1210 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1211 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1211 ui.warn(msg % slow_path)
1212 ui.warn(msg % slow_path)
1212 if not ui.quiet:
1213 if not ui.quiet:
1213 ui.warn(_(b'falling back to default value: %s\n') % default)
1214 ui.warn(_(b'falling back to default value: %s\n') % default)
1214 slow_path = default
1215 slow_path = default
1215
1216
1216 msg = _(
1217 msg = _(
1217 b"accessing `dirstate-v2` repository without associated "
1218 b"accessing `dirstate-v2` repository without associated "
1218 b"fast implementation."
1219 b"fast implementation."
1219 )
1220 )
1220 hint = _(
1221 hint = _(
1221 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1222 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1222 )
1223 )
1223 if not dirstate.HAS_FAST_DIRSTATE_V2:
1224 if not dirstate.HAS_FAST_DIRSTATE_V2:
1224 if slow_path == b'warn':
1225 if slow_path == b'warn':
1225 msg = b"warning: " + msg + b'\n'
1226 msg = b"warning: " + msg + b'\n'
1226 ui.warn(msg)
1227 ui.warn(msg)
1227 if not ui.quiet:
1228 if not ui.quiet:
1228 hint = b'(' + hint + b')\n'
1229 hint = b'(' + hint + b')\n'
1229 ui.warn(hint)
1230 ui.warn(hint)
1230 if slow_path == b'abort':
1231 if slow_path == b'abort':
1231 raise error.Abort(msg, hint=hint)
1232 raise error.Abort(msg, hint=hint)
1232 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1233 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1233 options[b'persistent-nodemap.mmap'] = True
1234 options[b'persistent-nodemap.mmap'] = True
1234 if ui.configbool(b'devel', b'persistent-nodemap'):
1235 if ui.configbool(b'devel', b'persistent-nodemap'):
1235 options[b'devel-force-nodemap'] = True
1236 options[b'devel-force-nodemap'] = True
1236
1237
1237 return options
1238 return options
1238
1239
1239
1240
1240 def makemain(**kwargs):
1241 def makemain(**kwargs):
1241 """Produce a type conforming to ``ilocalrepositorymain``."""
1242 """Produce a type conforming to ``ilocalrepositorymain``."""
1242 return localrepository
1243 return localrepository
1243
1244
1244
1245
1245 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1246 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1246 class revlogfilestorage:
1247 class revlogfilestorage:
1247 """File storage when using revlogs."""
1248 """File storage when using revlogs."""
1248
1249
1249 def file(self, path):
1250 def file(self, path):
1250 if path.startswith(b'/'):
1251 if path.startswith(b'/'):
1251 path = path[1:]
1252 path = path[1:]
1252
1253
1253 try_split = (
1254 try_split = (
1254 self.currenttransaction() is not None
1255 self.currenttransaction() is not None
1255 or txnutil.mayhavepending(self.root)
1256 or txnutil.mayhavepending(self.root)
1256 )
1257 )
1257
1258
1258 return filelog.filelog(self.svfs, path, try_split=try_split)
1259 return filelog.filelog(self.svfs, path, try_split=try_split)
1259
1260
1260
1261
1261 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1262 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1262 class revlognarrowfilestorage:
1263 class revlognarrowfilestorage:
1263 """File storage when using revlogs and narrow files."""
1264 """File storage when using revlogs and narrow files."""
1264
1265
1265 def file(self, path):
1266 def file(self, path):
1266 if path.startswith(b'/'):
1267 if path.startswith(b'/'):
1267 path = path[1:]
1268 path = path[1:]
1268
1269
1269 try_split = (
1270 try_split = (
1270 self.currenttransaction() is not None
1271 self.currenttransaction() is not None
1271 or txnutil.mayhavepending(self.root)
1272 or txnutil.mayhavepending(self.root)
1272 )
1273 )
1273 return filelog.narrowfilelog(
1274 return filelog.narrowfilelog(
1274 self.svfs, path, self._storenarrowmatch, try_split=try_split
1275 self.svfs, path, self._storenarrowmatch, try_split=try_split
1275 )
1276 )
1276
1277
1277
1278
1278 def makefilestorage(requirements, features, **kwargs):
1279 def makefilestorage(requirements, features, **kwargs):
1279 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1280 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1280 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1281 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1281 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1282 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1282
1283
1283 if requirementsmod.NARROW_REQUIREMENT in requirements:
1284 if requirementsmod.NARROW_REQUIREMENT in requirements:
1284 return revlognarrowfilestorage
1285 return revlognarrowfilestorage
1285 else:
1286 else:
1286 return revlogfilestorage
1287 return revlogfilestorage
1287
1288
1288
1289
1289 # List of repository interfaces and factory functions for them. Each
1290 # List of repository interfaces and factory functions for them. Each
1290 # will be called in order during ``makelocalrepository()`` to iteratively
1291 # will be called in order during ``makelocalrepository()`` to iteratively
1291 # derive the final type for a local repository instance. We capture the
1292 # derive the final type for a local repository instance. We capture the
1292 # function as a lambda so we don't hold a reference and the module-level
1293 # function as a lambda so we don't hold a reference and the module-level
1293 # functions can be wrapped.
1294 # functions can be wrapped.
1294 REPO_INTERFACES = [
1295 REPO_INTERFACES = [
1295 (repository.ilocalrepositorymain, lambda: makemain),
1296 (repository.ilocalrepositorymain, lambda: makemain),
1296 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1297 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1297 ]
1298 ]
1298
1299
1299
1300
1300 @interfaceutil.implementer(repository.ilocalrepositorymain)
1301 @interfaceutil.implementer(repository.ilocalrepositorymain)
1301 class localrepository:
1302 class localrepository:
1302 """Main class for representing local repositories.
1303 """Main class for representing local repositories.
1303
1304
1304 All local repositories are instances of this class.
1305 All local repositories are instances of this class.
1305
1306
1306 Constructed on its own, instances of this class are not usable as
1307 Constructed on its own, instances of this class are not usable as
1307 repository objects. To obtain a usable repository object, call
1308 repository objects. To obtain a usable repository object, call
1308 ``hg.repository()``, ``localrepo.instance()``, or
1309 ``hg.repository()``, ``localrepo.instance()``, or
1309 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1310 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1310 ``instance()`` adds support for creating new repositories.
1311 ``instance()`` adds support for creating new repositories.
1311 ``hg.repository()`` adds more extension integration, including calling
1312 ``hg.repository()`` adds more extension integration, including calling
1312 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1313 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1313 used.
1314 used.
1314 """
1315 """
1315
1316
1316 _basesupported = {
1317 _basesupported = {
1317 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1318 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1318 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1319 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1319 requirementsmod.CHANGELOGV2_REQUIREMENT,
1320 requirementsmod.CHANGELOGV2_REQUIREMENT,
1320 requirementsmod.COPIESSDC_REQUIREMENT,
1321 requirementsmod.COPIESSDC_REQUIREMENT,
1321 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1322 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1322 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1323 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1323 requirementsmod.DOTENCODE_REQUIREMENT,
1324 requirementsmod.DOTENCODE_REQUIREMENT,
1324 requirementsmod.FNCACHE_REQUIREMENT,
1325 requirementsmod.FNCACHE_REQUIREMENT,
1325 requirementsmod.GENERALDELTA_REQUIREMENT,
1326 requirementsmod.GENERALDELTA_REQUIREMENT,
1326 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1327 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1327 requirementsmod.NODEMAP_REQUIREMENT,
1328 requirementsmod.NODEMAP_REQUIREMENT,
1328 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1329 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1329 requirementsmod.REVLOGV1_REQUIREMENT,
1330 requirementsmod.REVLOGV1_REQUIREMENT,
1330 requirementsmod.REVLOGV2_REQUIREMENT,
1331 requirementsmod.REVLOGV2_REQUIREMENT,
1331 requirementsmod.SHARED_REQUIREMENT,
1332 requirementsmod.SHARED_REQUIREMENT,
1332 requirementsmod.SHARESAFE_REQUIREMENT,
1333 requirementsmod.SHARESAFE_REQUIREMENT,
1333 requirementsmod.SPARSE_REQUIREMENT,
1334 requirementsmod.SPARSE_REQUIREMENT,
1334 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1335 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1335 requirementsmod.STORE_REQUIREMENT,
1336 requirementsmod.STORE_REQUIREMENT,
1336 requirementsmod.TREEMANIFEST_REQUIREMENT,
1337 requirementsmod.TREEMANIFEST_REQUIREMENT,
1337 }
1338 }
1338
1339
1339 # list of prefix for file which can be written without 'wlock'
1340 # list of prefix for file which can be written without 'wlock'
1340 # Extensions should extend this list when needed
1341 # Extensions should extend this list when needed
1341 _wlockfreeprefix = {
1342 _wlockfreeprefix = {
1342 # We migh consider requiring 'wlock' for the next
1343 # We migh consider requiring 'wlock' for the next
1343 # two, but pretty much all the existing code assume
1344 # two, but pretty much all the existing code assume
1344 # wlock is not needed so we keep them excluded for
1345 # wlock is not needed so we keep them excluded for
1345 # now.
1346 # now.
1346 b'hgrc',
1347 b'hgrc',
1347 b'requires',
1348 b'requires',
1348 # XXX cache is a complicatged business someone
1349 # XXX cache is a complicatged business someone
1349 # should investigate this in depth at some point
1350 # should investigate this in depth at some point
1350 b'cache/',
1351 b'cache/',
1351 # XXX bisect was still a bit too messy at the time
1352 # XXX bisect was still a bit too messy at the time
1352 # this changeset was introduced. Someone should fix
1353 # this changeset was introduced. Someone should fix
1353 # the remainig bit and drop this line
1354 # the remainig bit and drop this line
1354 b'bisect.state',
1355 b'bisect.state',
1355 }
1356 }
1356
1357
1357 def __init__(
1358 def __init__(
1358 self,
1359 self,
1359 baseui,
1360 baseui,
1360 ui,
1361 ui,
1361 origroot: bytes,
1362 origroot: bytes,
1362 wdirvfs: vfsmod.vfs,
1363 wdirvfs: vfsmod.vfs,
1363 hgvfs: vfsmod.vfs,
1364 hgvfs: vfsmod.vfs,
1364 requirements,
1365 requirements,
1365 supportedrequirements,
1366 supportedrequirements,
1366 sharedpath: bytes,
1367 sharedpath: bytes,
1367 store,
1368 store,
1368 cachevfs: vfsmod.vfs,
1369 cachevfs: vfsmod.vfs,
1369 wcachevfs: vfsmod.vfs,
1370 wcachevfs: vfsmod.vfs,
1370 features,
1371 features,
1371 intents=None,
1372 intents=None,
1372 ):
1373 ):
1373 """Create a new local repository instance.
1374 """Create a new local repository instance.
1374
1375
1375 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1376 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1376 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1377 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1377 object.
1378 object.
1378
1379
1379 Arguments:
1380 Arguments:
1380
1381
1381 baseui
1382 baseui
1382 ``ui.ui`` instance that ``ui`` argument was based off of.
1383 ``ui.ui`` instance that ``ui`` argument was based off of.
1383
1384
1384 ui
1385 ui
1385 ``ui.ui`` instance for use by the repository.
1386 ``ui.ui`` instance for use by the repository.
1386
1387
1387 origroot
1388 origroot
1388 ``bytes`` path to working directory root of this repository.
1389 ``bytes`` path to working directory root of this repository.
1389
1390
1390 wdirvfs
1391 wdirvfs
1391 ``vfs.vfs`` rooted at the working directory.
1392 ``vfs.vfs`` rooted at the working directory.
1392
1393
1393 hgvfs
1394 hgvfs
1394 ``vfs.vfs`` rooted at .hg/
1395 ``vfs.vfs`` rooted at .hg/
1395
1396
1396 requirements
1397 requirements
1397 ``set`` of bytestrings representing repository opening requirements.
1398 ``set`` of bytestrings representing repository opening requirements.
1398
1399
1399 supportedrequirements
1400 supportedrequirements
1400 ``set`` of bytestrings representing repository requirements that we
1401 ``set`` of bytestrings representing repository requirements that we
1401 know how to open. May be a supetset of ``requirements``.
1402 know how to open. May be a supetset of ``requirements``.
1402
1403
1403 sharedpath
1404 sharedpath
1404 ``bytes`` Defining path to storage base directory. Points to a
1405 ``bytes`` Defining path to storage base directory. Points to a
1405 ``.hg/`` directory somewhere.
1406 ``.hg/`` directory somewhere.
1406
1407
1407 store
1408 store
1408 ``store.basicstore`` (or derived) instance providing access to
1409 ``store.basicstore`` (or derived) instance providing access to
1409 versioned storage.
1410 versioned storage.
1410
1411
1411 cachevfs
1412 cachevfs
1412 ``vfs.vfs`` used for cache files.
1413 ``vfs.vfs`` used for cache files.
1413
1414
1414 wcachevfs
1415 wcachevfs
1415 ``vfs.vfs`` used for cache files related to the working copy.
1416 ``vfs.vfs`` used for cache files related to the working copy.
1416
1417
1417 features
1418 features
1418 ``set`` of bytestrings defining features/capabilities of this
1419 ``set`` of bytestrings defining features/capabilities of this
1419 instance.
1420 instance.
1420
1421
1421 intents
1422 intents
1422 ``set`` of system strings indicating what this repo will be used
1423 ``set`` of system strings indicating what this repo will be used
1423 for.
1424 for.
1424 """
1425 """
1425 self.baseui = baseui
1426 self.baseui = baseui
1426 self.ui = ui
1427 self.ui = ui
1427 self.origroot = origroot
1428 self.origroot = origroot
1428 # vfs rooted at working directory.
1429 # vfs rooted at working directory.
1429 self.wvfs = wdirvfs
1430 self.wvfs = wdirvfs
1430 self.root = wdirvfs.base
1431 self.root = wdirvfs.base
1431 # vfs rooted at .hg/. Used to access most non-store paths.
1432 # vfs rooted at .hg/. Used to access most non-store paths.
1432 self.vfs = hgvfs
1433 self.vfs = hgvfs
1433 self.path = hgvfs.base
1434 self.path = hgvfs.base
1434 self.requirements = requirements
1435 self.requirements = requirements
1435 self.nodeconstants = sha1nodeconstants
1436 self.nodeconstants = sha1nodeconstants
1436 self.nullid = self.nodeconstants.nullid
1437 self.nullid = self.nodeconstants.nullid
1437 self.supported = supportedrequirements
1438 self.supported = supportedrequirements
1438 self.sharedpath = sharedpath
1439 self.sharedpath = sharedpath
1439 self.store = store
1440 self.store = store
1440 self.cachevfs = cachevfs
1441 self.cachevfs = cachevfs
1441 self.wcachevfs = wcachevfs
1442 self.wcachevfs = wcachevfs
1442 self.features = features
1443 self.features = features
1443
1444
1444 self.filtername = None
1445 self.filtername = None
1445
1446
1446 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1447 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1447 b'devel', b'check-locks'
1448 b'devel', b'check-locks'
1448 ):
1449 ):
1449 self.vfs.audit = self._getvfsward(self.vfs.audit)
1450 self.vfs.audit = self._getvfsward(self.vfs.audit)
1450 # A list of callback to shape the phase if no data were found.
1451 # A list of callback to shape the phase if no data were found.
1451 # Callback are in the form: func(repo, roots) --> processed root.
1452 # Callback are in the form: func(repo, roots) --> processed root.
1452 # This list it to be filled by extension during repo setup
1453 # This list it to be filled by extension during repo setup
1453 self._phasedefaults = []
1454 self._phasedefaults = []
1454
1455
1455 color.setup(self.ui)
1456 color.setup(self.ui)
1456
1457
1457 self.spath = self.store.path
1458 self.spath = self.store.path
1458 self.svfs = self.store.vfs
1459 self.svfs = self.store.vfs
1459 self.sjoin = self.store.join
1460 self.sjoin = self.store.join
1460 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1461 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1461 b'devel', b'check-locks'
1462 b'devel', b'check-locks'
1462 ):
1463 ):
1463 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
1464 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
1464 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1465 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1465 else: # standard vfs
1466 else: # standard vfs
1466 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1467 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1467
1468
1468 self._dirstatevalidatewarned = False
1469 self._dirstatevalidatewarned = False
1469
1470
1470 self._branchcaches = branchmap.BranchMapCache()
1471 self._branchcaches = branchmap.BranchMapCache()
1471 self._revbranchcache = None
1472 self._revbranchcache = None
1472 self._filterpats = {}
1473 self._filterpats = {}
1473 self._datafilters = {}
1474 self._datafilters = {}
1474 self._transref = self._lockref = self._wlockref = None
1475 self._transref = self._lockref = self._wlockref = None
1475
1476
1476 # A cache for various files under .hg/ that tracks file changes,
1477 # A cache for various files under .hg/ that tracks file changes,
1477 # (used by the filecache decorator)
1478 # (used by the filecache decorator)
1478 #
1479 #
1479 # Maps a property name to its util.filecacheentry
1480 # Maps a property name to its util.filecacheentry
1480 self._filecache = {}
1481 self._filecache = {}
1481
1482
1482 # hold sets of revision to be filtered
1483 # hold sets of revision to be filtered
1483 # should be cleared when something might have changed the filter value:
1484 # should be cleared when something might have changed the filter value:
1484 # - new changesets,
1485 # - new changesets,
1485 # - phase change,
1486 # - phase change,
1486 # - new obsolescence marker,
1487 # - new obsolescence marker,
1487 # - working directory parent change,
1488 # - working directory parent change,
1488 # - bookmark changes
1489 # - bookmark changes
1489 self.filteredrevcache = {}
1490 self.filteredrevcache = {}
1490
1491
1491 self._dirstate = None
1492 self._dirstate = None
1492 # post-dirstate-status hooks
1493 # post-dirstate-status hooks
1493 self._postdsstatus = []
1494 self._postdsstatus = []
1494
1495
1495 self._pending_narrow_pats = None
1496 self._pending_narrow_pats = None
1496 self._pending_narrow_pats_dirstate = None
1497 self._pending_narrow_pats_dirstate = None
1497
1498
1498 # generic mapping between names and nodes
1499 # generic mapping between names and nodes
1499 self.names = namespaces.namespaces()
1500 self.names = namespaces.namespaces()
1500
1501
1501 # Key to signature value.
1502 # Key to signature value.
1502 self._sparsesignaturecache = {}
1503 self._sparsesignaturecache = {}
1503 # Signature to cached matcher instance.
1504 # Signature to cached matcher instance.
1504 self._sparsematchercache = {}
1505 self._sparsematchercache = {}
1505
1506
1506 self._extrafilterid = repoview.extrafilter(ui)
1507 self._extrafilterid = repoview.extrafilter(ui)
1507
1508
1508 self.filecopiesmode = None
1509 self.filecopiesmode = None
1509 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1510 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1510 self.filecopiesmode = b'changeset-sidedata'
1511 self.filecopiesmode = b'changeset-sidedata'
1511
1512
1512 self._wanted_sidedata = set()
1513 self._wanted_sidedata = set()
1513 self._sidedata_computers = {}
1514 self._sidedata_computers = {}
1514 sidedatamod.set_sidedata_spec_for_repo(self)
1515 sidedatamod.set_sidedata_spec_for_repo(self)
1515
1516
1516 def _getvfsward(self, origfunc):
1517 def _getvfsward(self, origfunc):
1517 """build a ward for self.vfs"""
1518 """build a ward for self.vfs"""
1518 rref = weakref.ref(self)
1519 rref = weakref.ref(self)
1519
1520
1520 def checkvfs(path, mode=None):
1521 def checkvfs(path, mode=None):
1521 ret = origfunc(path, mode=mode)
1522 ret = origfunc(path, mode=mode)
1522 repo = rref()
1523 repo = rref()
1523 if (
1524 if (
1524 repo is None
1525 repo is None
1525 or not util.safehasattr(repo, '_wlockref')
1526 or not util.safehasattr(repo, '_wlockref')
1526 or not util.safehasattr(repo, '_lockref')
1527 or not util.safehasattr(repo, '_lockref')
1527 ):
1528 ):
1528 return
1529 return
1529 if mode in (None, b'r', b'rb'):
1530 if mode in (None, b'r', b'rb'):
1530 return
1531 return
1531 if path.startswith(repo.path):
1532 if path.startswith(repo.path):
1532 # truncate name relative to the repository (.hg)
1533 # truncate name relative to the repository (.hg)
1533 path = path[len(repo.path) + 1 :]
1534 path = path[len(repo.path) + 1 :]
1534 if path.startswith(b'cache/'):
1535 if path.startswith(b'cache/'):
1535 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1536 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1536 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1537 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1537 # path prefixes covered by 'lock'
1538 # path prefixes covered by 'lock'
1538 vfs_path_prefixes = (
1539 vfs_path_prefixes = (
1539 b'journal.',
1540 b'journal.',
1540 b'undo.',
1541 b'undo.',
1541 b'strip-backup/',
1542 b'strip-backup/',
1542 b'cache/',
1543 b'cache/',
1543 )
1544 )
1544 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1545 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1545 if repo._currentlock(repo._lockref) is None:
1546 if repo._currentlock(repo._lockref) is None:
1546 repo.ui.develwarn(
1547 repo.ui.develwarn(
1547 b'write with no lock: "%s"' % path,
1548 b'write with no lock: "%s"' % path,
1548 stacklevel=3,
1549 stacklevel=3,
1549 config=b'check-locks',
1550 config=b'check-locks',
1550 )
1551 )
1551 elif repo._currentlock(repo._wlockref) is None:
1552 elif repo._currentlock(repo._wlockref) is None:
1552 # rest of vfs files are covered by 'wlock'
1553 # rest of vfs files are covered by 'wlock'
1553 #
1554 #
1554 # exclude special files
1555 # exclude special files
1555 for prefix in self._wlockfreeprefix:
1556 for prefix in self._wlockfreeprefix:
1556 if path.startswith(prefix):
1557 if path.startswith(prefix):
1557 return
1558 return
1558 repo.ui.develwarn(
1559 repo.ui.develwarn(
1559 b'write with no wlock: "%s"' % path,
1560 b'write with no wlock: "%s"' % path,
1560 stacklevel=3,
1561 stacklevel=3,
1561 config=b'check-locks',
1562 config=b'check-locks',
1562 )
1563 )
1563 return ret
1564 return ret
1564
1565
1565 return checkvfs
1566 return checkvfs
1566
1567
1567 def _getsvfsward(self, origfunc):
1568 def _getsvfsward(self, origfunc):
1568 """build a ward for self.svfs"""
1569 """build a ward for self.svfs"""
1569 rref = weakref.ref(self)
1570 rref = weakref.ref(self)
1570
1571
1571 def checksvfs(path, mode=None):
1572 def checksvfs(path, mode=None):
1572 ret = origfunc(path, mode=mode)
1573 ret = origfunc(path, mode=mode)
1573 repo = rref()
1574 repo = rref()
1574 if repo is None or not util.safehasattr(repo, '_lockref'):
1575 if repo is None or not util.safehasattr(repo, '_lockref'):
1575 return
1576 return
1576 if mode in (None, b'r', b'rb'):
1577 if mode in (None, b'r', b'rb'):
1577 return
1578 return
1578 if path.startswith(repo.sharedpath):
1579 if path.startswith(repo.sharedpath):
1579 # truncate name relative to the repository (.hg)
1580 # truncate name relative to the repository (.hg)
1580 path = path[len(repo.sharedpath) + 1 :]
1581 path = path[len(repo.sharedpath) + 1 :]
1581 if repo._currentlock(repo._lockref) is None:
1582 if repo._currentlock(repo._lockref) is None:
1582 repo.ui.develwarn(
1583 repo.ui.develwarn(
1583 b'write with no lock: "%s"' % path, stacklevel=4
1584 b'write with no lock: "%s"' % path, stacklevel=4
1584 )
1585 )
1585 return ret
1586 return ret
1586
1587
1587 return checksvfs
1588 return checksvfs
1588
1589
1589 @property
1590 @property
1590 def vfs_map(self):
1591 def vfs_map(self):
1591 return {
1592 return {
1592 b'': self.svfs,
1593 b'': self.svfs,
1593 b'plain': self.vfs,
1594 b'plain': self.vfs,
1594 b'store': self.svfs,
1595 b'store': self.svfs,
1595 }
1596 }
1596
1597
1597 def close(self):
1598 def close(self):
1598 self._writecaches()
1599 self._writecaches()
1599
1600
1600 def _writecaches(self):
1601 def _writecaches(self):
1601 if self._revbranchcache:
1602 if self._revbranchcache:
1602 self._revbranchcache.write()
1603 self._revbranchcache.write()
1603
1604
1604 def _restrictcapabilities(self, caps):
1605 def _restrictcapabilities(self, caps):
1605 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1606 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1606 caps = set(caps)
1607 caps = set(caps)
1607 capsblob = bundle2.encodecaps(
1608 capsblob = bundle2.encodecaps(
1608 bundle2.getrepocaps(self, role=b'client')
1609 bundle2.getrepocaps(self, role=b'client')
1609 )
1610 )
1610 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1611 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1611 if self.ui.configbool(b'experimental', b'narrow'):
1612 if self.ui.configbool(b'experimental', b'narrow'):
1612 caps.add(wireprototypes.NARROWCAP)
1613 caps.add(wireprototypes.NARROWCAP)
1613 return caps
1614 return caps
1614
1615
1615 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1616 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1616 # self -> auditor -> self._checknested -> self
1617 # self -> auditor -> self._checknested -> self
1617
1618
1618 @property
1619 @property
1619 def auditor(self):
1620 def auditor(self):
1620 # This is only used by context.workingctx.match in order to
1621 # This is only used by context.workingctx.match in order to
1621 # detect files in subrepos.
1622 # detect files in subrepos.
1622 return pathutil.pathauditor(self.root, callback=self._checknested)
1623 return pathutil.pathauditor(self.root, callback=self._checknested)
1623
1624
1624 @property
1625 @property
1625 def nofsauditor(self):
1626 def nofsauditor(self):
1626 # This is only used by context.basectx.match in order to detect
1627 # This is only used by context.basectx.match in order to detect
1627 # files in subrepos.
1628 # files in subrepos.
1628 return pathutil.pathauditor(
1629 return pathutil.pathauditor(
1629 self.root, callback=self._checknested, realfs=False, cached=True
1630 self.root, callback=self._checknested, realfs=False, cached=True
1630 )
1631 )
1631
1632
1632 def _checknested(self, path):
1633 def _checknested(self, path):
1633 """Determine if path is a legal nested repository."""
1634 """Determine if path is a legal nested repository."""
1634 if not path.startswith(self.root):
1635 if not path.startswith(self.root):
1635 return False
1636 return False
1636 subpath = path[len(self.root) + 1 :]
1637 subpath = path[len(self.root) + 1 :]
1637 normsubpath = util.pconvert(subpath)
1638 normsubpath = util.pconvert(subpath)
1638
1639
1639 # XXX: Checking against the current working copy is wrong in
1640 # XXX: Checking against the current working copy is wrong in
1640 # the sense that it can reject things like
1641 # the sense that it can reject things like
1641 #
1642 #
1642 # $ hg cat -r 10 sub/x.txt
1643 # $ hg cat -r 10 sub/x.txt
1643 #
1644 #
1644 # if sub/ is no longer a subrepository in the working copy
1645 # if sub/ is no longer a subrepository in the working copy
1645 # parent revision.
1646 # parent revision.
1646 #
1647 #
1647 # However, it can of course also allow things that would have
1648 # However, it can of course also allow things that would have
1648 # been rejected before, such as the above cat command if sub/
1649 # been rejected before, such as the above cat command if sub/
1649 # is a subrepository now, but was a normal directory before.
1650 # is a subrepository now, but was a normal directory before.
1650 # The old path auditor would have rejected by mistake since it
1651 # The old path auditor would have rejected by mistake since it
1651 # panics when it sees sub/.hg/.
1652 # panics when it sees sub/.hg/.
1652 #
1653 #
1653 # All in all, checking against the working copy seems sensible
1654 # All in all, checking against the working copy seems sensible
1654 # since we want to prevent access to nested repositories on
1655 # since we want to prevent access to nested repositories on
1655 # the filesystem *now*.
1656 # the filesystem *now*.
1656 ctx = self[None]
1657 ctx = self[None]
1657 parts = util.splitpath(subpath)
1658 parts = util.splitpath(subpath)
1658 while parts:
1659 while parts:
1659 prefix = b'/'.join(parts)
1660 prefix = b'/'.join(parts)
1660 if prefix in ctx.substate:
1661 if prefix in ctx.substate:
1661 if prefix == normsubpath:
1662 if prefix == normsubpath:
1662 return True
1663 return True
1663 else:
1664 else:
1664 sub = ctx.sub(prefix)
1665 sub = ctx.sub(prefix)
1665 return sub.checknested(subpath[len(prefix) + 1 :])
1666 return sub.checknested(subpath[len(prefix) + 1 :])
1666 else:
1667 else:
1667 parts.pop()
1668 parts.pop()
1668 return False
1669 return False
1669
1670
1670 def peer(self, path=None, remotehidden=False):
1671 def peer(self, path=None, remotehidden=False):
1671 return localpeer(
1672 return localpeer(
1672 self, path=path, remotehidden=remotehidden
1673 self, path=path, remotehidden=remotehidden
1673 ) # not cached to avoid reference cycle
1674 ) # not cached to avoid reference cycle
1674
1675
1675 def unfiltered(self):
1676 def unfiltered(self):
1676 """Return unfiltered version of the repository
1677 """Return unfiltered version of the repository
1677
1678
1678 Intended to be overwritten by filtered repo."""
1679 Intended to be overwritten by filtered repo."""
1679 return self
1680 return self
1680
1681
1681 def filtered(self, name, visibilityexceptions=None):
1682 def filtered(self, name, visibilityexceptions=None):
1682 """Return a filtered version of a repository
1683 """Return a filtered version of a repository
1683
1684
1684 The `name` parameter is the identifier of the requested view. This
1685 The `name` parameter is the identifier of the requested view. This
1685 will return a repoview object set "exactly" to the specified view.
1686 will return a repoview object set "exactly" to the specified view.
1686
1687
1687 This function does not apply recursive filtering to a repository. For
1688 This function does not apply recursive filtering to a repository. For
1688 example calling `repo.filtered("served")` will return a repoview using
1689 example calling `repo.filtered("served")` will return a repoview using
1689 the "served" view, regardless of the initial view used by `repo`.
1690 the "served" view, regardless of the initial view used by `repo`.
1690
1691
1691 In other word, there is always only one level of `repoview` "filtering".
1692 In other word, there is always only one level of `repoview` "filtering".
1692 """
1693 """
1693 if self._extrafilterid is not None and b'%' not in name:
1694 if self._extrafilterid is not None and b'%' not in name:
1694 name = name + b'%' + self._extrafilterid
1695 name = name + b'%' + self._extrafilterid
1695
1696
1696 cls = repoview.newtype(self.unfiltered().__class__)
1697 cls = repoview.newtype(self.unfiltered().__class__)
1697 return cls(self, name, visibilityexceptions)
1698 return cls(self, name, visibilityexceptions)
1698
1699
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        """The repository bookmark store (``bookmarks.bmstore``)."""
        # Since the multiple files involved cannot be written atomically
        # (with the current repository format), there is a race:
        #
        #  1) changelog content A is read
        #  2) outside transaction updates changelog to content B
        #  3) outside transaction updates bookmark file referring to B
        #  4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks pointing at nodes missing from A are
        # dropped.  During a read that is merely unfortunate; during a
        # write those "unknown" bookmarks would be dropped for good.
        # Writes happen within locks, and data read from disk before
        # locking is "invalidated" right after the locks are taken (the
        # `filecache` mechanism keeps the parsed data and reuses it when
        # the underlying files did not change).
        #
        # Unfortunately, in the scenario above the files tracked by the
        # bookmarks filecache might not have changed while the in-memory
        # content is still wrong, because it was computed against an older
        # changelog.  Listing `00changelog.i` in the decorator above is not
        # enough by itself: at the time (4) runs, the changelog *file* has
        # already diverged from the changelog *object* loaded in (1).
        #
        # To prevent the issue, the changelog is explicitly reloaded while
        # computing `_bookmarks` (step (3) of the schedule below).  The
        # data race can still happen without the lock (with a narrower
        # window), but it can no longer go undetected during the lock-time
        # refresh:
        #
        #  1) filecache logic detects `_bookmarks` needs to be computed
        #  2) cachestat for `bookmarks` and `changelog` captured (for book)
        #  3) `changelog` filecache is forced to be tested (reload below)
        #  4) cachestat for `changelog` captured (for changelog)
        #  5) `_bookmarks` is computed and cached
        #
        # Step (3) guarantees a changelog at least as recent as the
        # cachestat from (1), so at locking time either the changelog did
        # not change since (1) and the data is reused, or the bookmarks get
        # refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)
1756
1757
1757 def _refreshchangelog(self):
1758 def _refreshchangelog(self):
1758 """make sure the in memory changelog match the on-disk one"""
1759 """make sure the in memory changelog match the on-disk one"""
1759 if 'changelog' in vars(self) and self.currenttransaction() is None:
1760 if 'changelog' in vars(self) and self.currenttransaction() is None:
1760 del self.changelog
1761 del self.changelog
1761
1762
1762 @property
1763 @property
1763 def _activebookmark(self):
1764 def _activebookmark(self):
1764 return self._bookmarks.active
1765 return self._bookmarks.active
1765
1766
1766 # _phasesets depend on changelog. what we need is to call
1767 # _phasesets depend on changelog. what we need is to call
1767 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1768 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1768 # can't be easily expressed in filecache mechanism.
1769 # can't be easily expressed in filecache mechanism.
1769 @storecache(b'phaseroots', b'00changelog.i')
1770 @storecache(b'phaseroots', b'00changelog.i')
1770 def _phasecache(self):
1771 def _phasecache(self):
1771 return phases.phasecache(self, self._phasedefaults)
1772 return phases.phasecache(self, self._phasedefaults)
1772
1773
1773 @storecache(b'obsstore')
1774 @storecache(b'obsstore')
1774 def obsstore(self):
1775 def obsstore(self):
1775 return obsolete.makestore(self.ui, self)
1776 return obsolete.makestore(self.ui, self)
1776
1777
1777 @changelogcache()
1778 @changelogcache()
1778 def changelog(repo):
1779 def changelog(repo):
1779 # load dirstate before changelog to avoid race see issue6303
1780 # load dirstate before changelog to avoid race see issue6303
1780 repo.dirstate.prefetch_parents()
1781 repo.dirstate.prefetch_parents()
1781 return repo.store.changelog(
1782 return repo.store.changelog(
1782 txnutil.mayhavepending(repo.root),
1783 txnutil.mayhavepending(repo.root),
1783 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1784 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1784 )
1785 )
1785
1786
1786 @manifestlogcache()
1787 @manifestlogcache()
1787 def manifestlog(self):
1788 def manifestlog(self):
1788 return self.store.manifestlog(self, self._storenarrowmatch)
1789 return self.store.manifestlog(self, self._storenarrowmatch)
1789
1790
1790 @unfilteredpropertycache
1791 @unfilteredpropertycache
1791 def dirstate(self):
1792 def dirstate(self):
1792 if self._dirstate is None:
1793 if self._dirstate is None:
1793 self._dirstate = self._makedirstate()
1794 self._dirstate = self._makedirstate()
1794 else:
1795 else:
1795 self._dirstate.refresh()
1796 self._dirstate.refresh()
1796 return self._dirstate
1797 return self._dirstate
1797
1798
1798 def _makedirstate(self):
1799 def _makedirstate(self):
1799 """Extension point for wrapping the dirstate per-repo."""
1800 """Extension point for wrapping the dirstate per-repo."""
1800 sparsematchfn = None
1801 sparsematchfn = None
1801 if sparse.use_sparse(self):
1802 if sparse.use_sparse(self):
1802 sparsematchfn = lambda: sparse.matcher(self)
1803 sparsematchfn = lambda: sparse.matcher(self)
1803 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1804 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1804 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1805 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1805 use_dirstate_v2 = v2_req in self.requirements
1806 use_dirstate_v2 = v2_req in self.requirements
1806 use_tracked_hint = th in self.requirements
1807 use_tracked_hint = th in self.requirements
1807
1808
1808 return dirstate.dirstate(
1809 return dirstate.dirstate(
1809 self.vfs,
1810 self.vfs,
1810 self.ui,
1811 self.ui,
1811 self.root,
1812 self.root,
1812 self._dirstatevalidate,
1813 self._dirstatevalidate,
1813 sparsematchfn,
1814 sparsematchfn,
1814 self.nodeconstants,
1815 self.nodeconstants,
1815 use_dirstate_v2,
1816 use_dirstate_v2,
1816 use_tracked_hint=use_tracked_hint,
1817 use_tracked_hint=use_tracked_hint,
1817 )
1818 )
1818
1819
    def _dirstatevalidate(self, node):
        """Validate a dirstate parent ``node`` against the changelog.

        Returns ``node`` when it resolves to a known changeset; otherwise
        warns once per repo object and returns the null id.
        """
        okay = True
        try:
            self.changelog.rev(node)
        except error.LookupError:
            # If the parent are unknown it might just be because the changelog
            # in memory is lagging behind the dirstate in memory. So try to
            # refresh the changelog first.
            #
            # We only do so if we don't hold the lock, if we do hold the lock
            # the invalidation at that time should have taken care of this and
            # something is very fishy.
            if self.currentlock() is None:
                self.invalidate()
                try:
                    self.changelog.rev(node)
                except error.LookupError:
                    okay = False
            else:
                # XXX we should consider raising an error here.
                okay = False
        if okay:
            return node
        else:
            # Warn only once so repeated validations stay quiet.
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid
1850
1851
1851 @storecache(narrowspec.FILENAME)
1852 @storecache(narrowspec.FILENAME)
1852 def narrowpats(self):
1853 def narrowpats(self):
1853 """matcher patterns for this repository's narrowspec
1854 """matcher patterns for this repository's narrowspec
1854
1855
1855 A tuple of (includes, excludes).
1856 A tuple of (includes, excludes).
1856 """
1857 """
1857 # the narrow management should probably move into its own object
1858 # the narrow management should probably move into its own object
1858 val = self._pending_narrow_pats
1859 val = self._pending_narrow_pats
1859 if val is None:
1860 if val is None:
1860 val = narrowspec.load(self)
1861 val = narrowspec.load(self)
1861 return val
1862 return val
1862
1863
1863 @storecache(narrowspec.FILENAME)
1864 @storecache(narrowspec.FILENAME)
1864 def _storenarrowmatch(self):
1865 def _storenarrowmatch(self):
1865 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1866 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1866 return matchmod.always()
1867 return matchmod.always()
1867 include, exclude = self.narrowpats
1868 include, exclude = self.narrowpats
1868 return narrowspec.match(self.root, include=include, exclude=exclude)
1869 return narrowspec.match(self.root, include=include, exclude=exclude)
1869
1870
1870 @storecache(narrowspec.FILENAME)
1871 @storecache(narrowspec.FILENAME)
1871 def _narrowmatch(self):
1872 def _narrowmatch(self):
1872 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1873 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1873 return matchmod.always()
1874 return matchmod.always()
1874 narrowspec.checkworkingcopynarrowspec(self)
1875 narrowspec.checkworkingcopynarrowspec(self)
1875 include, exclude = self.narrowpats
1876 include, exclude = self.narrowpats
1876 return narrowspec.match(self.root, include=include, exclude=exclude)
1877 return narrowspec.match(self.root, include=include, exclude=exclude)
1877
1878
1878 def narrowmatch(self, match=None, includeexact=False):
1879 def narrowmatch(self, match=None, includeexact=False):
1879 """matcher corresponding the the repo's narrowspec
1880 """matcher corresponding the the repo's narrowspec
1880
1881
1881 If `match` is given, then that will be intersected with the narrow
1882 If `match` is given, then that will be intersected with the narrow
1882 matcher.
1883 matcher.
1883
1884
1884 If `includeexact` is True, then any exact matches from `match` will
1885 If `includeexact` is True, then any exact matches from `match` will
1885 be included even if they're outside the narrowspec.
1886 be included even if they're outside the narrowspec.
1886 """
1887 """
1887 if match:
1888 if match:
1888 if includeexact and not self._narrowmatch.always():
1889 if includeexact and not self._narrowmatch.always():
1889 # do not exclude explicitly-specified paths so that they can
1890 # do not exclude explicitly-specified paths so that they can
1890 # be warned later on
1891 # be warned later on
1891 em = matchmod.exact(match.files())
1892 em = matchmod.exact(match.files())
1892 nm = matchmod.unionmatcher([self._narrowmatch, em])
1893 nm = matchmod.unionmatcher([self._narrowmatch, em])
1893 return matchmod.intersectmatchers(match, nm)
1894 return matchmod.intersectmatchers(match, nm)
1894 return matchmod.intersectmatchers(match, self._narrowmatch)
1895 return matchmod.intersectmatchers(match, self._narrowmatch)
1895 return self._narrowmatch
1896 return self._narrowmatch
1896
1897
1897 def setnarrowpats(self, newincludes, newexcludes):
1898 def setnarrowpats(self, newincludes, newexcludes):
1898 narrowspec.save(self, newincludes, newexcludes)
1899 narrowspec.save(self, newincludes, newexcludes)
1899 self.invalidate(clearfilecache=True)
1900 self.invalidate(clearfilecache=True)
1900
1901
1901 @unfilteredpropertycache
1902 @unfilteredpropertycache
1902 def _quick_access_changeid_null(self):
1903 def _quick_access_changeid_null(self):
1903 return {
1904 return {
1904 b'null': (nullrev, self.nodeconstants.nullid),
1905 b'null': (nullrev, self.nodeconstants.nullid),
1905 nullrev: (nullrev, self.nodeconstants.nullid),
1906 nullrev: (nullrev, self.nodeconstants.nullid),
1906 self.nullid: (nullrev, self.nullid),
1907 self.nullid: (nullrev, self.nullid),
1907 }
1908 }
1908
1909
    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        """Fast-path (rev, node) lookup table including working-copy parents.

        Extends ``_quick_access_changeid_null`` with the dirstate parents,
        their own parents, and the ``b'.'`` symbol.  Only meaningful for
        repo filters through which the working copy is visible.
        """
        # also fast path access to the working copy parents
        # however, only do it for filter that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick
1939
1940
1940 @unfilteredmethod
1941 @unfilteredmethod
1941 def _quick_access_changeid_invalidate(self):
1942 def _quick_access_changeid_invalidate(self):
1942 if '_quick_access_changeid_wc' in vars(self):
1943 if '_quick_access_changeid_wc' in vars(self):
1943 del self.__dict__['_quick_access_changeid_wc']
1944 del self.__dict__['_quick_access_changeid_wc']
1944
1945
1945 @property
1946 @property
1946 def _quick_access_changeid(self):
1947 def _quick_access_changeid(self):
1947 """an helper dictionnary for __getitem__ calls
1948 """an helper dictionnary for __getitem__ calls
1948
1949
1949 This contains a list of symbol we can recognise right away without
1950 This contains a list of symbol we can recognise right away without
1950 further processing.
1951 further processing.
1951 """
1952 """
1952 if self.filtername in repoview.filter_has_wc:
1953 if self.filtername in repoview.filter_has_wc:
1953 return self._quick_access_changeid_wc
1954 return self._quick_access_changeid_wc
1954 return self._quick_access_changeid_null
1955 return self._quick_access_changeid_null
1955
1956
    def __getitem__(self, changeid):
        """Return the context for ``changeid``.

        ``changeid`` may be ``None`` (working context), an existing
        context (returned as-is), a slice (list of changectx), an integer
        revision, a binary or hex node, or one of the special symbols in
        ``_quick_access_changeid``.  Raises ``RepoLookupError`` (or
        ``FilteredRepoLookupError``) for unknown or filtered revisions.
        """
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                # looks like a binary node
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                # looks like a hex node
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
2034
2035
2035 def __contains__(self, changeid):
2036 def __contains__(self, changeid):
2036 """True if the given changeid exists"""
2037 """True if the given changeid exists"""
2037 try:
2038 try:
2038 self[changeid]
2039 self[changeid]
2039 return True
2040 return True
2040 except error.RepoLookupError:
2041 except error.RepoLookupError:
2041 return False
2042 return False
2042
2043
2043 def __nonzero__(self):
2044 def __nonzero__(self):
2044 return True
2045 return True
2045
2046
2046 __bool__ = __nonzero__
2047 __bool__ = __nonzero__
2047
2048
2048 def __len__(self):
2049 def __len__(self):
2049 # no need to pay the cost of repoview.changelog
2050 # no need to pay the cost of repoview.changelog
2050 unfi = self.unfiltered()
2051 unfi = self.unfiltered()
2051 return len(unfi.changelog)
2052 return len(unfi.changelog)
2052
2053
2053 def __iter__(self):
2054 def __iter__(self):
2054 return iter(self.changelog)
2055 return iter(self.changelog)
2055
2056
2056 def revs(self, expr: bytes, *args):
2057 def revs(self, expr: bytes, *args):
2057 """Find revisions matching a revset.
2058 """Find revisions matching a revset.
2058
2059
2059 The revset is specified as a string ``expr`` that may contain
2060 The revset is specified as a string ``expr`` that may contain
2060 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2061 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2061
2062
2062 Revset aliases from the configuration are not expanded. To expand
2063 Revset aliases from the configuration are not expanded. To expand
2063 user aliases, consider calling ``scmutil.revrange()`` or
2064 user aliases, consider calling ``scmutil.revrange()`` or
2064 ``repo.anyrevs([expr], user=True)``.
2065 ``repo.anyrevs([expr], user=True)``.
2065
2066
2066 Returns a smartset.abstractsmartset, which is a list-like interface
2067 Returns a smartset.abstractsmartset, which is a list-like interface
2067 that contains integer revisions.
2068 that contains integer revisions.
2068 """
2069 """
2069 tree = revsetlang.spectree(expr, *args)
2070 tree = revsetlang.spectree(expr, *args)
2070 return revset.makematcher(tree)(self)
2071 return revset.makematcher(tree)(self)
2071
2072
2072 def set(self, expr: bytes, *args):
2073 def set(self, expr: bytes, *args):
2073 """Find revisions matching a revset and emit changectx instances.
2074 """Find revisions matching a revset and emit changectx instances.
2074
2075
2075 This is a convenience wrapper around ``revs()`` that iterates the
2076 This is a convenience wrapper around ``revs()`` that iterates the
2076 result and is a generator of changectx instances.
2077 result and is a generator of changectx instances.
2077
2078
2078 Revset aliases from the configuration are not expanded. To expand
2079 Revset aliases from the configuration are not expanded. To expand
2079 user aliases, consider calling ``scmutil.revrange()``.
2080 user aliases, consider calling ``scmutil.revrange()``.
2080 """
2081 """
2081 for r in self.revs(expr, *args):
2082 for r in self.revs(expr, *args):
2082 yield self[r]
2083 yield self[r]
2083
2084
2084 def anyrevs(self, specs: bytes, user=False, localalias=None):
2085 def anyrevs(self, specs: bytes, user=False, localalias=None):
2085 """Find revisions matching one of the given revsets.
2086 """Find revisions matching one of the given revsets.
2086
2087
2087 Revset aliases from the configuration are not expanded by default. To
2088 Revset aliases from the configuration are not expanded by default. To
2088 expand user aliases, specify ``user=True``. To provide some local
2089 expand user aliases, specify ``user=True``. To provide some local
2089 definitions overriding user aliases, set ``localalias`` to
2090 definitions overriding user aliases, set ``localalias`` to
2090 ``{name: definitionstring}``.
2091 ``{name: definitionstring}``.
2091 """
2092 """
2092 if specs == [b'null']:
2093 if specs == [b'null']:
2093 return revset.baseset([nullrev])
2094 return revset.baseset([nullrev])
2094 if specs == [b'.']:
2095 if specs == [b'.']:
2095 quick_data = self._quick_access_changeid.get(b'.')
2096 quick_data = self._quick_access_changeid.get(b'.')
2096 if quick_data is not None:
2097 if quick_data is not None:
2097 return revset.baseset([quick_data[0]])
2098 return revset.baseset([quick_data[0]])
2098 if user:
2099 if user:
2099 m = revset.matchany(
2100 m = revset.matchany(
2100 self.ui,
2101 self.ui,
2101 specs,
2102 specs,
2102 lookup=revset.lookupfn(self),
2103 lookup=revset.lookupfn(self),
2103 localalias=localalias,
2104 localalias=localalias,
2104 )
2105 )
2105 else:
2106 else:
2106 m = revset.matchany(None, specs, localalias=localalias)
2107 m = revset.matchany(None, specs, localalias=localalias)
2107 return m(self)
2108 return m(self)
2108
2109
2109 def url(self) -> bytes:
2110 def url(self) -> bytes:
2110 return b'file:' + self.root
2111 return b'file:' + self.root
2111
2112
2112 def hook(self, name, throw=False, **args):
2113 def hook(self, name, throw=False, **args):
2113 """Call a hook, passing this repo instance.
2114 """Call a hook, passing this repo instance.
2114
2115
2115 This a convenience method to aid invoking hooks. Extensions likely
2116 This a convenience method to aid invoking hooks. Extensions likely
2116 won't call this unless they have registered a custom hook or are
2117 won't call this unless they have registered a custom hook or are
2117 replacing code that is expected to call a hook.
2118 replacing code that is expected to call a hook.
2118 """
2119 """
2119 return hook.hook(self.ui, self, name, throw, **args)
2120 return hook.hook(self.ui, self, name, throw, **args)
2120
2121
2121 @filteredpropertycache
2122 @filteredpropertycache
2122 def _tagscache(self):
2123 def _tagscache(self):
2123 """Returns a tagscache object that contains various tags related
2124 """Returns a tagscache object that contains various tags related
2124 caches."""
2125 caches."""
2125
2126
2126 # This simplifies its cache management by having one decorated
2127 # This simplifies its cache management by having one decorated
2127 # function (this one) and the rest simply fetch things from it.
2128 # function (this one) and the rest simply fetch things from it.
2128 class tagscache:
2129 class tagscache:
2129 def __init__(self):
2130 def __init__(self):
2130 # These two define the set of tags for this repository. tags
2131 # These two define the set of tags for this repository. tags
2131 # maps tag name to node; tagtypes maps tag name to 'global' or
2132 # maps tag name to node; tagtypes maps tag name to 'global' or
2132 # 'local'. (Global tags are defined by .hgtags across all
2133 # 'local'. (Global tags are defined by .hgtags across all
2133 # heads, and local tags are defined in .hg/localtags.)
2134 # heads, and local tags are defined in .hg/localtags.)
2134 # They constitute the in-memory cache of tags.
2135 # They constitute the in-memory cache of tags.
2135 self.tags = self.tagtypes = None
2136 self.tags = self.tagtypes = None
2136
2137
2137 self.nodetagscache = self.tagslist = None
2138 self.nodetagscache = self.tagslist = None
2138
2139
2139 cache = tagscache()
2140 cache = tagscache()
2140 cache.tags, cache.tagtypes = self._findtags()
2141 cache.tags, cache.tagtypes = self._findtags()
2141
2142
2142 return cache
2143 return cache
2143
2144
2144 def tags(self):
2145 def tags(self):
2145 '''return a mapping of tag to node'''
2146 '''return a mapping of tag to node'''
2146 t = {}
2147 t = {}
2147 if self.changelog.filteredrevs:
2148 if self.changelog.filteredrevs:
2148 tags, tt = self._findtags()
2149 tags, tt = self._findtags()
2149 else:
2150 else:
2150 tags = self._tagscache.tags
2151 tags = self._tagscache.tags
2151 rev = self.changelog.rev
2152 rev = self.changelog.rev
2152 for k, v in tags.items():
2153 for k, v in tags.items():
2153 try:
2154 try:
2154 # ignore tags to unknown nodes
2155 # ignore tags to unknown nodes
2155 rev(v)
2156 rev(v)
2156 t[k] = v
2157 t[k] = v
2157 except (error.LookupError, ValueError):
2158 except (error.LookupError, ValueError):
2158 pass
2159 pass
2159 return t
2160 return t
2160
2161
2161 def _findtags(self):
2162 def _findtags(self):
2162 """Do the hard work of finding tags. Return a pair of dicts
2163 """Do the hard work of finding tags. Return a pair of dicts
2163 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2164 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2164 maps tag name to a string like \'global\' or \'local\'.
2165 maps tag name to a string like \'global\' or \'local\'.
2165 Subclasses or extensions are free to add their own tags, but
2166 Subclasses or extensions are free to add their own tags, but
2166 should be aware that the returned dicts will be retained for the
2167 should be aware that the returned dicts will be retained for the
2167 duration of the localrepo object."""
2168 duration of the localrepo object."""
2168
2169
2169 # XXX what tagtype should subclasses/extensions use? Currently
2170 # XXX what tagtype should subclasses/extensions use? Currently
2170 # mq and bookmarks add tags, but do not set the tagtype at all.
2171 # mq and bookmarks add tags, but do not set the tagtype at all.
2171 # Should each extension invent its own tag type? Should there
2172 # Should each extension invent its own tag type? Should there
2172 # be one tagtype for all such "virtual" tags? Or is the status
2173 # be one tagtype for all such "virtual" tags? Or is the status
2173 # quo fine?
2174 # quo fine?
2174
2175
2175 # map tag name to (node, hist)
2176 # map tag name to (node, hist)
2176 alltags = tagsmod.findglobaltags(self.ui, self)
2177 alltags = tagsmod.findglobaltags(self.ui, self)
2177 # map tag name to tag type
2178 # map tag name to tag type
2178 tagtypes = {tag: b'global' for tag in alltags}
2179 tagtypes = {tag: b'global' for tag in alltags}
2179
2180
2180 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2181 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2181
2182
2182 # Build the return dicts. Have to re-encode tag names because
2183 # Build the return dicts. Have to re-encode tag names because
2183 # the tags module always uses UTF-8 (in order not to lose info
2184 # the tags module always uses UTF-8 (in order not to lose info
2184 # writing to the cache), but the rest of Mercurial wants them in
2185 # writing to the cache), but the rest of Mercurial wants them in
2185 # local encoding.
2186 # local encoding.
2186 tags = {}
2187 tags = {}
2187 for name, (node, hist) in alltags.items():
2188 for name, (node, hist) in alltags.items():
2188 if node != self.nullid:
2189 if node != self.nullid:
2189 tags[encoding.tolocal(name)] = node
2190 tags[encoding.tolocal(name)] = node
2190 tags[b'tip'] = self.changelog.tip()
2191 tags[b'tip'] = self.changelog.tip()
2191 tagtypes = {
2192 tagtypes = {
2192 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2193 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2193 }
2194 }
2194 return (tags, tagtypes)
2195 return (tags, tagtypes)
2195
2196
2196 def tagtype(self, tagname):
2197 def tagtype(self, tagname):
2197 """
2198 """
2198 return the type of the given tag. result can be:
2199 return the type of the given tag. result can be:
2199
2200
2200 'local' : a local tag
2201 'local' : a local tag
2201 'global' : a global tag
2202 'global' : a global tag
2202 None : tag does not exist
2203 None : tag does not exist
2203 """
2204 """
2204
2205
2205 return self._tagscache.tagtypes.get(tagname)
2206 return self._tagscache.tagtypes.get(tagname)
2206
2207
2207 def tagslist(self):
2208 def tagslist(self):
2208 '''return a list of tags ordered by revision'''
2209 '''return a list of tags ordered by revision'''
2209 if not self._tagscache.tagslist:
2210 if not self._tagscache.tagslist:
2210 l = []
2211 l = []
2211 for t, n in self.tags().items():
2212 for t, n in self.tags().items():
2212 l.append((self.changelog.rev(n), t, n))
2213 l.append((self.changelog.rev(n), t, n))
2213 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2214 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2214
2215
2215 return self._tagscache.tagslist
2216 return self._tagscache.tagslist
2216
2217
2217 def nodetags(self, node):
2218 def nodetags(self, node):
2218 '''return the tags associated with a node'''
2219 '''return the tags associated with a node'''
2219 if not self._tagscache.nodetagscache:
2220 if not self._tagscache.nodetagscache:
2220 nodetagscache = {}
2221 nodetagscache = {}
2221 for t, n in self._tagscache.tags.items():
2222 for t, n in self._tagscache.tags.items():
2222 nodetagscache.setdefault(n, []).append(t)
2223 nodetagscache.setdefault(n, []).append(t)
2223 for tags in nodetagscache.values():
2224 for tags in nodetagscache.values():
2224 tags.sort()
2225 tags.sort()
2225 self._tagscache.nodetagscache = nodetagscache
2226 self._tagscache.nodetagscache = nodetagscache
2226 return self._tagscache.nodetagscache.get(node, [])
2227 return self._tagscache.nodetagscache.get(node, [])
2227
2228
2228 def nodebookmarks(self, node):
2229 def nodebookmarks(self, node):
2229 """return the list of bookmarks pointing to the specified node"""
2230 """return the list of bookmarks pointing to the specified node"""
2230 return self._bookmarks.names(node)
2231 return self._bookmarks.names(node)
2231
2232
2232 def branchmap(self):
2233 def branchmap(self):
2233 """returns a dictionary {branch: [branchheads]} with branchheads
2234 """returns a dictionary {branch: [branchheads]} with branchheads
2234 ordered by increasing revision number"""
2235 ordered by increasing revision number"""
2235 return self._branchcaches[self]
2236 return self._branchcaches[self]
2236
2237
2237 @unfilteredmethod
2238 @unfilteredmethod
2238 def revbranchcache(self):
2239 def revbranchcache(self):
2239 if not self._revbranchcache:
2240 if not self._revbranchcache:
2240 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2241 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2241 return self._revbranchcache
2242 return self._revbranchcache
2242
2243
2243 def register_changeset(self, rev, changelogrevision):
2244 def register_changeset(self, rev, changelogrevision):
2244 self.revbranchcache().setdata(rev, changelogrevision)
2245 self.revbranchcache().setdata(rev, changelogrevision)
2245
2246
2246 def branchtip(self, branch, ignoremissing=False):
2247 def branchtip(self, branch, ignoremissing=False):
2247 """return the tip node for a given branch
2248 """return the tip node for a given branch
2248
2249
2249 If ignoremissing is True, then this method will not raise an error.
2250 If ignoremissing is True, then this method will not raise an error.
2250 This is helpful for callers that only expect None for a missing branch
2251 This is helpful for callers that only expect None for a missing branch
2251 (e.g. namespace).
2252 (e.g. namespace).
2252
2253
2253 """
2254 """
2254 try:
2255 try:
2255 return self.branchmap().branchtip(branch)
2256 return self.branchmap().branchtip(branch)
2256 except KeyError:
2257 except KeyError:
2257 if not ignoremissing:
2258 if not ignoremissing:
2258 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2259 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2259 else:
2260 else:
2260 pass
2261 pass
2261
2262
2262 def lookup(self, key):
2263 def lookup(self, key):
2263 node = scmutil.revsymbol(self, key).node()
2264 node = scmutil.revsymbol(self, key).node()
2264 if node is None:
2265 if node is None:
2265 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2266 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2266 return node
2267 return node
2267
2268
2268 def lookupbranch(self, key):
2269 def lookupbranch(self, key):
2269 if self.branchmap().hasbranch(key):
2270 if self.branchmap().hasbranch(key):
2270 return key
2271 return key
2271
2272
2272 return scmutil.revsymbol(self, key).branch()
2273 return scmutil.revsymbol(self, key).branch()
2273
2274
2274 def known(self, nodes):
2275 def known(self, nodes):
2275 cl = self.changelog
2276 cl = self.changelog
2276 get_rev = cl.index.get_rev
2277 get_rev = cl.index.get_rev
2277 filtered = cl.filteredrevs
2278 filtered = cl.filteredrevs
2278 result = []
2279 result = []
2279 for n in nodes:
2280 for n in nodes:
2280 r = get_rev(n)
2281 r = get_rev(n)
2281 resp = not (r is None or r in filtered)
2282 resp = not (r is None or r in filtered)
2282 result.append(resp)
2283 result.append(resp)
2283 return result
2284 return result
2284
2285
2285 def local(self):
2286 def local(self):
2286 return self
2287 return self
2287
2288
2288 def publishing(self):
2289 def publishing(self):
2289 # it's safe (and desirable) to trust the publish flag unconditionally
2290 # it's safe (and desirable) to trust the publish flag unconditionally
2290 # so that we don't finalize changes shared between users via ssh or nfs
2291 # so that we don't finalize changes shared between users via ssh or nfs
2291 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2292 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2292
2293
2293 def cancopy(self):
2294 def cancopy(self):
2294 # so statichttprepo's override of local() works
2295 # so statichttprepo's override of local() works
2295 if not self.local():
2296 if not self.local():
2296 return False
2297 return False
2297 if not self.publishing():
2298 if not self.publishing():
2298 return True
2299 return True
2299 # if publishing we can't copy if there is filtered content
2300 # if publishing we can't copy if there is filtered content
2300 return not self.filtered(b'visible').changelog.filteredrevs
2301 return not self.filtered(b'visible').changelog.filteredrevs
2301
2302
2302 def shared(self):
2303 def shared(self):
2303 '''the type of shared repository (None if not shared)'''
2304 '''the type of shared repository (None if not shared)'''
2304 if self.sharedpath != self.path:
2305 if self.sharedpath != self.path:
2305 return b'store'
2306 return b'store'
2306 return None
2307 return None
2307
2308
2308 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2309 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2309 return self.vfs.reljoin(self.root, f, *insidef)
2310 return self.vfs.reljoin(self.root, f, *insidef)
2310
2311
2311 def setparents(self, p1, p2=None):
2312 def setparents(self, p1, p2=None):
2312 if p2 is None:
2313 if p2 is None:
2313 p2 = self.nullid
2314 p2 = self.nullid
2314 self[None].setparents(p1, p2)
2315 self[None].setparents(p1, p2)
2315 self._quick_access_changeid_invalidate()
2316 self._quick_access_changeid_invalidate()
2316
2317
2317 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2318 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2318 """changeid must be a changeset revision, if specified.
2319 """changeid must be a changeset revision, if specified.
2319 fileid can be a file revision or node."""
2320 fileid can be a file revision or node."""
2320 return context.filectx(
2321 return context.filectx(
2321 self, path, changeid, fileid, changectx=changectx
2322 self, path, changeid, fileid, changectx=changectx
2322 )
2323 )
2323
2324
2324 def getcwd(self) -> bytes:
2325 def getcwd(self) -> bytes:
2325 return self.dirstate.getcwd()
2326 return self.dirstate.getcwd()
2326
2327
2327 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2328 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2328 return self.dirstate.pathto(f, cwd)
2329 return self.dirstate.pathto(f, cwd)
2329
2330
2330 def _loadfilter(self, filter):
2331 def _loadfilter(self, filter):
2331 if filter not in self._filterpats:
2332 if filter not in self._filterpats:
2332 l = []
2333 l = []
2333 for pat, cmd in self.ui.configitems(filter):
2334 for pat, cmd in self.ui.configitems(filter):
2334 if cmd == b'!':
2335 if cmd == b'!':
2335 continue
2336 continue
2336 mf = matchmod.match(self.root, b'', [pat])
2337 mf = matchmod.match(self.root, b'', [pat])
2337 fn = None
2338 fn = None
2338 params = cmd
2339 params = cmd
2339 for name, filterfn in self._datafilters.items():
2340 for name, filterfn in self._datafilters.items():
2340 if cmd.startswith(name):
2341 if cmd.startswith(name):
2341 fn = filterfn
2342 fn = filterfn
2342 params = cmd[len(name) :].lstrip()
2343 params = cmd[len(name) :].lstrip()
2343 break
2344 break
2344 if not fn:
2345 if not fn:
2345 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2346 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2346 fn.__name__ = 'commandfilter'
2347 fn.__name__ = 'commandfilter'
2347 # Wrap old filters not supporting keyword arguments
2348 # Wrap old filters not supporting keyword arguments
2348 if not pycompat.getargspec(fn)[2]:
2349 if not pycompat.getargspec(fn)[2]:
2349 oldfn = fn
2350 oldfn = fn
2350 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2351 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2351 fn.__name__ = 'compat-' + oldfn.__name__
2352 fn.__name__ = 'compat-' + oldfn.__name__
2352 l.append((mf, fn, params))
2353 l.append((mf, fn, params))
2353 self._filterpats[filter] = l
2354 self._filterpats[filter] = l
2354 return self._filterpats[filter]
2355 return self._filterpats[filter]
2355
2356
2356 def _filter(self, filterpats, filename, data):
2357 def _filter(self, filterpats, filename, data):
2357 for mf, fn, cmd in filterpats:
2358 for mf, fn, cmd in filterpats:
2358 if mf(filename):
2359 if mf(filename):
2359 self.ui.debug(
2360 self.ui.debug(
2360 b"filtering %s through %s\n"
2361 b"filtering %s through %s\n"
2361 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2362 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2362 )
2363 )
2363 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2364 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2364 break
2365 break
2365
2366
2366 return data
2367 return data
2367
2368
2368 @unfilteredpropertycache
2369 @unfilteredpropertycache
2369 def _encodefilterpats(self):
2370 def _encodefilterpats(self):
2370 return self._loadfilter(b'encode')
2371 return self._loadfilter(b'encode')
2371
2372
2372 @unfilteredpropertycache
2373 @unfilteredpropertycache
2373 def _decodefilterpats(self):
2374 def _decodefilterpats(self):
2374 return self._loadfilter(b'decode')
2375 return self._loadfilter(b'decode')
2375
2376
2376 def adddatafilter(self, name, filter):
2377 def adddatafilter(self, name, filter):
2377 self._datafilters[name] = filter
2378 self._datafilters[name] = filter
2378
2379
2379 def wread(self, filename: bytes) -> bytes:
2380 def wread(self, filename: bytes) -> bytes:
2380 if self.wvfs.islink(filename):
2381 if self.wvfs.islink(filename):
2381 data = self.wvfs.readlink(filename)
2382 data = self.wvfs.readlink(filename)
2382 else:
2383 else:
2383 data = self.wvfs.read(filename)
2384 data = self.wvfs.read(filename)
2384 return self._filter(self._encodefilterpats, filename, data)
2385 return self._filter(self._encodefilterpats, filename, data)
2385
2386
2386 def wwrite(
2387 def wwrite(
2387 self,
2388 self,
2388 filename: bytes,
2389 filename: bytes,
2389 data: bytes,
2390 data: bytes,
2390 flags: bytes,
2391 flags: bytes,
2391 backgroundclose=False,
2392 backgroundclose=False,
2392 **kwargs
2393 **kwargs
2393 ) -> int:
2394 ) -> int:
2394 """write ``data`` into ``filename`` in the working directory
2395 """write ``data`` into ``filename`` in the working directory
2395
2396
2396 This returns length of written (maybe decoded) data.
2397 This returns length of written (maybe decoded) data.
2397 """
2398 """
2398 data = self._filter(self._decodefilterpats, filename, data)
2399 data = self._filter(self._decodefilterpats, filename, data)
2399 if b'l' in flags:
2400 if b'l' in flags:
2400 self.wvfs.symlink(data, filename)
2401 self.wvfs.symlink(data, filename)
2401 else:
2402 else:
2402 self.wvfs.write(
2403 self.wvfs.write(
2403 filename, data, backgroundclose=backgroundclose, **kwargs
2404 filename, data, backgroundclose=backgroundclose, **kwargs
2404 )
2405 )
2405 if b'x' in flags:
2406 if b'x' in flags:
2406 self.wvfs.setflags(filename, False, True)
2407 self.wvfs.setflags(filename, False, True)
2407 else:
2408 else:
2408 self.wvfs.setflags(filename, False, False)
2409 self.wvfs.setflags(filename, False, False)
2409 return len(data)
2410 return len(data)
2410
2411
2411 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2412 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2412 return self._filter(self._decodefilterpats, filename, data)
2413 return self._filter(self._decodefilterpats, filename, data)
2413
2414
2414 def currenttransaction(self):
2415 def currenttransaction(self):
2415 """return the current transaction or None if non exists"""
2416 """return the current transaction or None if non exists"""
2416 if self._transref:
2417 if self._transref:
2417 tr = self._transref()
2418 tr = self._transref()
2418 else:
2419 else:
2419 tr = None
2420 tr = None
2420
2421
2421 if tr and tr.running():
2422 if tr and tr.running():
2422 return tr
2423 return tr
2423 return None
2424 return None
2424
2425
2425 def transaction(self, desc, report=None):
2426 def transaction(self, desc, report=None):
2426 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2427 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2427 b'devel', b'check-locks'
2428 b'devel', b'check-locks'
2428 ):
2429 ):
2429 if self._currentlock(self._lockref) is None:
2430 if self._currentlock(self._lockref) is None:
2430 raise error.ProgrammingError(b'transaction requires locking')
2431 raise error.ProgrammingError(b'transaction requires locking')
2431 tr = self.currenttransaction()
2432 tr = self.currenttransaction()
2432 if tr is not None:
2433 if tr is not None:
2433 return tr.nest(name=desc)
2434 return tr.nest(name=desc)
2434
2435
2435 # abort here if the journal already exists
2436 # abort here if the journal already exists
2436 if self.svfs.exists(b"journal"):
2437 if self.svfs.exists(b"journal"):
2437 raise error.RepoError(
2438 raise error.RepoError(
2438 _(b"abandoned transaction found"),
2439 _(b"abandoned transaction found"),
2439 hint=_(b"run 'hg recover' to clean up transaction"),
2440 hint=_(b"run 'hg recover' to clean up transaction"),
2440 )
2441 )
2441
2442
2442 # At that point your dirstate should be clean:
2443 # At that point your dirstate should be clean:
2443 #
2444 #
2444 # - If you don't have the wlock, why would you still have a dirty
2445 # - If you don't have the wlock, why would you still have a dirty
2445 # dirstate ?
2446 # dirstate ?
2446 #
2447 #
2447 # - If you hold the wlock, you should not be opening a transaction in
2448 # - If you hold the wlock, you should not be opening a transaction in
2448 # the middle of a `distate.changing_*` block. The transaction needs to
2449 # the middle of a `distate.changing_*` block. The transaction needs to
2449 # be open before that and wrap the change-context.
2450 # be open before that and wrap the change-context.
2450 #
2451 #
2451 # - If you are not within a `dirstate.changing_*` context, why is our
2452 # - If you are not within a `dirstate.changing_*` context, why is our
2452 # dirstate dirty?
2453 # dirstate dirty?
2453 if self.dirstate._dirty:
2454 if self.dirstate._dirty:
2454 m = "cannot open a transaction with a dirty dirstate"
2455 m = "cannot open a transaction with a dirty dirstate"
2455 raise error.ProgrammingError(m)
2456 raise error.ProgrammingError(m)
2456
2457
2457 idbase = b"%.40f#%f" % (random.random(), time.time())
2458 idbase = b"%.40f#%f" % (random.random(), time.time())
2458 ha = hex(hashutil.sha1(idbase).digest())
2459 ha = hex(hashutil.sha1(idbase).digest())
2459 txnid = b'TXN:' + ha
2460 txnid = b'TXN:' + ha
2460 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2461 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2461
2462
2462 self._writejournal(desc)
2463 self._writejournal(desc)
2463 if report:
2464 if report:
2464 rp = report
2465 rp = report
2465 else:
2466 else:
2466 rp = self.ui.warn
2467 rp = self.ui.warn
2467 vfsmap = self.vfs_map
2468 vfsmap = self.vfs_map
2468 # we must avoid cyclic reference between repo and transaction.
2469 # we must avoid cyclic reference between repo and transaction.
2469 reporef = weakref.ref(self)
2470 reporef = weakref.ref(self)
2470 # Code to track tag movement
2471 # Code to track tag movement
2471 #
2472 #
2472 # Since tags are all handled as file content, it is actually quite hard
2473 # Since tags are all handled as file content, it is actually quite hard
2473 # to track these movement from a code perspective. So we fallback to a
2474 # to track these movement from a code perspective. So we fallback to a
2474 # tracking at the repository level. One could envision to track changes
2475 # tracking at the repository level. One could envision to track changes
2475 # to the '.hgtags' file through changegroup apply but that fails to
2476 # to the '.hgtags' file through changegroup apply but that fails to
2476 # cope with case where transaction expose new heads without changegroup
2477 # cope with case where transaction expose new heads without changegroup
2477 # being involved (eg: phase movement).
2478 # being involved (eg: phase movement).
2478 #
2479 #
2479 # For now, We gate the feature behind a flag since this likely comes
2480 # For now, We gate the feature behind a flag since this likely comes
2480 # with performance impacts. The current code run more often than needed
2481 # with performance impacts. The current code run more often than needed
2481 # and do not use caches as much as it could. The current focus is on
2482 # and do not use caches as much as it could. The current focus is on
2482 # the behavior of the feature so we disable it by default. The flag
2483 # the behavior of the feature so we disable it by default. The flag
2483 # will be removed when we are happy with the performance impact.
2484 # will be removed when we are happy with the performance impact.
2484 #
2485 #
2485 # Once this feature is no longer experimental move the following
2486 # Once this feature is no longer experimental move the following
2486 # documentation to the appropriate help section:
2487 # documentation to the appropriate help section:
2487 #
2488 #
2488 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2489 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2489 # tags (new or changed or deleted tags). In addition the details of
2490 # tags (new or changed or deleted tags). In addition the details of
2490 # these changes are made available in a file at:
2491 # these changes are made available in a file at:
2491 # ``REPOROOT/.hg/changes/tags.changes``.
2492 # ``REPOROOT/.hg/changes/tags.changes``.
2492 # Make sure you check for HG_TAG_MOVED before reading that file as it
2493 # Make sure you check for HG_TAG_MOVED before reading that file as it
2493 # might exist from a previous transaction even if no tag were touched
2494 # might exist from a previous transaction even if no tag were touched
2494 # in this one. Changes are recorded in a line base format::
2495 # in this one. Changes are recorded in a line base format::
2495 #
2496 #
2496 # <action> <hex-node> <tag-name>\n
2497 # <action> <hex-node> <tag-name>\n
2497 #
2498 #
2498 # Actions are defined as follow:
2499 # Actions are defined as follow:
2499 # "-R": tag is removed,
2500 # "-R": tag is removed,
2500 # "+A": tag is added,
2501 # "+A": tag is added,
2501 # "-M": tag is moved (old value),
2502 # "-M": tag is moved (old value),
2502 # "+M": tag is moved (new value),
2503 # "+M": tag is moved (new value),
2503 tracktags = lambda x: None
2504 tracktags = lambda x: None
2504 # experimental config: experimental.hook-track-tags
2505 # experimental config: experimental.hook-track-tags
2505 shouldtracktags = self.ui.configbool(
2506 shouldtracktags = self.ui.configbool(
2506 b'experimental', b'hook-track-tags'
2507 b'experimental', b'hook-track-tags'
2507 )
2508 )
2508 if desc != b'strip' and shouldtracktags:
2509 if desc != b'strip' and shouldtracktags:
2509 oldheads = self.changelog.headrevs()
2510 oldheads = self.changelog.headrevs()
2510
2511
2511 def tracktags(tr2):
2512 def tracktags(tr2):
2512 repo = reporef()
2513 repo = reporef()
2513 assert repo is not None # help pytype
2514 assert repo is not None # help pytype
2514 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2515 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2515 newheads = repo.changelog.headrevs()
2516 newheads = repo.changelog.headrevs()
2516 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2517 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2517 # notes: we compare lists here.
2518 # notes: we compare lists here.
2518 # As we do it only once buiding set would not be cheaper
2519 # As we do it only once buiding set would not be cheaper
2519 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2520 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2520 if changes:
2521 if changes:
2521 tr2.hookargs[b'tag_moved'] = b'1'
2522 tr2.hookargs[b'tag_moved'] = b'1'
2522 with repo.vfs(
2523 with repo.vfs(
2523 b'changes/tags.changes', b'w', atomictemp=True
2524 b'changes/tags.changes', b'w', atomictemp=True
2524 ) as changesfile:
2525 ) as changesfile:
2525 # note: we do not register the file to the transaction
2526 # note: we do not register the file to the transaction
2526 # because we needs it to still exist on the transaction
2527 # because we needs it to still exist on the transaction
2527 # is close (for txnclose hooks)
2528 # is close (for txnclose hooks)
2528 tagsmod.writediff(changesfile, changes)
2529 tagsmod.writediff(changesfile, changes)
2529
2530
2530 def validate(tr2):
2531 def validate(tr2):
2531 """will run pre-closing hooks"""
2532 """will run pre-closing hooks"""
2532 # XXX the transaction API is a bit lacking here so we take a hacky
2533 # XXX the transaction API is a bit lacking here so we take a hacky
2533 # path for now
2534 # path for now
2534 #
2535 #
2535 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2536 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2536 # dict is copied before these run. In addition we needs the data
2537 # dict is copied before these run. In addition we needs the data
2537 # available to in memory hooks too.
2538 # available to in memory hooks too.
2538 #
2539 #
2539 # Moreover, we also need to make sure this runs before txnclose
2540 # Moreover, we also need to make sure this runs before txnclose
2540 # hooks and there is no "pending" mechanism that would execute
2541 # hooks and there is no "pending" mechanism that would execute
2541 # logic only if hooks are about to run.
2542 # logic only if hooks are about to run.
2542 #
2543 #
2543 # Fixing this limitation of the transaction is also needed to track
2544 # Fixing this limitation of the transaction is also needed to track
2544 # other families of changes (bookmarks, phases, obsolescence).
2545 # other families of changes (bookmarks, phases, obsolescence).
2545 #
2546 #
2546 # This will have to be fixed before we remove the experimental
2547 # This will have to be fixed before we remove the experimental
2547 # gating.
2548 # gating.
2548 tracktags(tr2)
2549 tracktags(tr2)
2549 repo = reporef()
2550 repo = reporef()
2550 assert repo is not None # help pytype
2551 assert repo is not None # help pytype
2551
2552
2552 singleheadopt = (b'experimental', b'single-head-per-branch')
2553 singleheadopt = (b'experimental', b'single-head-per-branch')
2553 singlehead = repo.ui.configbool(*singleheadopt)
2554 singlehead = repo.ui.configbool(*singleheadopt)
2554 if singlehead:
2555 if singlehead:
2555 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2556 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2556 accountclosed = singleheadsub.get(
2557 accountclosed = singleheadsub.get(
2557 b"account-closed-heads", False
2558 b"account-closed-heads", False
2558 )
2559 )
2559 if singleheadsub.get(b"public-changes-only", False):
2560 if singleheadsub.get(b"public-changes-only", False):
2560 filtername = b"immutable"
2561 filtername = b"immutable"
2561 else:
2562 else:
2562 filtername = b"visible"
2563 filtername = b"visible"
2563 scmutil.enforcesinglehead(
2564 scmutil.enforcesinglehead(
2564 repo, tr2, desc, accountclosed, filtername
2565 repo, tr2, desc, accountclosed, filtername
2565 )
2566 )
2566 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2567 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2567 for name, (old, new) in sorted(
2568 for name, (old, new) in sorted(
2568 tr.changes[b'bookmarks'].items()
2569 tr.changes[b'bookmarks'].items()
2569 ):
2570 ):
2570 args = tr.hookargs.copy()
2571 args = tr.hookargs.copy()
2571 args.update(bookmarks.preparehookargs(name, old, new))
2572 args.update(bookmarks.preparehookargs(name, old, new))
2572 repo.hook(
2573 repo.hook(
2573 b'pretxnclose-bookmark',
2574 b'pretxnclose-bookmark',
2574 throw=True,
2575 throw=True,
2575 **pycompat.strkwargs(args)
2576 **pycompat.strkwargs(args)
2576 )
2577 )
2577 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2578 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2578 cl = repo.unfiltered().changelog
2579 cl = repo.unfiltered().changelog
2579 for revs, (old, new) in tr.changes[b'phases']:
2580 for revs, (old, new) in tr.changes[b'phases']:
2580 for rev in revs:
2581 for rev in revs:
2581 args = tr.hookargs.copy()
2582 args = tr.hookargs.copy()
2582 node = hex(cl.node(rev))
2583 node = hex(cl.node(rev))
2583 args.update(phases.preparehookargs(node, old, new))
2584 args.update(phases.preparehookargs(node, old, new))
2584 repo.hook(
2585 repo.hook(
2585 b'pretxnclose-phase',
2586 b'pretxnclose-phase',
2586 throw=True,
2587 throw=True,
2587 **pycompat.strkwargs(args)
2588 **pycompat.strkwargs(args)
2588 )
2589 )
2589
2590
2590 repo.hook(
2591 repo.hook(
2591 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2592 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2592 )
2593 )
2593
2594
2594 def releasefn(tr, success):
2595 def releasefn(tr, success):
2595 repo = reporef()
2596 repo = reporef()
2596 if repo is None:
2597 if repo is None:
2597 # If the repo has been GC'd (and this release function is being
2598 # If the repo has been GC'd (and this release function is being
2598 # called from transaction.__del__), there's not much we can do,
2599 # called from transaction.__del__), there's not much we can do,
2599 # so just leave the unfinished transaction there and let the
2600 # so just leave the unfinished transaction there and let the
2600 # user run `hg recover`.
2601 # user run `hg recover`.
2601 return
2602 return
2602 if success:
2603 if success:
2603 # this should be explicitly invoked here, because
2604 # this should be explicitly invoked here, because
2604 # in-memory changes aren't written out at closing
2605 # in-memory changes aren't written out at closing
2605 # transaction, if tr.addfilegenerator (via
2606 # transaction, if tr.addfilegenerator (via
2606 # dirstate.write or so) isn't invoked while
2607 # dirstate.write or so) isn't invoked while
2607 # transaction running
2608 # transaction running
2608 repo.dirstate.write(None)
2609 repo.dirstate.write(None)
2609 else:
2610 else:
2610 # discard all changes (including ones already written
2611 # discard all changes (including ones already written
2611 # out) in this transaction
2612 # out) in this transaction
2612 repo.invalidate(clearfilecache=True)
2613 repo.invalidate(clearfilecache=True)
2613
2614
2614 tr = transaction.transaction(
2615 tr = transaction.transaction(
2615 rp,
2616 rp,
2616 self.svfs,
2617 self.svfs,
2617 vfsmap,
2618 vfsmap,
2618 b"journal",
2619 b"journal",
2619 b"undo",
2620 b"undo",
2620 lambda: None,
2621 lambda: None,
2621 self.store.createmode,
2622 self.store.createmode,
2622 validator=validate,
2623 validator=validate,
2623 releasefn=releasefn,
2624 releasefn=releasefn,
2624 checkambigfiles=_cachedfiles,
2625 checkambigfiles=_cachedfiles,
2625 name=desc,
2626 name=desc,
2626 )
2627 )
2627 for vfs_id, path in self._journalfiles():
2628 for vfs_id, path in self._journalfiles():
2628 tr.add_journal(vfs_id, path)
2629 tr.add_journal(vfs_id, path)
2629 tr.changes[b'origrepolen'] = len(self)
2630 tr.changes[b'origrepolen'] = len(self)
2630 tr.changes[b'obsmarkers'] = set()
2631 tr.changes[b'obsmarkers'] = set()
2631 tr.changes[b'phases'] = []
2632 tr.changes[b'phases'] = []
2632 tr.changes[b'bookmarks'] = {}
2633 tr.changes[b'bookmarks'] = {}
2633
2634
2634 tr.hookargs[b'txnid'] = txnid
2635 tr.hookargs[b'txnid'] = txnid
2635 tr.hookargs[b'txnname'] = desc
2636 tr.hookargs[b'txnname'] = desc
2636 tr.hookargs[b'changes'] = tr.changes
2637 tr.hookargs[b'changes'] = tr.changes
2637 # note: writing the fncache only during finalize mean that the file is
2638 # note: writing the fncache only during finalize mean that the file is
2638 # outdated when running hooks. As fncache is used for streaming clone,
2639 # outdated when running hooks. As fncache is used for streaming clone,
2639 # this is not expected to break anything that happen during the hooks.
2640 # this is not expected to break anything that happen during the hooks.
2640 tr.addfinalize(b'flush-fncache', self.store.write)
2641 tr.addfinalize(b'flush-fncache', self.store.write)
2641
2642
2642 def txnclosehook(tr2):
2643 def txnclosehook(tr2):
2643 """To be run if transaction is successful, will schedule a hook run"""
2644 """To be run if transaction is successful, will schedule a hook run"""
2644 # Don't reference tr2 in hook() so we don't hold a reference.
2645 # Don't reference tr2 in hook() so we don't hold a reference.
2645 # This reduces memory consumption when there are multiple
2646 # This reduces memory consumption when there are multiple
2646 # transactions per lock. This can likely go away if issue5045
2647 # transactions per lock. This can likely go away if issue5045
2647 # fixes the function accumulation.
2648 # fixes the function accumulation.
2648 hookargs = tr2.hookargs
2649 hookargs = tr2.hookargs
2649
2650
2650 def hookfunc(unused_success):
2651 def hookfunc(unused_success):
2651 repo = reporef()
2652 repo = reporef()
2652 assert repo is not None # help pytype
2653 assert repo is not None # help pytype
2653
2654
2654 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2655 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2655 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2656 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2656 for name, (old, new) in bmchanges:
2657 for name, (old, new) in bmchanges:
2657 args = tr.hookargs.copy()
2658 args = tr.hookargs.copy()
2658 args.update(bookmarks.preparehookargs(name, old, new))
2659 args.update(bookmarks.preparehookargs(name, old, new))
2659 repo.hook(
2660 repo.hook(
2660 b'txnclose-bookmark',
2661 b'txnclose-bookmark',
2661 throw=False,
2662 throw=False,
2662 **pycompat.strkwargs(args)
2663 **pycompat.strkwargs(args)
2663 )
2664 )
2664
2665
2665 if hook.hashook(repo.ui, b'txnclose-phase'):
2666 if hook.hashook(repo.ui, b'txnclose-phase'):
2666 cl = repo.unfiltered().changelog
2667 cl = repo.unfiltered().changelog
2667 phasemv = sorted(
2668 phasemv = sorted(
2668 tr.changes[b'phases'], key=lambda r: r[0][0]
2669 tr.changes[b'phases'], key=lambda r: r[0][0]
2669 )
2670 )
2670 for revs, (old, new) in phasemv:
2671 for revs, (old, new) in phasemv:
2671 for rev in revs:
2672 for rev in revs:
2672 args = tr.hookargs.copy()
2673 args = tr.hookargs.copy()
2673 node = hex(cl.node(rev))
2674 node = hex(cl.node(rev))
2674 args.update(phases.preparehookargs(node, old, new))
2675 args.update(phases.preparehookargs(node, old, new))
2675 repo.hook(
2676 repo.hook(
2676 b'txnclose-phase',
2677 b'txnclose-phase',
2677 throw=False,
2678 throw=False,
2678 **pycompat.strkwargs(args)
2679 **pycompat.strkwargs(args)
2679 )
2680 )
2680
2681
2681 repo.hook(
2682 repo.hook(
2682 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2683 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2683 )
2684 )
2684
2685
2685 repo = reporef()
2686 repo = reporef()
2686 assert repo is not None # help pytype
2687 assert repo is not None # help pytype
2687 repo._afterlock(hookfunc)
2688 repo._afterlock(hookfunc)
2688
2689
2689 tr.addfinalize(b'txnclose-hook', txnclosehook)
2690 tr.addfinalize(b'txnclose-hook', txnclosehook)
2690 # Include a leading "-" to make it happen before the transaction summary
2691 # Include a leading "-" to make it happen before the transaction summary
2691 # reports registered via scmutil.registersummarycallback() whose names
2692 # reports registered via scmutil.registersummarycallback() whose names
2692 # are 00-txnreport etc. That way, the caches will be warm when the
2693 # are 00-txnreport etc. That way, the caches will be warm when the
2693 # callbacks run.
2694 # callbacks run.
2694 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2695 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2695
2696
2696 def txnaborthook(tr2):
2697 def txnaborthook(tr2):
2697 """To be run if transaction is aborted"""
2698 """To be run if transaction is aborted"""
2698 repo = reporef()
2699 repo = reporef()
2699 assert repo is not None # help pytype
2700 assert repo is not None # help pytype
2700 repo.hook(
2701 repo.hook(
2701 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2702 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2702 )
2703 )
2703
2704
2704 tr.addabort(b'txnabort-hook', txnaborthook)
2705 tr.addabort(b'txnabort-hook', txnaborthook)
2705 # avoid eager cache invalidation. in-memory data should be identical
2706 # avoid eager cache invalidation. in-memory data should be identical
2706 # to stored data if transaction has no error.
2707 # to stored data if transaction has no error.
2707 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2708 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2708 self._transref = weakref.ref(tr)
2709 self._transref = weakref.ref(tr)
2709 scmutil.registersummarycallback(self, tr, desc)
2710 scmutil.registersummarycallback(self, tr, desc)
2710 # This only exist to deal with the need of rollback to have viable
2711 # This only exist to deal with the need of rollback to have viable
2711 # parents at the end of the operation. So backup viable parents at the
2712 # parents at the end of the operation. So backup viable parents at the
2712 # time of this operation.
2713 # time of this operation.
2713 #
2714 #
2714 # We only do it when the `wlock` is taken, otherwise other might be
2715 # We only do it when the `wlock` is taken, otherwise other might be
2715 # altering the dirstate under us.
2716 # altering the dirstate under us.
2716 #
2717 #
2717 # This is really not a great way to do this (first, because we cannot
2718 # This is really not a great way to do this (first, because we cannot
2718 # always do it). There are more viable alternative that exists
2719 # always do it). There are more viable alternative that exists
2719 #
2720 #
2720 # - backing only the working copy parent in a dedicated files and doing
2721 # - backing only the working copy parent in a dedicated files and doing
2721 # a clean "keep-update" to them on `hg rollback`.
2722 # a clean "keep-update" to them on `hg rollback`.
2722 #
2723 #
2723 # - slightly changing the behavior an applying a logic similar to "hg
2724 # - slightly changing the behavior an applying a logic similar to "hg
2724 # strip" to pick a working copy destination on `hg rollback`
2725 # strip" to pick a working copy destination on `hg rollback`
2725 if self.currentwlock() is not None:
2726 if self.currentwlock() is not None:
2726 ds = self.dirstate
2727 ds = self.dirstate
2727 if not self.vfs.exists(b'branch'):
2728 if not self.vfs.exists(b'branch'):
2728 # force a file to be written if None exist
2729 # force a file to be written if None exist
2729 ds.setbranch(b'default', None)
2730 ds.setbranch(b'default', None)
2730
2731
2731 def backup_dirstate(tr):
2732 def backup_dirstate(tr):
2732 for f in ds.all_file_names():
2733 for f in ds.all_file_names():
2733 # hardlink backup is okay because `dirstate` is always
2734 # hardlink backup is okay because `dirstate` is always
2734 # atomically written and possible data file are append only
2735 # atomically written and possible data file are append only
2735 # and resistant to trailing data.
2736 # and resistant to trailing data.
2736 tr.addbackup(f, hardlink=True, location=b'plain')
2737 tr.addbackup(f, hardlink=True, location=b'plain')
2737
2738
2738 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2739 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2739 return tr
2740 return tr
2740
2741
2741 def _journalfiles(self):
2742 def _journalfiles(self):
2742 return (
2743 return (
2743 (self.svfs, b'journal'),
2744 (self.svfs, b'journal'),
2744 (self.vfs, b'journal.desc'),
2745 (self.vfs, b'journal.desc'),
2745 )
2746 )
2746
2747
2747 def undofiles(self):
2748 def undofiles(self):
2748 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2749 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2749
2750
2750 @unfilteredmethod
2751 @unfilteredmethod
2751 def _writejournal(self, desc):
2752 def _writejournal(self, desc):
2752 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2753 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2753
2754
2754 def recover(self):
2755 def recover(self):
2755 with self.lock():
2756 with self.lock():
2756 if self.svfs.exists(b"journal"):
2757 if self.svfs.exists(b"journal"):
2757 self.ui.status(_(b"rolling back interrupted transaction\n"))
2758 self.ui.status(_(b"rolling back interrupted transaction\n"))
2758 vfsmap = self.vfs_map
2759 vfsmap = self.vfs_map
2759 transaction.rollback(
2760 transaction.rollback(
2760 self.svfs,
2761 self.svfs,
2761 vfsmap,
2762 vfsmap,
2762 b"journal",
2763 b"journal",
2763 self.ui.warn,
2764 self.ui.warn,
2764 checkambigfiles=_cachedfiles,
2765 checkambigfiles=_cachedfiles,
2765 )
2766 )
2766 self.invalidate()
2767 self.invalidate()
2767 return True
2768 return True
2768 else:
2769 else:
2769 self.ui.warn(_(b"no interrupted transaction available\n"))
2770 self.ui.warn(_(b"no interrupted transaction available\n"))
2770 return False
2771 return False
2771
2772
2772 def rollback(self, dryrun=False, force=False):
2773 def rollback(self, dryrun=False, force=False):
2773 wlock = lock = None
2774 wlock = lock = None
2774 try:
2775 try:
2775 wlock = self.wlock()
2776 wlock = self.wlock()
2776 lock = self.lock()
2777 lock = self.lock()
2777 if self.svfs.exists(b"undo"):
2778 if self.svfs.exists(b"undo"):
2778 return self._rollback(dryrun, force)
2779 return self._rollback(dryrun, force)
2779 else:
2780 else:
2780 self.ui.warn(_(b"no rollback information available\n"))
2781 self.ui.warn(_(b"no rollback information available\n"))
2781 return 1
2782 return 1
2782 finally:
2783 finally:
2783 release(lock, wlock)
2784 release(lock, wlock)
2784
2785
2785 @unfilteredmethod # Until we get smarter cache management
2786 @unfilteredmethod # Until we get smarter cache management
2786 def _rollback(self, dryrun, force):
2787 def _rollback(self, dryrun, force):
2787 ui = self.ui
2788 ui = self.ui
2788
2789
2789 parents = self.dirstate.parents()
2790 parents = self.dirstate.parents()
2790 try:
2791 try:
2791 args = self.vfs.read(b'undo.desc').splitlines()
2792 args = self.vfs.read(b'undo.desc').splitlines()
2792 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2793 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2793 if len(args) >= 3:
2794 if len(args) >= 3:
2794 detail = args[2]
2795 detail = args[2]
2795 oldtip = oldlen - 1
2796 oldtip = oldlen - 1
2796
2797
2797 if detail and ui.verbose:
2798 if detail and ui.verbose:
2798 msg = _(
2799 msg = _(
2799 b'repository tip rolled back to revision %d'
2800 b'repository tip rolled back to revision %d'
2800 b' (undo %s: %s)\n'
2801 b' (undo %s: %s)\n'
2801 ) % (oldtip, desc, detail)
2802 ) % (oldtip, desc, detail)
2802 else:
2803 else:
2803 msg = _(
2804 msg = _(
2804 b'repository tip rolled back to revision %d (undo %s)\n'
2805 b'repository tip rolled back to revision %d (undo %s)\n'
2805 ) % (oldtip, desc)
2806 ) % (oldtip, desc)
2806 parentgone = any(self[p].rev() > oldtip for p in parents)
2807 parentgone = any(self[p].rev() > oldtip for p in parents)
2807 except IOError:
2808 except IOError:
2808 msg = _(b'rolling back unknown transaction\n')
2809 msg = _(b'rolling back unknown transaction\n')
2809 desc = None
2810 desc = None
2810 parentgone = True
2811 parentgone = True
2811
2812
2812 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2813 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2813 raise error.Abort(
2814 raise error.Abort(
2814 _(
2815 _(
2815 b'rollback of last commit while not checked out '
2816 b'rollback of last commit while not checked out '
2816 b'may lose data'
2817 b'may lose data'
2817 ),
2818 ),
2818 hint=_(b'use -f to force'),
2819 hint=_(b'use -f to force'),
2819 )
2820 )
2820
2821
2821 ui.status(msg)
2822 ui.status(msg)
2822 if dryrun:
2823 if dryrun:
2823 return 0
2824 return 0
2824
2825
2825 self.destroying()
2826 self.destroying()
2826 vfsmap = self.vfs_map
2827 vfsmap = self.vfs_map
2827 skip_journal_pattern = None
2828 skip_journal_pattern = None
2828 if not parentgone:
2829 if not parentgone:
2829 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2830 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2830 transaction.rollback(
2831 transaction.rollback(
2831 self.svfs,
2832 self.svfs,
2832 vfsmap,
2833 vfsmap,
2833 b'undo',
2834 b'undo',
2834 ui.warn,
2835 ui.warn,
2835 checkambigfiles=_cachedfiles,
2836 checkambigfiles=_cachedfiles,
2836 skip_journal_pattern=skip_journal_pattern,
2837 skip_journal_pattern=skip_journal_pattern,
2837 )
2838 )
2838 self.invalidate()
2839 self.invalidate()
2839 self.dirstate.invalidate()
2840 self.dirstate.invalidate()
2840
2841
2841 if parentgone:
2842 if parentgone:
2842 # replace this with some explicit parent update in the future.
2843 # replace this with some explicit parent update in the future.
2843 has_node = self.changelog.index.has_node
2844 has_node = self.changelog.index.has_node
2844 if not all(has_node(p) for p in self.dirstate._pl):
2845 if not all(has_node(p) for p in self.dirstate._pl):
2845 # There was no dirstate to backup initially, we need to drop
2846 # There was no dirstate to backup initially, we need to drop
2846 # the existing one.
2847 # the existing one.
2847 with self.dirstate.changing_parents(self):
2848 with self.dirstate.changing_parents(self):
2848 self.dirstate.setparents(self.nullid)
2849 self.dirstate.setparents(self.nullid)
2849 self.dirstate.clear()
2850 self.dirstate.clear()
2850
2851
2851 parents = tuple([p.rev() for p in self[None].parents()])
2852 parents = tuple([p.rev() for p in self[None].parents()])
2852 if len(parents) > 1:
2853 if len(parents) > 1:
2853 ui.status(
2854 ui.status(
2854 _(
2855 _(
2855 b'working directory now based on '
2856 b'working directory now based on '
2856 b'revisions %d and %d\n'
2857 b'revisions %d and %d\n'
2857 )
2858 )
2858 % parents
2859 % parents
2859 )
2860 )
2860 else:
2861 else:
2861 ui.status(
2862 ui.status(
2862 _(b'working directory now based on revision %d\n') % parents
2863 _(b'working directory now based on revision %d\n') % parents
2863 )
2864 )
2864 mergestatemod.mergestate.clean(self)
2865 mergestatemod.mergestate.clean(self)
2865
2866
2866 # TODO: if we know which new heads may result from this rollback, pass
2867 # TODO: if we know which new heads may result from this rollback, pass
2867 # them to destroy(), which will prevent the branchhead cache from being
2868 # them to destroy(), which will prevent the branchhead cache from being
2868 # invalidated.
2869 # invalidated.
2869 self.destroyed()
2870 self.destroyed()
2870 return 0
2871 return 0
2871
2872
2872 def _buildcacheupdater(self, newtransaction):
2873 def _buildcacheupdater(self, newtransaction):
2873 """called during transaction to build the callback updating cache
2874 """called during transaction to build the callback updating cache
2874
2875
2875 Lives on the repository to help extension who might want to augment
2876 Lives on the repository to help extension who might want to augment
2876 this logic. For this purpose, the created transaction is passed to the
2877 this logic. For this purpose, the created transaction is passed to the
2877 method.
2878 method.
2878 """
2879 """
2879 # we must avoid cyclic reference between repo and transaction.
2880 # we must avoid cyclic reference between repo and transaction.
2880 reporef = weakref.ref(self)
2881 reporef = weakref.ref(self)
2881
2882
2882 def updater(tr):
2883 def updater(tr):
2883 repo = reporef()
2884 repo = reporef()
2884 assert repo is not None # help pytype
2885 assert repo is not None # help pytype
2885 repo.updatecaches(tr)
2886 repo.updatecaches(tr)
2886
2887
2887 return updater
2888 return updater
2888
2889
2889 @unfilteredmethod
2890 @unfilteredmethod
2890 def updatecaches(self, tr=None, full=False, caches=None):
2891 def updatecaches(self, tr=None, full=False, caches=None):
2891 """warm appropriate caches
2892 """warm appropriate caches
2892
2893
2893 If this function is called after a transaction closed. The transaction
2894 If this function is called after a transaction closed. The transaction
2894 will be available in the 'tr' argument. This can be used to selectively
2895 will be available in the 'tr' argument. This can be used to selectively
2895 update caches relevant to the changes in that transaction.
2896 update caches relevant to the changes in that transaction.
2896
2897
2897 If 'full' is set, make sure all caches the function knows about have
2898 If 'full' is set, make sure all caches the function knows about have
2898 up-to-date data. Even the ones usually loaded more lazily.
2899 up-to-date data. Even the ones usually loaded more lazily.
2899
2900
2900 The `full` argument can take a special "post-clone" value. In this case
2901 The `full` argument can take a special "post-clone" value. In this case
2901 the cache warming is made after a clone and of the slower cache might
2902 the cache warming is made after a clone and of the slower cache might
2902 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2903 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2903 as we plan for a cleaner way to deal with this for 5.9.
2904 as we plan for a cleaner way to deal with this for 5.9.
2904 """
2905 """
2905 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2906 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2906 # During strip, many caches are invalid but
2907 # During strip, many caches are invalid but
2907 # later call to `destroyed` will refresh them.
2908 # later call to `destroyed` will refresh them.
2908 return
2909 return
2909
2910
2910 unfi = self.unfiltered()
2911 unfi = self.unfiltered()
2911
2912
2912 if full:
2913 if full:
2913 msg = (
2914 msg = (
2914 "`full` argument for `repo.updatecaches` is deprecated\n"
2915 "`full` argument for `repo.updatecaches` is deprecated\n"
2915 "(use `caches=repository.CACHE_ALL` instead)"
2916 "(use `caches=repository.CACHE_ALL` instead)"
2916 )
2917 )
2917 self.ui.deprecwarn(msg, b"5.9")
2918 self.ui.deprecwarn(msg, b"5.9")
2918 caches = repository.CACHES_ALL
2919 caches = repository.CACHES_ALL
2919 if full == b"post-clone":
2920 if full == b"post-clone":
2920 caches = repository.CACHES_POST_CLONE
2921 caches = repository.CACHES_POST_CLONE
2921 caches = repository.CACHES_ALL
2922 caches = repository.CACHES_ALL
2922 elif caches is None:
2923 elif caches is None:
2923 caches = repository.CACHES_DEFAULT
2924 caches = repository.CACHES_DEFAULT
2924
2925
2925 if repository.CACHE_BRANCHMAP_SERVED in caches:
2926 if repository.CACHE_BRANCHMAP_SERVED in caches:
2926 if tr is None or tr.changes[b'origrepolen'] < len(self):
2927 if tr is None or tr.changes[b'origrepolen'] < len(self):
2927 # accessing the 'served' branchmap should refresh all the others,
2928 # accessing the 'served' branchmap should refresh all the others,
2928 self.ui.debug(b'updating the branch cache\n')
2929 self.ui.debug(b'updating the branch cache\n')
2929 self.filtered(b'served').branchmap()
2930 self.filtered(b'served').branchmap()
2930 self.filtered(b'served.hidden').branchmap()
2931 self.filtered(b'served.hidden').branchmap()
2931 # flush all possibly delayed write.
2932 # flush all possibly delayed write.
2932 self._branchcaches.write_delayed(self)
2933 self._branchcaches.write_delayed(self)
2933
2934
2934 if repository.CACHE_CHANGELOG_CACHE in caches:
2935 if repository.CACHE_CHANGELOG_CACHE in caches:
2935 self.changelog.update_caches(transaction=tr)
2936 self.changelog.update_caches(transaction=tr)
2936
2937
2937 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2938 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2938 self.manifestlog.update_caches(transaction=tr)
2939 self.manifestlog.update_caches(transaction=tr)
2939 for entry in self.store.walk():
2940 for entry in self.store.walk():
2940 if not entry.is_revlog:
2941 if not entry.is_revlog:
2941 continue
2942 continue
2942 if not entry.is_manifestlog:
2943 if not entry.is_manifestlog:
2943 continue
2944 continue
2944 manifestrevlog = entry.get_revlog_instance(self).get_revlog()
2945 manifestrevlog = entry.get_revlog_instance(self).get_revlog()
2945 if manifestrevlog is not None:
2946 if manifestrevlog is not None:
2946 manifestrevlog.update_caches(transaction=tr)
2947 manifestrevlog.update_caches(transaction=tr)
2947
2948
2948 if repository.CACHE_REV_BRANCH in caches:
2949 if repository.CACHE_REV_BRANCH in caches:
2949 rbc = unfi.revbranchcache()
2950 rbc = unfi.revbranchcache()
2950 for r in unfi.changelog:
2951 for r in unfi.changelog:
2951 rbc.branchinfo(r)
2952 rbc.branchinfo(r)
2952 rbc.write()
2953 rbc.write()
2953
2954
2954 if repository.CACHE_FULL_MANIFEST in caches:
2955 if repository.CACHE_FULL_MANIFEST in caches:
2955 # ensure the working copy parents are in the manifestfulltextcache
2956 # ensure the working copy parents are in the manifestfulltextcache
2956 for ctx in self[b'.'].parents():
2957 for ctx in self[b'.'].parents():
2957 ctx.manifest() # accessing the manifest is enough
2958 ctx.manifest() # accessing the manifest is enough
2958
2959
2959 if repository.CACHE_FILE_NODE_TAGS in caches:
2960 if repository.CACHE_FILE_NODE_TAGS in caches:
2960 # accessing fnode cache warms the cache
2961 # accessing fnode cache warms the cache
2961 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2962 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2962
2963
2963 if repository.CACHE_TAGS_DEFAULT in caches:
2964 if repository.CACHE_TAGS_DEFAULT in caches:
2964 # accessing tags warm the cache
2965 # accessing tags warm the cache
2965 self.tags()
2966 self.tags()
2966 if repository.CACHE_TAGS_SERVED in caches:
2967 if repository.CACHE_TAGS_SERVED in caches:
2967 self.filtered(b'served').tags()
2968 self.filtered(b'served').tags()
2968
2969
2969 if repository.CACHE_BRANCHMAP_ALL in caches:
2970 if repository.CACHE_BRANCHMAP_ALL in caches:
2970 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2971 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2971 # so we're forcing a write to cause these caches to be warmed up
2972 # so we're forcing a write to cause these caches to be warmed up
2972 # even if they haven't explicitly been requested yet (if they've
2973 # even if they haven't explicitly been requested yet (if they've
2973 # never been used by hg, they won't ever have been written, even if
2974 # never been used by hg, they won't ever have been written, even if
2974 # they're a subset of another kind of cache that *has* been used).
2975 # they're a subset of another kind of cache that *has* been used).
2975 for filt in repoview.filtertable.keys():
2976 for filt in repoview.filtertable.keys():
2976 filtered = self.filtered(filt)
2977 filtered = self.filtered(filt)
2977 filtered.branchmap().write(filtered)
2978 filtered.branchmap().write(filtered)
2978
2979
2979 def invalidatecaches(self):
2980 def invalidatecaches(self):
2980 if '_tagscache' in vars(self):
2981 if '_tagscache' in vars(self):
2981 # can't use delattr on proxy
2982 # can't use delattr on proxy
2982 del self.__dict__['_tagscache']
2983 del self.__dict__['_tagscache']
2983
2984
2984 self._branchcaches.clear()
2985 self._branchcaches.clear()
2985 self.invalidatevolatilesets()
2986 self.invalidatevolatilesets()
2986 self._sparsesignaturecache.clear()
2987 self._sparsesignaturecache.clear()
2987
2988
2988 def invalidatevolatilesets(self):
2989 def invalidatevolatilesets(self):
2989 self.filteredrevcache.clear()
2990 self.filteredrevcache.clear()
2990 obsolete.clearobscaches(self)
2991 obsolete.clearobscaches(self)
2991 self._quick_access_changeid_invalidate()
2992 self._quick_access_changeid_invalidate()
2992
2993
2993 def invalidatedirstate(self):
2994 def invalidatedirstate(self):
2994 """Invalidates the dirstate, causing the next call to dirstate
2995 """Invalidates the dirstate, causing the next call to dirstate
2995 to check if it was modified since the last time it was read,
2996 to check if it was modified since the last time it was read,
2996 rereading it if it has.
2997 rereading it if it has.
2997
2998
2998 This is different to dirstate.invalidate() that it doesn't always
2999 This is different to dirstate.invalidate() that it doesn't always
2999 rereads the dirstate. Use dirstate.invalidate() if you want to
3000 rereads the dirstate. Use dirstate.invalidate() if you want to
3000 explicitly read the dirstate again (i.e. restoring it to a previous
3001 explicitly read the dirstate again (i.e. restoring it to a previous
3001 known good state)."""
3002 known good state)."""
3002 unfi = self.unfiltered()
3003 unfi = self.unfiltered()
3003 if 'dirstate' in unfi.__dict__:
3004 if 'dirstate' in unfi.__dict__:
3004 assert not self.dirstate.is_changing_any
3005 assert not self.dirstate.is_changing_any
3005 del unfi.__dict__['dirstate']
3006 del unfi.__dict__['dirstate']
3006
3007
3007 def invalidate(self, clearfilecache=False):
3008 def invalidate(self, clearfilecache=False):
3008 """Invalidates both store and non-store parts other than dirstate
3009 """Invalidates both store and non-store parts other than dirstate
3009
3010
3010 If a transaction is running, invalidation of store is omitted,
3011 If a transaction is running, invalidation of store is omitted,
3011 because discarding in-memory changes might cause inconsistency
3012 because discarding in-memory changes might cause inconsistency
3012 (e.g. incomplete fncache causes unintentional failure, but
3013 (e.g. incomplete fncache causes unintentional failure, but
3013 redundant one doesn't).
3014 redundant one doesn't).
3014 """
3015 """
3015 unfiltered = self.unfiltered() # all file caches are stored unfiltered
3016 unfiltered = self.unfiltered() # all file caches are stored unfiltered
3016 for k in list(self._filecache.keys()):
3017 for k in list(self._filecache.keys()):
3017 if (
3018 if (
3018 k == b'changelog'
3019 k == b'changelog'
3019 and self.currenttransaction()
3020 and self.currenttransaction()
3020 and self.changelog._delayed
3021 and self.changelog._delayed
3021 ):
3022 ):
3022 # The changelog object may store unwritten revisions. We don't
3023 # The changelog object may store unwritten revisions. We don't
3023 # want to lose them.
3024 # want to lose them.
3024 # TODO: Solve the problem instead of working around it.
3025 # TODO: Solve the problem instead of working around it.
3025 continue
3026 continue
3026
3027
3027 if clearfilecache:
3028 if clearfilecache:
3028 del self._filecache[k]
3029 del self._filecache[k]
3029 try:
3030 try:
3030 delattr(unfiltered, k)
3031 delattr(unfiltered, k)
3031 except AttributeError:
3032 except AttributeError:
3032 pass
3033 pass
3033 self.invalidatecaches()
3034 self.invalidatecaches()
3034 if not self.currenttransaction():
3035 if not self.currenttransaction():
3035 # TODO: Changing contents of store outside transaction
3036 # TODO: Changing contents of store outside transaction
3036 # causes inconsistency. We should make in-memory store
3037 # causes inconsistency. We should make in-memory store
3037 # changes detectable, and abort if changed.
3038 # changes detectable, and abort if changed.
3038 self.store.invalidatecaches()
3039 self.store.invalidatecaches()
3039
3040
3040 def invalidateall(self):
3041 def invalidateall(self):
3041 """Fully invalidates both store and non-store parts, causing the
3042 """Fully invalidates both store and non-store parts, causing the
3042 subsequent operation to reread any outside changes."""
3043 subsequent operation to reread any outside changes."""
3043 # extension should hook this to invalidate its caches
3044 # extension should hook this to invalidate its caches
3044 self.invalidate()
3045 self.invalidate()
3045 self.invalidatedirstate()
3046 self.invalidatedirstate()
3046
3047
3047 @unfilteredmethod
3048 @unfilteredmethod
3048 def _refreshfilecachestats(self, tr):
3049 def _refreshfilecachestats(self, tr):
3049 """Reload stats of cached files so that they are flagged as valid"""
3050 """Reload stats of cached files so that they are flagged as valid"""
3050 for k, ce in self._filecache.items():
3051 for k, ce in self._filecache.items():
3051 k = pycompat.sysstr(k)
3052 k = pycompat.sysstr(k)
3052 if k == 'dirstate' or k not in self.__dict__:
3053 if k == 'dirstate' or k not in self.__dict__:
3053 continue
3054 continue
3054 ce.refresh()
3055 ce.refresh()
3055
3056
3056 def _lock(
3057 def _lock(
3057 self,
3058 self,
3058 vfs,
3059 vfs,
3059 lockname,
3060 lockname,
3060 wait,
3061 wait,
3061 releasefn,
3062 releasefn,
3062 acquirefn,
3063 acquirefn,
3063 desc,
3064 desc,
3064 ):
3065 ):
3065 timeout = 0
3066 timeout = 0
3066 warntimeout = 0
3067 warntimeout = 0
3067 if wait:
3068 if wait:
3068 timeout = self.ui.configint(b"ui", b"timeout")
3069 timeout = self.ui.configint(b"ui", b"timeout")
3069 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3070 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3070 # internal config: ui.signal-safe-lock
3071 # internal config: ui.signal-safe-lock
3071 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3072 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3072
3073
3073 l = lockmod.trylock(
3074 l = lockmod.trylock(
3074 self.ui,
3075 self.ui,
3075 vfs,
3076 vfs,
3076 lockname,
3077 lockname,
3077 timeout,
3078 timeout,
3078 warntimeout,
3079 warntimeout,
3079 releasefn=releasefn,
3080 releasefn=releasefn,
3080 acquirefn=acquirefn,
3081 acquirefn=acquirefn,
3081 desc=desc,
3082 desc=desc,
3082 signalsafe=signalsafe,
3083 signalsafe=signalsafe,
3083 )
3084 )
3084 return l
3085 return l
3085
3086
3086 def _afterlock(self, callback):
3087 def _afterlock(self, callback):
3087 """add a callback to be run when the repository is fully unlocked
3088 """add a callback to be run when the repository is fully unlocked
3088
3089
3089 The callback will be executed when the outermost lock is released
3090 The callback will be executed when the outermost lock is released
3090 (with wlock being higher level than 'lock')."""
3091 (with wlock being higher level than 'lock')."""
3091 for ref in (self._wlockref, self._lockref):
3092 for ref in (self._wlockref, self._lockref):
3092 l = ref and ref()
3093 l = ref and ref()
3093 if l and l.held:
3094 if l and l.held:
3094 l.postrelease.append(callback)
3095 l.postrelease.append(callback)
3095 break
3096 break
3096 else: # no lock have been found.
3097 else: # no lock have been found.
3097 callback(True)
3098 callback(True)
3098
3099
3099 def lock(self, wait=True):
3100 def lock(self, wait=True):
3100 """Lock the repository store (.hg/store) and return a weak reference
3101 """Lock the repository store (.hg/store) and return a weak reference
3101 to the lock. Use this before modifying the store (e.g. committing or
3102 to the lock. Use this before modifying the store (e.g. committing or
3102 stripping). If you are opening a transaction, get a lock as well.)
3103 stripping). If you are opening a transaction, get a lock as well.)
3103
3104
3104 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3105 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3105 'wlock' first to avoid a dead-lock hazard."""
3106 'wlock' first to avoid a dead-lock hazard."""
3106 l = self._currentlock(self._lockref)
3107 l = self._currentlock(self._lockref)
3107 if l is not None:
3108 if l is not None:
3108 l.lock()
3109 l.lock()
3109 return l
3110 return l
3110
3111
3111 l = self._lock(
3112 l = self._lock(
3112 vfs=self.svfs,
3113 vfs=self.svfs,
3113 lockname=b"lock",
3114 lockname=b"lock",
3114 wait=wait,
3115 wait=wait,
3115 releasefn=None,
3116 releasefn=None,
3116 acquirefn=self.invalidate,
3117 acquirefn=self.invalidate,
3117 desc=_(b'repository %s') % self.origroot,
3118 desc=_(b'repository %s') % self.origroot,
3118 )
3119 )
3119 self._lockref = weakref.ref(l)
3120 self._lockref = weakref.ref(l)
3120 return l
3121 return l
3121
3122
3122 def wlock(self, wait=True):
3123 def wlock(self, wait=True):
3123 """Lock the non-store parts of the repository (everything under
3124 """Lock the non-store parts of the repository (everything under
3124 .hg except .hg/store) and return a weak reference to the lock.
3125 .hg except .hg/store) and return a weak reference to the lock.
3125
3126
3126 Use this before modifying files in .hg.
3127 Use this before modifying files in .hg.
3127
3128
3128 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3129 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3129 'wlock' first to avoid a dead-lock hazard."""
3130 'wlock' first to avoid a dead-lock hazard."""
3130 l = self._wlockref() if self._wlockref else None
3131 l = self._wlockref() if self._wlockref else None
3131 if l is not None and l.held:
3132 if l is not None and l.held:
3132 l.lock()
3133 l.lock()
3133 return l
3134 return l
3134
3135
3135 # We do not need to check for non-waiting lock acquisition. Such
3136 # We do not need to check for non-waiting lock acquisition. Such
3136 # acquisition would not cause dead-lock as they would just fail.
3137 # acquisition would not cause dead-lock as they would just fail.
3137 if wait and (
3138 if wait and (
3138 self.ui.configbool(b'devel', b'all-warnings')
3139 self.ui.configbool(b'devel', b'all-warnings')
3139 or self.ui.configbool(b'devel', b'check-locks')
3140 or self.ui.configbool(b'devel', b'check-locks')
3140 ):
3141 ):
3141 if self._currentlock(self._lockref) is not None:
3142 if self._currentlock(self._lockref) is not None:
3142 self.ui.develwarn(b'"wlock" acquired after "lock"')
3143 self.ui.develwarn(b'"wlock" acquired after "lock"')
3143
3144
3144 def unlock():
3145 def unlock():
3145 if self.dirstate.is_changing_any:
3146 if self.dirstate.is_changing_any:
3146 msg = b"wlock release in the middle of a changing parents"
3147 msg = b"wlock release in the middle of a changing parents"
3147 self.ui.develwarn(msg)
3148 self.ui.develwarn(msg)
3148 self.dirstate.invalidate()
3149 self.dirstate.invalidate()
3149 else:
3150 else:
3150 if self.dirstate._dirty:
3151 if self.dirstate._dirty:
3151 msg = b"dirty dirstate on wlock release"
3152 msg = b"dirty dirstate on wlock release"
3152 self.ui.develwarn(msg)
3153 self.ui.develwarn(msg)
3153 self.dirstate.write(None)
3154 self.dirstate.write(None)
3154
3155
3155 unfi = self.unfiltered()
3156 unfi = self.unfiltered()
3156 if 'dirstate' in unfi.__dict__:
3157 if 'dirstate' in unfi.__dict__:
3157 del unfi.__dict__['dirstate']
3158 del unfi.__dict__['dirstate']
3158
3159
3159 l = self._lock(
3160 l = self._lock(
3160 self.vfs,
3161 self.vfs,
3161 b"wlock",
3162 b"wlock",
3162 wait,
3163 wait,
3163 unlock,
3164 unlock,
3164 self.invalidatedirstate,
3165 self.invalidatedirstate,
3165 _(b'working directory of %s') % self.origroot,
3166 _(b'working directory of %s') % self.origroot,
3166 )
3167 )
3167 self._wlockref = weakref.ref(l)
3168 self._wlockref = weakref.ref(l)
3168 return l
3169 return l
3169
3170
3170 def _currentlock(self, lockref):
3171 def _currentlock(self, lockref):
3171 """Returns the lock if it's held, or None if it's not."""
3172 """Returns the lock if it's held, or None if it's not."""
3172 if lockref is None:
3173 if lockref is None:
3173 return None
3174 return None
3174 l = lockref()
3175 l = lockref()
3175 if l is None or not l.held:
3176 if l is None or not l.held:
3176 return None
3177 return None
3177 return l
3178 return l
3178
3179
3179 def currentwlock(self):
3180 def currentwlock(self):
3180 """Returns the wlock if it's held, or None if it's not."""
3181 """Returns the wlock if it's held, or None if it's not."""
3181 return self._currentlock(self._wlockref)
3182 return self._currentlock(self._wlockref)
3182
3183
3183 def currentlock(self):
3184 def currentlock(self):
3184 """Returns the lock if it's held, or None if it's not."""
3185 """Returns the lock if it's held, or None if it's not."""
3185 return self._currentlock(self._lockref)
3186 return self._currentlock(self._lockref)
3186
3187
3187 def checkcommitpatterns(self, wctx, match, status, fail):
3188 def checkcommitpatterns(self, wctx, match, status, fail):
3188 """check for commit arguments that aren't committable"""
3189 """check for commit arguments that aren't committable"""
3189 if match.isexact() or match.prefix():
3190 if match.isexact() or match.prefix():
3190 matched = set(status.modified + status.added + status.removed)
3191 matched = set(status.modified + status.added + status.removed)
3191
3192
3192 for f in match.files():
3193 for f in match.files():
3193 f = self.dirstate.normalize(f)
3194 f = self.dirstate.normalize(f)
3194 if f == b'.' or f in matched or f in wctx.substate:
3195 if f == b'.' or f in matched or f in wctx.substate:
3195 continue
3196 continue
3196 if f in status.deleted:
3197 if f in status.deleted:
3197 fail(f, _(b'file not found!'))
3198 fail(f, _(b'file not found!'))
3198 # Is it a directory that exists or used to exist?
3199 # Is it a directory that exists or used to exist?
3199 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3200 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3200 d = f + b'/'
3201 d = f + b'/'
3201 for mf in matched:
3202 for mf in matched:
3202 if mf.startswith(d):
3203 if mf.startswith(d):
3203 break
3204 break
3204 else:
3205 else:
3205 fail(f, _(b"no match under directory!"))
3206 fail(f, _(b"no match under directory!"))
3206 elif f not in self.dirstate:
3207 elif f not in self.dirstate:
3207 fail(f, _(b"file not tracked!"))
3208 fail(f, _(b"file not tracked!"))
3208
3209
3209 @unfilteredmethod
3210 @unfilteredmethod
3210 def commit(
3211 def commit(
3211 self,
3212 self,
3212 text=b"",
3213 text=b"",
3213 user=None,
3214 user=None,
3214 date=None,
3215 date=None,
3215 match=None,
3216 match=None,
3216 force=False,
3217 force=False,
3217 editor=None,
3218 editor=None,
3218 extra=None,
3219 extra=None,
3219 ):
3220 ):
3220 """Add a new revision to current repository.
3221 """Add a new revision to current repository.
3221
3222
3222 Revision information is gathered from the working directory,
3223 Revision information is gathered from the working directory,
3223 match can be used to filter the committed files. If editor is
3224 match can be used to filter the committed files. If editor is
3224 supplied, it is called to get a commit message.
3225 supplied, it is called to get a commit message.
3225 """
3226 """
3226 if extra is None:
3227 if extra is None:
3227 extra = {}
3228 extra = {}
3228
3229
3229 def fail(f, msg):
3230 def fail(f, msg):
3230 raise error.InputError(b'%s: %s' % (f, msg))
3231 raise error.InputError(b'%s: %s' % (f, msg))
3231
3232
3232 if not match:
3233 if not match:
3233 match = matchmod.always()
3234 match = matchmod.always()
3234
3235
3235 if not force:
3236 if not force:
3236 match.bad = fail
3237 match.bad = fail
3237
3238
3238 # lock() for recent changelog (see issue4368)
3239 # lock() for recent changelog (see issue4368)
3239 with self.wlock(), self.lock():
3240 with self.wlock(), self.lock():
3240 wctx = self[None]
3241 wctx = self[None]
3241 merge = len(wctx.parents()) > 1
3242 merge = len(wctx.parents()) > 1
3242
3243
3243 if not force and merge and not match.always():
3244 if not force and merge and not match.always():
3244 raise error.Abort(
3245 raise error.Abort(
3245 _(
3246 _(
3246 b'cannot partially commit a merge '
3247 b'cannot partially commit a merge '
3247 b'(do not specify files or patterns)'
3248 b'(do not specify files or patterns)'
3248 )
3249 )
3249 )
3250 )
3250
3251
3251 status = self.status(match=match, clean=force)
3252 status = self.status(match=match, clean=force)
3252 if force:
3253 if force:
3253 status.modified.extend(
3254 status.modified.extend(
3254 status.clean
3255 status.clean
3255 ) # mq may commit clean files
3256 ) # mq may commit clean files
3256
3257
3257 # check subrepos
3258 # check subrepos
3258 subs, commitsubs, newstate = subrepoutil.precommit(
3259 subs, commitsubs, newstate = subrepoutil.precommit(
3259 self.ui, wctx, status, match, force=force
3260 self.ui, wctx, status, match, force=force
3260 )
3261 )
3261
3262
3262 # make sure all explicit patterns are matched
3263 # make sure all explicit patterns are matched
3263 if not force:
3264 if not force:
3264 self.checkcommitpatterns(wctx, match, status, fail)
3265 self.checkcommitpatterns(wctx, match, status, fail)
3265
3266
3266 cctx = context.workingcommitctx(
3267 cctx = context.workingcommitctx(
3267 self, status, text, user, date, extra
3268 self, status, text, user, date, extra
3268 )
3269 )
3269
3270
3270 ms = mergestatemod.mergestate.read(self)
3271 ms = mergestatemod.mergestate.read(self)
3271 mergeutil.checkunresolved(ms)
3272 mergeutil.checkunresolved(ms)
3272
3273
3273 # internal config: ui.allowemptycommit
3274 # internal config: ui.allowemptycommit
3274 if cctx.isempty() and not self.ui.configbool(
3275 if cctx.isempty() and not self.ui.configbool(
3275 b'ui', b'allowemptycommit'
3276 b'ui', b'allowemptycommit'
3276 ):
3277 ):
3277 self.ui.debug(b'nothing to commit, clearing merge state\n')
3278 self.ui.debug(b'nothing to commit, clearing merge state\n')
3278 ms.reset()
3279 ms.reset()
3279 return None
3280 return None
3280
3281
3281 if merge and cctx.deleted():
3282 if merge and cctx.deleted():
3282 raise error.Abort(_(b"cannot commit merge with missing files"))
3283 raise error.Abort(_(b"cannot commit merge with missing files"))
3283
3284
3284 if editor:
3285 if editor:
3285 cctx._text = editor(self, cctx, subs)
3286 cctx._text = editor(self, cctx, subs)
3286 edited = text != cctx._text
3287 edited = text != cctx._text
3287
3288
3288 # Save commit message in case this transaction gets rolled back
3289 # Save commit message in case this transaction gets rolled back
3289 # (e.g. by a pretxncommit hook). Leave the content alone on
3290 # (e.g. by a pretxncommit hook). Leave the content alone on
3290 # the assumption that the user will use the same editor again.
3291 # the assumption that the user will use the same editor again.
3291 msg_path = self.savecommitmessage(cctx._text)
3292 msg_path = self.savecommitmessage(cctx._text)
3292
3293
3293 # commit subs and write new state
3294 # commit subs and write new state
3294 if subs:
3295 if subs:
3295 uipathfn = scmutil.getuipathfn(self)
3296 uipathfn = scmutil.getuipathfn(self)
3296 for s in sorted(commitsubs):
3297 for s in sorted(commitsubs):
3297 sub = wctx.sub(s)
3298 sub = wctx.sub(s)
3298 self.ui.status(
3299 self.ui.status(
3299 _(b'committing subrepository %s\n')
3300 _(b'committing subrepository %s\n')
3300 % uipathfn(subrepoutil.subrelpath(sub))
3301 % uipathfn(subrepoutil.subrelpath(sub))
3301 )
3302 )
3302 sr = sub.commit(cctx._text, user, date)
3303 sr = sub.commit(cctx._text, user, date)
3303 newstate[s] = (newstate[s][0], sr)
3304 newstate[s] = (newstate[s][0], sr)
3304 subrepoutil.writestate(self, newstate)
3305 subrepoutil.writestate(self, newstate)
3305
3306
3306 p1, p2 = self.dirstate.parents()
3307 p1, p2 = self.dirstate.parents()
3307 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3308 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3308 try:
3309 try:
3309 self.hook(
3310 self.hook(
3310 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3311 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3311 )
3312 )
3312 with self.transaction(b'commit'):
3313 with self.transaction(b'commit'):
3313 ret = self.commitctx(cctx, True)
3314 ret = self.commitctx(cctx, True)
3314 # update bookmarks, dirstate and mergestate
3315 # update bookmarks, dirstate and mergestate
3315 bookmarks.update(self, [p1, p2], ret)
3316 bookmarks.update(self, [p1, p2], ret)
3316 cctx.markcommitted(ret)
3317 cctx.markcommitted(ret)
3317 ms.reset()
3318 ms.reset()
3318 except: # re-raises
3319 except: # re-raises
3319 if edited:
3320 if edited:
3320 self.ui.write(
3321 self.ui.write(
3321 _(b'note: commit message saved in %s\n') % msg_path
3322 _(b'note: commit message saved in %s\n') % msg_path
3322 )
3323 )
3323 self.ui.write(
3324 self.ui.write(
3324 _(
3325 _(
3325 b"note: use 'hg commit --logfile "
3326 b"note: use 'hg commit --logfile "
3326 b"%s --edit' to reuse it\n"
3327 b"%s --edit' to reuse it\n"
3327 )
3328 )
3328 % msg_path
3329 % msg_path
3329 )
3330 )
3330 raise
3331 raise
3331
3332
3332 def commithook(unused_success):
3333 def commithook(unused_success):
3333 # hack for command that use a temporary commit (eg: histedit)
3334 # hack for command that use a temporary commit (eg: histedit)
3334 # temporary commit got stripped before hook release
3335 # temporary commit got stripped before hook release
3335 if self.changelog.hasnode(ret):
3336 if self.changelog.hasnode(ret):
3336 self.hook(
3337 self.hook(
3337 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3338 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3338 )
3339 )
3339
3340
3340 self._afterlock(commithook)
3341 self._afterlock(commithook)
3341 return ret
3342 return ret
3342
3343
3343 @unfilteredmethod
3344 @unfilteredmethod
3344 def commitctx(self, ctx, error=False, origctx=None):
3345 def commitctx(self, ctx, error=False, origctx=None):
3345 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3346 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3346
3347
3347 @unfilteredmethod
3348 @unfilteredmethod
3348 def destroying(self):
3349 def destroying(self):
3349 """Inform the repository that nodes are about to be destroyed.
3350 """Inform the repository that nodes are about to be destroyed.
3350 Intended for use by strip and rollback, so there's a common
3351 Intended for use by strip and rollback, so there's a common
3351 place for anything that has to be done before destroying history.
3352 place for anything that has to be done before destroying history.
3352
3353
3353 This is mostly useful for saving state that is in memory and waiting
3354 This is mostly useful for saving state that is in memory and waiting
3354 to be flushed when the current lock is released. Because a call to
3355 to be flushed when the current lock is released. Because a call to
3355 destroyed is imminent, the repo will be invalidated causing those
3356 destroyed is imminent, the repo will be invalidated causing those
3356 changes to stay in memory (waiting for the next unlock), or vanish
3357 changes to stay in memory (waiting for the next unlock), or vanish
3357 completely.
3358 completely.
3358 """
3359 """
3359 # When using the same lock to commit and strip, the phasecache is left
3360 # When using the same lock to commit and strip, the phasecache is left
3360 # dirty after committing. Then when we strip, the repo is invalidated,
3361 # dirty after committing. Then when we strip, the repo is invalidated,
3361 # causing those changes to disappear.
3362 # causing those changes to disappear.
3362 if '_phasecache' in vars(self):
3363 if '_phasecache' in vars(self):
3363 self._phasecache.write()
3364 self._phasecache.write()
3364
3365
3365 @unfilteredmethod
3366 @unfilteredmethod
3366 def destroyed(self):
3367 def destroyed(self):
3367 """Inform the repository that nodes have been destroyed.
3368 """Inform the repository that nodes have been destroyed.
3368 Intended for use by strip and rollback, so there's a common
3369 Intended for use by strip and rollback, so there's a common
3369 place for anything that has to be done after destroying history.
3370 place for anything that has to be done after destroying history.
3370 """
3371 """
3371 # When one tries to:
3372 # When one tries to:
3372 # 1) destroy nodes thus calling this method (e.g. strip)
3373 # 1) destroy nodes thus calling this method (e.g. strip)
3373 # 2) use phasecache somewhere (e.g. commit)
3374 # 2) use phasecache somewhere (e.g. commit)
3374 #
3375 #
3375 # then 2) will fail because the phasecache contains nodes that were
3376 # then 2) will fail because the phasecache contains nodes that were
3376 # removed. We can either remove phasecache from the filecache,
3377 # removed. We can either remove phasecache from the filecache,
3377 # causing it to reload next time it is accessed, or simply filter
3378 # causing it to reload next time it is accessed, or simply filter
3378 # the removed nodes now and write the updated cache.
3379 # the removed nodes now and write the updated cache.
3379 self._phasecache.filterunknown(self)
3380 self._phasecache.filterunknown(self)
3380 self._phasecache.write()
3381 self._phasecache.write()
3381
3382
3382 # refresh all repository caches
3383 # refresh all repository caches
3383 self.updatecaches()
3384 self.updatecaches()
3384
3385
3385 # Ensure the persistent tag cache is updated. Doing it now
3386 # Ensure the persistent tag cache is updated. Doing it now
3386 # means that the tag cache only has to worry about destroyed
3387 # means that the tag cache only has to worry about destroyed
3387 # heads immediately after a strip/rollback. That in turn
3388 # heads immediately after a strip/rollback. That in turn
3388 # guarantees that "cachetip == currenttip" (comparing both rev
3389 # guarantees that "cachetip == currenttip" (comparing both rev
3389 # and node) always means no nodes have been added or destroyed.
3390 # and node) always means no nodes have been added or destroyed.
3390
3391
3391 # XXX this is suboptimal when qrefresh'ing: we strip the current
3392 # XXX this is suboptimal when qrefresh'ing: we strip the current
3392 # head, refresh the tag cache, then immediately add a new head.
3393 # head, refresh the tag cache, then immediately add a new head.
3393 # But I think doing it this way is necessary for the "instant
3394 # But I think doing it this way is necessary for the "instant
3394 # tag cache retrieval" case to work.
3395 # tag cache retrieval" case to work.
3395 self.invalidate()
3396 self.invalidate()
3396
3397
3397 def status(
3398 def status(
3398 self,
3399 self,
3399 node1=b'.',
3400 node1=b'.',
3400 node2=None,
3401 node2=None,
3401 match=None,
3402 match=None,
3402 ignored=False,
3403 ignored=False,
3403 clean=False,
3404 clean=False,
3404 unknown=False,
3405 unknown=False,
3405 listsubrepos=False,
3406 listsubrepos=False,
3406 ):
3407 ):
3407 '''a convenience method that calls node1.status(node2)'''
3408 '''a convenience method that calls node1.status(node2)'''
3408 return self[node1].status(
3409 return self[node1].status(
3409 node2, match, ignored, clean, unknown, listsubrepos
3410 node2, match, ignored, clean, unknown, listsubrepos
3410 )
3411 )
3411
3412
3412 def addpostdsstatus(self, ps):
3413 def addpostdsstatus(self, ps):
3413 """Add a callback to run within the wlock, at the point at which status
3414 """Add a callback to run within the wlock, at the point at which status
3414 fixups happen.
3415 fixups happen.
3415
3416
3416 On status completion, callback(wctx, status) will be called with the
3417 On status completion, callback(wctx, status) will be called with the
3417 wlock held, unless the dirstate has changed from underneath or the wlock
3418 wlock held, unless the dirstate has changed from underneath or the wlock
3418 couldn't be grabbed.
3419 couldn't be grabbed.
3419
3420
3420 Callbacks should not capture and use a cached copy of the dirstate --
3421 Callbacks should not capture and use a cached copy of the dirstate --
3421 it might change in the meanwhile. Instead, they should access the
3422 it might change in the meanwhile. Instead, they should access the
3422 dirstate via wctx.repo().dirstate.
3423 dirstate via wctx.repo().dirstate.
3423
3424
3424 This list is emptied out after each status run -- extensions should
3425 This list is emptied out after each status run -- extensions should
3425 make sure it adds to this list each time dirstate.status is called.
3426 make sure it adds to this list each time dirstate.status is called.
3426 Extensions should also make sure they don't call this for statuses
3427 Extensions should also make sure they don't call this for statuses
3427 that don't involve the dirstate.
3428 that don't involve the dirstate.
3428 """
3429 """
3429
3430
3430 # The list is located here for uniqueness reasons -- it is actually
3431 # The list is located here for uniqueness reasons -- it is actually
3431 # managed by the workingctx, but that isn't unique per-repo.
3432 # managed by the workingctx, but that isn't unique per-repo.
3432 self._postdsstatus.append(ps)
3433 self._postdsstatus.append(ps)
3433
3434
3434 def postdsstatus(self):
3435 def postdsstatus(self):
3435 """Used by workingctx to get the list of post-dirstate-status hooks."""
3436 """Used by workingctx to get the list of post-dirstate-status hooks."""
3436 return self._postdsstatus
3437 return self._postdsstatus
3437
3438
3438 def clearpostdsstatus(self):
3439 def clearpostdsstatus(self):
3439 """Used by workingctx to clear post-dirstate-status hooks."""
3440 """Used by workingctx to clear post-dirstate-status hooks."""
3440 del self._postdsstatus[:]
3441 del self._postdsstatus[:]
3441
3442
3442 def heads(self, start=None):
3443 def heads(self, start=None):
3443 if start is None:
3444 if start is None:
3444 cl = self.changelog
3445 cl = self.changelog
3445 headrevs = reversed(cl.headrevs())
3446 headrevs = reversed(cl.headrevs())
3446 return [cl.node(rev) for rev in headrevs]
3447 return [cl.node(rev) for rev in headrevs]
3447
3448
3448 heads = self.changelog.heads(start)
3449 heads = self.changelog.heads(start)
3449 # sort the output in rev descending order
3450 # sort the output in rev descending order
3450 return sorted(heads, key=self.changelog.rev, reverse=True)
3451 return sorted(heads, key=self.changelog.rev, reverse=True)
3451
3452
3452 def branchheads(self, branch=None, start=None, closed=False):
3453 def branchheads(self, branch=None, start=None, closed=False):
3453 """return a (possibly filtered) list of heads for the given branch
3454 """return a (possibly filtered) list of heads for the given branch
3454
3455
3455 Heads are returned in topological order, from newest to oldest.
3456 Heads are returned in topological order, from newest to oldest.
3456 If branch is None, use the dirstate branch.
3457 If branch is None, use the dirstate branch.
3457 If start is not None, return only heads reachable from start.
3458 If start is not None, return only heads reachable from start.
3458 If closed is True, return heads that are marked as closed as well.
3459 If closed is True, return heads that are marked as closed as well.
3459 """
3460 """
3460 if branch is None:
3461 if branch is None:
3461 branch = self[None].branch()
3462 branch = self[None].branch()
3462 branches = self.branchmap()
3463 branches = self.branchmap()
3463 if not branches.hasbranch(branch):
3464 if not branches.hasbranch(branch):
3464 return []
3465 return []
3465 # the cache returns heads ordered lowest to highest
3466 # the cache returns heads ordered lowest to highest
3466 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3467 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3467 if start is not None:
3468 if start is not None:
3468 # filter out the heads that cannot be reached from startrev
3469 # filter out the heads that cannot be reached from startrev
3469 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3470 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3470 bheads = [h for h in bheads if h in fbheads]
3471 bheads = [h for h in bheads if h in fbheads]
3471 return bheads
3472 return bheads
3472
3473
3473 def branches(self, nodes):
3474 def branches(self, nodes):
3474 if not nodes:
3475 if not nodes:
3475 nodes = [self.changelog.tip()]
3476 nodes = [self.changelog.tip()]
3476 b = []
3477 b = []
3477 for n in nodes:
3478 for n in nodes:
3478 t = n
3479 t = n
3479 while True:
3480 while True:
3480 p = self.changelog.parents(n)
3481 p = self.changelog.parents(n)
3481 if p[1] != self.nullid or p[0] == self.nullid:
3482 if p[1] != self.nullid or p[0] == self.nullid:
3482 b.append((t, n, p[0], p[1]))
3483 b.append((t, n, p[0], p[1]))
3483 break
3484 break
3484 n = p[0]
3485 n = p[0]
3485 return b
3486 return b
3486
3487
3487 def between(self, pairs):
3488 def between(self, pairs):
3488 r = []
3489 r = []
3489
3490
3490 for top, bottom in pairs:
3491 for top, bottom in pairs:
3491 n, l, i = top, [], 0
3492 n, l, i = top, [], 0
3492 f = 1
3493 f = 1
3493
3494
3494 while n != bottom and n != self.nullid:
3495 while n != bottom and n != self.nullid:
3495 p = self.changelog.parents(n)[0]
3496 p = self.changelog.parents(n)[0]
3496 if i == f:
3497 if i == f:
3497 l.append(n)
3498 l.append(n)
3498 f = f * 2
3499 f = f * 2
3499 n = p
3500 n = p
3500 i += 1
3501 i += 1
3501
3502
3502 r.append(l)
3503 r.append(l)
3503
3504
3504 return r
3505 return r
3505
3506
def checkpush(self, pushop):
    """Pre-push validation hook point; the base implementation is a no-op.

    Extensions can override this to perform additional checks before
    pushing, and should call it when they override the push command.
    """
3511
3512
@unfilteredpropertycache
def prepushoutgoinghooks(self):
    """util.hooks instance invoked before pushing changesets.

    Registered hooks receive a pushop exposing repo, remote and
    outgoing attributes; the hooks object is memoized by the
    @unfilteredpropertycache decorator.
    """
    return util.hooks()
3518
3519
def pushkey(self, namespace, key, old, new):
    """Set ``key`` to ``new`` (expecting ``old``) in pushkey ``namespace``.

    Fires the ``prepushkey`` hook first; a HookAbort from it cancels
    the update, prints the abort reason, and makes this return False.
    Otherwise the value returned by pushkey.push() is propagated to
    the caller, and the ``pushkey`` hook is scheduled to run once the
    current lock is released.
    """
    try:
        txn = self.currenttransaction()
        args = {}
        if txn is not None:
            args.update(txn.hookargs)
        args = pycompat.strkwargs(args)
        args['namespace'] = namespace
        args['key'] = key
        args['old'] = old
        args['new'] = new
        self.hook(b'prepushkey', throw=True, **args)
    except error.HookAbort as exc:
        self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
        if exc.hint:
            self.ui.write_err(_(b"(%s)\n") % exc.hint)
        return False

    self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
    ret = pushkey.push(self, namespace, key, old, new)

    def runhook(unused_success):
        self.hook(
            b'pushkey',
            namespace=namespace,
            key=key,
            old=old,
            new=new,
            ret=ret,
        )

    # Run the notification hook only after the lock is released.
    self._afterlock(runhook)
    return ret
3551
3552
def listkeys(self, namespace):
    """Return the pushkey values stored under ``namespace``.

    The ``prelistkeys`` hook may veto the listing (throw=True); the
    ``listkeys`` hook observes the resulting values.
    """
    self.hook(b'prelistkeys', throw=True, namespace=namespace)
    self.ui.debug(b'listing keys for "%s"\n' % namespace)
    result = pushkey.list(self, namespace)
    self.hook(b'listkeys', namespace=namespace, values=result)
    return result
3558
3559
def debugwireargs(self, one, two, three=None, four=None, five=None):
    '''used to test argument passing over the wire'''
    rendered = (
        one,
        two,
        pycompat.bytestr(three),
        pycompat.bytestr(four),
        pycompat.bytestr(five),
    )
    return b"%s %s %s %s %s" % rendered
3568
3569
3569 def savecommitmessage(self, text):
3570 def savecommitmessage(self, text):
3570 fp = self.vfs(b'last-message.txt', b'wb')
3571 fp = self.vfs(b'last-message.txt', b'wb')
3571 try:
3572 try:
3572 fp.write(text)
3573 fp.write(text)
3573 finally:
3574 finally:
3574 fp.close()
3575 fp.close()
3575 return self.pathto(fp.name[len(self.root) + 1 :])
3576 return self.pathto(fp.name[len(self.root) + 1 :])
3576
3577
def register_wanted_sidedata(self, category):
    """Record ``category`` as a side-data category this repo wants.

    Silently ignored when the storage cannot carry side-data — only
    repos exposing REPO_FEATURE_SIDE_DATA (i.e. revlogv2) can want it.
    """
    if repository.REPO_FEATURE_SIDE_DATA not in self.features:
        # Only revlogv2 repos can want sidedata.
        return
    self._wanted_sidedata.add(pycompat.bytestr(category))
3582
3583
def register_sidedata_computer(
    self, kind, category, keys, computer, flags, replace=False
):
    """Register ``computer`` as the side-data producer for ``category``.

    ``kind`` must be one of revlogconst.ALL_KINDS.  Registering an
    already-known category requires ``replace=True``; conversely,
    passing ``replace=True`` for an unknown category is also an error.
    """
    if kind not in revlogconst.ALL_KINDS:
        msg = _(b"unexpected revlog kind '%s'.")
        raise error.ProgrammingError(msg % kind)

    category = pycompat.bytestr(category)
    registered = category in self._sidedata_computers.get(kind, [])

    if registered and not replace:
        msg = _(
            b"cannot register a sidedata computer twice for category '%s'."
        )
        raise error.ProgrammingError(msg % category)
    if replace and not registered:
        msg = _(
            b"cannot replace a sidedata computer that isn't registered "
            b"for category '%s'."
        )
        raise error.ProgrammingError(msg % category)

    self._sidedata_computers.setdefault(kind, {})
    self._sidedata_computers[kind][category] = (keys, computer, flags)
3604
3605
3605
3606
def undoname(fn: bytes) -> bytes:
    """Translate a journal file path into the matching undo file path.

    Only the leading ``journal`` of the basename is rewritten; the
    directory part is preserved unchanged.
    """
    directory, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(directory, name.replace(b'journal', b'undo', 1))
3610
3611
3611
3612
def instance(ui, path: bytes, create, intents=None, createopts=None):
    """Open (and optionally create) the local repository at ``path``.

    The freshly opened repository is handed to
    upgrade.may_auto_upgrade(), which may transparently replace it
    with an upgraded instance built by the same factory.
    """
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    return upgrade.may_auto_upgrade(repo_maker(), repo_maker)
3626
3627
3627
3628
def islocal(path: bytes) -> bool:
    """A localrepo path is always local, whatever ``path`` contains."""
    return True
3630
3631
3631
3632
def defaultcreateopts(ui, createopts=None):
    """Return repository creation options with defaults filled in.

    The input mapping is copied, never mutated.  Currently only the
    ``backend`` key receives a default, read from the
    storage.new-repo-backend config.
    """
    opts = dict(createopts or {})

    if b'backend' not in opts:
        # experimental config: storage.new-repo-backend
        opts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return opts
3645
3646
3646
3647
def clone_requirements(ui, createopts, srcrepo):
    """Compute the requirement set for a local clone of ``srcrepo``.

    Store requirements are copied verbatim from the source, while the
    working-copy requirements follow the local configuration.
    """
    if not srcrepo.requirements:
        # Legacy revlog "v0" repository: we cannot do anything fancy
        # with it, so clone with no requirements at all.
        return set()

    createopts = defaultcreateopts(ui, createopts=createopts)
    wc_reqs = requirementsmod.WORKING_DIR_REQUIREMENTS

    # Working-copy requirements come from local config...
    target = {r for r in newreporequirements(ui, createopts) if r in wc_reqs}
    # ...everything else is inherited from the source repo.
    target |= {r for r in srcrepo.requirements if r not in wc_reqs}
    return target
3667
3668
3668
3669
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    ``createopts`` must already contain a ``backend`` key (i.e.
    defaultcreateopts() must have run first).  Extensions can wrap this
    function to specify custom requirements for new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    # store -> fncache -> dotencode form a dependency chain: each option
    # is only meaningful when the previous one is enabled.
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    # Pick the first configured compression engine that is usable for
    # revlogs; abort if none of them is.
    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        # revlogv2 fully replaces revlogv1
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)

    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    # The default for use-persistent-nodemap is computed dynamically by
    # configitems (enabled only when a fast implementation is available).
    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # When creating a share¹ we must handle requirements differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We only control the working copy, so keep our working-copy
            # requirements and "copy" the rest over from the source.
            discardable = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                discardable.add(req)
            requirements -= discardable
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements
3816
3821
3817
3822
def checkrequirementscompat(ui, requirements):
    """Check enabled and disabled repository requirements for consistency.

    Returns the subset of ``requirements`` that must be dropped because
    a requirement they depend on is disabled, warning the user about
    each.  Aborts when a shared repository would be created from a
    store-less source.
    """

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            # NOTE: 'beacuse' typo preserved — this is a translated
            # runtime msgid; changing it would break i18n lookups.
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'beacuse it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            # Warn only when the user explicitly set the option, but
            # drop the requirement either way.
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped
3859
3864
3860
3865
def filterknowncreateopts(ui, createopts):
    """Filter a dict of repo creation options against the known ones.

    Returns the subset of ``createopts`` whose keys this code does not
    understand.  Repository creation refuses to proceed when the result
    is non-empty; extensions wrap this function to claim the creation
    options they know how to handle.
    """
    unknown = dict(createopts)
    for opt in (
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    ):
        unknown.pop(opt, None)
    return unknown
3886
3891
3887
3892
3888 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3893 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3889 """Create a new repository in a vfs.
3894 """Create a new repository in a vfs.
3890
3895
3891 ``path`` path to the new repo's working directory.
3896 ``path`` path to the new repo's working directory.
3892 ``createopts`` options for the new repository.
3897 ``createopts`` options for the new repository.
3893 ``requirement`` predefined set of requirements.
3898 ``requirement`` predefined set of requirements.
3894 (incompatible with ``createopts``)
3899 (incompatible with ``createopts``)
3895
3900
3896 The following keys for ``createopts`` are recognized:
3901 The following keys for ``createopts`` are recognized:
3897
3902
3898 backend
3903 backend
3899 The storage backend to use.
3904 The storage backend to use.
3900 lfs
3905 lfs
3901 Repository will be created with ``lfs`` requirement. The lfs extension
3906 Repository will be created with ``lfs`` requirement. The lfs extension
3902 will automatically be loaded when the repository is accessed.
3907 will automatically be loaded when the repository is accessed.
3903 narrowfiles
3908 narrowfiles
3904 Set up repository to support narrow file storage.
3909 Set up repository to support narrow file storage.
3905 sharedrepo
3910 sharedrepo
3906 Repository object from which storage should be shared.
3911 Repository object from which storage should be shared.
3907 sharedrelative
3912 sharedrelative
3908 Boolean indicating if the path to the shared repo should be
3913 Boolean indicating if the path to the shared repo should be
3909 stored as relative. By default, the pointer to the "parent" repo
3914 stored as relative. By default, the pointer to the "parent" repo
3910 is stored as an absolute path.
3915 is stored as an absolute path.
3911 shareditems
3916 shareditems
3912 Set of items to share to the new repository (in addition to storage).
3917 Set of items to share to the new repository (in addition to storage).
3913 shallowfilestore
3918 shallowfilestore
3914 Indicates that storage for files should be shallow (not all ancestor
3919 Indicates that storage for files should be shallow (not all ancestor
3915 revisions are known).
3920 revisions are known).
3916 """
3921 """
3917
3922
3918 if requirements is not None:
3923 if requirements is not None:
3919 if createopts is not None:
3924 if createopts is not None:
3920 msg = b'cannot specify both createopts and requirements'
3925 msg = b'cannot specify both createopts and requirements'
3921 raise error.ProgrammingError(msg)
3926 raise error.ProgrammingError(msg)
3922 createopts = {}
3927 createopts = {}
3923 else:
3928 else:
3924 createopts = defaultcreateopts(ui, createopts=createopts)
3929 createopts = defaultcreateopts(ui, createopts=createopts)
3925
3930
3926 unknownopts = filterknowncreateopts(ui, createopts)
3931 unknownopts = filterknowncreateopts(ui, createopts)
3927
3932
3928 if not isinstance(unknownopts, dict):
3933 if not isinstance(unknownopts, dict):
3929 raise error.ProgrammingError(
3934 raise error.ProgrammingError(
3930 b'filterknowncreateopts() did not return a dict'
3935 b'filterknowncreateopts() did not return a dict'
3931 )
3936 )
3932
3937
3933 if unknownopts:
3938 if unknownopts:
3934 raise error.Abort(
3939 raise error.Abort(
3935 _(
3940 _(
3936 b'unable to create repository because of unknown '
3941 b'unable to create repository because of unknown '
3937 b'creation option: %s'
3942 b'creation option: %s'
3938 )
3943 )
3939 % b', '.join(sorted(unknownopts)),
3944 % b', '.join(sorted(unknownopts)),
3940 hint=_(b'is a required extension not loaded?'),
3945 hint=_(b'is a required extension not loaded?'),
3941 )
3946 )
3942
3947
3943 requirements = newreporequirements(ui, createopts=createopts)
3948 requirements = newreporequirements(ui, createopts=createopts)
3944 requirements -= checkrequirementscompat(ui, requirements)
3949 requirements -= checkrequirementscompat(ui, requirements)
3945
3950
3946 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3951 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3947
3952
3948 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3953 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3949 if hgvfs.exists():
3954 if hgvfs.exists():
3950 raise error.RepoError(_(b'repository %s already exists') % path)
3955 raise error.RepoError(_(b'repository %s already exists') % path)
3951
3956
3952 if b'sharedrepo' in createopts:
3957 if b'sharedrepo' in createopts:
3953 sharedpath = createopts[b'sharedrepo'].sharedpath
3958 sharedpath = createopts[b'sharedrepo'].sharedpath
3954
3959
3955 if createopts.get(b'sharedrelative'):
3960 if createopts.get(b'sharedrelative'):
3956 try:
3961 try:
3957 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3962 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3958 sharedpath = util.pconvert(sharedpath)
3963 sharedpath = util.pconvert(sharedpath)
3959 except (IOError, ValueError) as e:
3964 except (IOError, ValueError) as e:
3960 # ValueError is raised on Windows if the drive letters differ
3965 # ValueError is raised on Windows if the drive letters differ
3961 # on each path.
3966 # on each path.
3962 raise error.Abort(
3967 raise error.Abort(
3963 _(b'cannot calculate relative path'),
3968 _(b'cannot calculate relative path'),
3964 hint=stringutil.forcebytestr(e),
3969 hint=stringutil.forcebytestr(e),
3965 )
3970 )
3966
3971
3967 if not wdirvfs.exists():
3972 if not wdirvfs.exists():
3968 wdirvfs.makedirs()
3973 wdirvfs.makedirs()
3969
3974
3970 hgvfs.makedir(notindexed=True)
3975 hgvfs.makedir(notindexed=True)
3971 if b'sharedrepo' not in createopts:
3976 if b'sharedrepo' not in createopts:
3972 hgvfs.mkdir(b'cache')
3977 hgvfs.mkdir(b'cache')
3973 hgvfs.mkdir(b'wcache')
3978 hgvfs.mkdir(b'wcache')
3974
3979
3975 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3980 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3976 if has_store and b'sharedrepo' not in createopts:
3981 if has_store and b'sharedrepo' not in createopts:
3977 hgvfs.mkdir(b'store')
3982 hgvfs.mkdir(b'store')
3978
3983
3979 # We create an invalid changelog outside the store so very old
3984 # We create an invalid changelog outside the store so very old
3980 # Mercurial versions (which didn't know about the requirements
3985 # Mercurial versions (which didn't know about the requirements
3981 # file) encounter an error on reading the changelog. This
3986 # file) encounter an error on reading the changelog. This
3982 # effectively locks out old clients and prevents them from
3987 # effectively locks out old clients and prevents them from
3983 # mucking with a repo in an unknown format.
3988 # mucking with a repo in an unknown format.
3984 #
3989 #
3985 # The revlog header has version 65535, which won't be recognized by
3990 # The revlog header has version 65535, which won't be recognized by
3986 # such old clients.
3991 # such old clients.
3987 hgvfs.append(
3992 hgvfs.append(
3988 b'00changelog.i',
3993 b'00changelog.i',
3989 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3994 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3990 b'layout',
3995 b'layout',
3991 )
3996 )
3992
3997
3993 # Filter the requirements into working copy and store ones
3998 # Filter the requirements into working copy and store ones
3994 wcreq, storereq = scmutil.filterrequirements(requirements)
3999 wcreq, storereq = scmutil.filterrequirements(requirements)
3995 # write working copy ones
4000 # write working copy ones
3996 scmutil.writerequires(hgvfs, wcreq)
4001 scmutil.writerequires(hgvfs, wcreq)
3997 # If there are store requirements and the current repository
4002 # If there are store requirements and the current repository
3998 # is not a shared one, write stored requirements
4003 # is not a shared one, write stored requirements
3999 # For new shared repository, we don't need to write the store
4004 # For new shared repository, we don't need to write the store
4000 # requirements as they are already present in store requires
4005 # requirements as they are already present in store requires
4001 if storereq and b'sharedrepo' not in createopts:
4006 if storereq and b'sharedrepo' not in createopts:
4002 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
4007 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
4003 scmutil.writerequires(storevfs, storereq)
4008 scmutil.writerequires(storevfs, storereq)
4004
4009
4005 # Write out file telling readers where to find the shared store.
4010 # Write out file telling readers where to find the shared store.
4006 if b'sharedrepo' in createopts:
4011 if b'sharedrepo' in createopts:
4007 hgvfs.write(b'sharedpath', sharedpath)
4012 hgvfs.write(b'sharedpath', sharedpath)
4008
4013
4009 if createopts.get(b'shareditems'):
4014 if createopts.get(b'shareditems'):
4010 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
4015 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
4011 hgvfs.write(b'shared', shared)
4016 hgvfs.write(b'shared', shared)
4012
4017
4013
4018
4014 def poisonrepository(repo):
4019 def poisonrepository(repo):
4015 """Poison a repository instance so it can no longer be used."""
4020 """Poison a repository instance so it can no longer be used."""
4016 # Perform any cleanup on the instance.
4021 # Perform any cleanup on the instance.
4017 repo.close()
4022 repo.close()
4018
4023
4019 # Our strategy is to replace the type of the object with one that
4024 # Our strategy is to replace the type of the object with one that
4020 # has all attribute lookups result in error.
4025 # has all attribute lookups result in error.
4021 #
4026 #
4022 # But we have to allow the close() method because some constructors
4027 # But we have to allow the close() method because some constructors
4023 # of repos call close() on repo references.
4028 # of repos call close() on repo references.
4024 class poisonedrepository:
4029 class poisonedrepository:
4025 def __getattribute__(self, item):
4030 def __getattribute__(self, item):
4026 if item == 'close':
4031 if item == 'close':
4027 return object.__getattribute__(self, item)
4032 return object.__getattribute__(self, item)
4028
4033
4029 raise error.ProgrammingError(
4034 raise error.ProgrammingError(
4030 b'repo instances should not be used after unshare'
4035 b'repo instances should not be used after unshare'
4031 )
4036 )
4032
4037
4033 def close(self):
4038 def close(self):
4034 pass
4039 pass
4035
4040
4036 # We may have a repoview, which intercepts __setattr__. So be sure
4041 # We may have a repoview, which intercepts __setattr__. So be sure
4037 # we operate at the lowest level possible.
4042 # we operate at the lowest level possible.
4038 object.__setattr__(repo, '__class__', poisonedrepository)
4043 object.__setattr__(repo, '__class__', poisonedrepository)
General Comments 0
You need to be logged in to leave comments. Login now