##// END OF EJS Templates
persistent-nodemap: rename the storage.revlog.nodemap.mmap option...
marmoute -
r47024:7d096e5a default
parent child Browse files
Show More
@@ -1,2584 +1,2587 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
18
18
def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones

    :ui: the ui instance whose ``_knownconfig`` registry is updated
    :extname: name of the extension providing the items (used in warnings)
    :configtable: mapping of section name -> mapping of item name -> item
    """
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        # warn when an extension re-declares an already-known item
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)
31
31
32
32
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        # store a list so callers may pass any iterable of (section, name)
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            # generic items match candidate names by regular expression
            self._re = re.compile(self.name)
63
63
64
64
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        # set of registered items whose ``generic`` flag is True
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expression. Having the match
            # rooted to the start of the string will produce less surprising
            # result for user writing simple regex for sub-attribute.
            #
            # For example using "color\..*" match produces an unsurprising
            # result, while using search could suddenly match apparently
            # unrelated configuration that happens to contains "color."
            # anywhere. This is a tradeoff where we favor requiring ".*" on
            # some match to avoid the need to prefix most pattern with "^".
            # The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None
104
104
105
105
# registry of all core (non-extension) config items, keyed by section
coreitems = {}
107
107
108
108
def _register(configtable, *args, **kwargs):
    """Build a configitem from *args/**kwargs and file it in ``configtable``.

    Raises ``error.ProgrammingError`` when the same section/name pair is
    registered twice.
    """
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item
116
116
117
117
# special value for case where the default is derived from other values
dynamicdefault = object()
120
120
# Registering actual config items
122
122
123
123
def getitemregister(configtable):
    """Return a registration function bound to ``configtable``."""
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f
129
129
130
130
# registration helper for core items; fills ``coreitems``
coreconfigitem = getitemregister(coreitems)
132
132
133
133
def _registerdiffopts(section, configprefix=b''):
    """Register the standard set of diff-related options under ``section``.

    :section: config section to register the options in
    :configprefix: optional prefix prepended to every option name
    """
    coreconfigitem(
        section,
        configprefix + b'nodates',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'showfunc',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'unified',
        default=None,
    )
    coreconfigitem(
        section,
        configprefix + b'git',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorews',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewsamount',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignoreblanklines',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewseol',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'nobinary',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'noprefix',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'word-diff',
        default=False,
    )
190
190
191
191
coreconfigitem(
    b'alias',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'auth',
    b'cookiefile',
    default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks',
    b'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle',
    b'mainreporoot',
    default=b'',
)
coreconfigitem(
    b'censor',
    b'policy',
    default=b'abort',
    experimental=True,
)
coreconfigitem(
    b'chgserver',
    b'idletimeout',
    default=3600,
)
coreconfigitem(
    b'chgserver',
    b'skiphash',
    default=False,
)
coreconfigitem(
    b'cmdserver',
    b'log',
    default=None,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-files',
    default=7,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-size',
    default=b'1 MB',
)
coreconfigitem(
    b'cmdserver',
    b'max-repo-cache',
    default=0,
    experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'message-encodings',
    default=list,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'cmdserver',
    b'shutdown-on-interrupt',
    default=True,
)
coreconfigitem(
    b'color',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'color',
    b'mode',
    default=b'auto',
)
coreconfigitem(
    b'color',
    b'pagermode',
    default=dynamicdefault,
)
coreconfigitem(
    b'command-templates',
    b'graphnode',
    default=None,
    alias=[(b'ui', b'graphnodetemplate')],
)
coreconfigitem(
    b'command-templates',
    b'log',
    default=None,
    alias=[(b'ui', b'logtemplate')],
)
coreconfigitem(
    b'command-templates',
    b'mergemarker',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
    alias=[(b'ui', b'mergemarkertemplate')],
)
coreconfigitem(
    b'command-templates',
    b'pre-merge-tool-output',
    default=None,
    alias=[(b'ui', b'pre-merge-tool-output-template')],
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary',
    default=None,
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary.*',
    default=dynamicdefault,
    generic=True,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands',
    b'commit.post-status',
    default=False,
)
coreconfigitem(
    b'commands',
    b'grep.all-files',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'merge.require-rev',
    default=False,
)
coreconfigitem(
    b'commands',
    b'push.require-revs',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.confirm',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.explicit-re-merge',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.mark-check',
    default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands',
    b'show.aliasprefix',
    default=list,
)
coreconfigitem(
    b'commands',
    b'status.relative',
    default=False,
)
coreconfigitem(
    b'commands',
    b'status.skipstates',
    default=[],
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'status.terse',
    default=b'',
)
coreconfigitem(
    b'commands',
    b'status.verbose',
    default=False,
)
coreconfigitem(
    b'commands',
    b'update.check',
    default=None,
)
coreconfigitem(
    b'commands',
    b'update.requiredest',
    default=False,
)
coreconfigitem(
    b'committemplate',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'convert',
    b'bzr.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.cache',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.fuzz',
    default=60,
)
coreconfigitem(
    b'convert',
    b'cvsps.logencoding',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergefrom',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergeto',
    default=None,
)
coreconfigitem(
    b'convert',
    b'git.committeractions',
    default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert',
    b'git.extrakeys',
    default=list,
)
coreconfigitem(
    b'convert',
    b'git.findcopiesharder',
    default=False,
)
coreconfigitem(
    b'convert',
    b'git.remoteprefix',
    default=b'remote',
)
coreconfigitem(
    b'convert',
    b'git.renamelimit',
    default=400,
)
coreconfigitem(
    b'convert',
    b'git.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'git.similarity',
    default=50,
)
coreconfigitem(
    b'convert',
    b'git.skipsubmodules',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.clonebranches',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.ignoreerrors',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.preserve-hash',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.revs',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.saverev',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.sourcename',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.startrev',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.tagsbranch',
    default=b'default',
)
coreconfigitem(
    b'convert',
    b'hg.usebranchnames',
    default=True,
)
coreconfigitem(
    b'convert',
    b'ignoreancestorcheck',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'convert',
    b'localtimezone',
    default=False,
)
coreconfigitem(
    b'convert',
    b'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem(
    b'convert',
    b'p4.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'skiptags',
    default=False,
)
coreconfigitem(
    b'convert',
    b'svn.debugsvnlog',
    default=True,
)
coreconfigitem(
    b'convert',
    b'svn.trunk',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.tags',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.branches',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.startrev',
    default=0,
)
coreconfigitem(
    b'debug',
    b'dirstate.delaywrite',
    default=0,
)
coreconfigitem(
    b'defaults',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'devel',
    b'all-warnings',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle2.debug',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle.delta',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'cache-vfs',
    default=None,
)
coreconfigitem(
    b'devel',
    b'check-locks',
    default=False,
)
coreconfigitem(
    b'devel',
    b'check-relroot',
    default=False,
)
coreconfigitem(
    b'devel',
    b'default-date',
    default=None,
)
coreconfigitem(
    b'devel',
    b'deprec-warn',
    default=False,
)
coreconfigitem(
    b'devel',
    b'disableloaddefaultcerts',
    default=False,
)
coreconfigitem(
    b'devel',
    b'warn-empty-changegroup',
    default=False,
)
coreconfigitem(
    b'devel',
    b'legacy.exchange',
    default=list,
)
# When True, revlogs use a special reference version of the nodemap, that is not
# performant but is "known" to behave properly.
coreconfigitem(
    b'devel',
    b'persistent-nodemap',
    default=False,
)
coreconfigitem(
    b'devel',
    b'servercafile',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverexactprotocol',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverrequirecert',
    default=False,
)
coreconfigitem(
    b'devel',
    b'strip-obsmarkers',
    default=True,
)
coreconfigitem(
    b'devel',
    b'warn-config',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-default',
    default=None,
)
coreconfigitem(
    b'devel',
    b'user.obsmarker',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-unknown',
    default=None,
)
coreconfigitem(
    b'devel',
    b'debug.copies',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.extensions',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.repo-filters',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.peer-request',
    default=False,
)
# If discovery.grow-sample is False, the sample size used in set discovery will
# not be increased through the process
coreconfigitem(
    b'devel',
    b'discovery.grow-sample',
    default=True,
)
# discovery.grow-sample.rate control the rate at which the sample grow
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.rate',
    default=1.05,
)
# If discovery.randomize is False, random sampling during discovery are
# deterministic. It is meant for integration tests.
coreconfigitem(
    b'devel',
    b'discovery.randomize',
    default=True,
)
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'email',
    b'bcc',
    default=None,
)
coreconfigitem(
    b'email',
    b'cc',
    default=None,
)
coreconfigitem(
    b'email',
    b'charsets',
    default=list,
)
coreconfigitem(
    b'email',
    b'from',
    default=None,
)
coreconfigitem(
    b'email',
    b'method',
    default=b'smtp',
)
coreconfigitem(
    b'email',
    b'reply-to',
    default=None,
)
coreconfigitem(
    b'email',
    b'to',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'archivemetatemplate',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'auto-publish',
    default=b'publish',
)
coreconfigitem(
    b'experimental',
    b'bundle-phases',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2-advertise',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'bundle2-output-capture',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2.pushback',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2lazylocking',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'changegroup3',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'cleanup-as-archived',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'clientcompressionengines',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'copytrace',
    default=b'on',
)
841 coreconfigitem(
841 coreconfigitem(
842 b'experimental',
842 b'experimental',
843 b'copytrace.movecandidateslimit',
843 b'copytrace.movecandidateslimit',
844 default=100,
844 default=100,
845 )
845 )
846 coreconfigitem(
846 coreconfigitem(
847 b'experimental',
847 b'experimental',
848 b'copytrace.sourcecommitlimit',
848 b'copytrace.sourcecommitlimit',
849 default=100,
849 default=100,
850 )
850 )
851 coreconfigitem(
851 coreconfigitem(
852 b'experimental',
852 b'experimental',
853 b'copies.read-from',
853 b'copies.read-from',
854 default=b"filelog-only",
854 default=b"filelog-only",
855 )
855 )
856 coreconfigitem(
856 coreconfigitem(
857 b'experimental',
857 b'experimental',
858 b'copies.write-to',
858 b'copies.write-to',
859 default=b'filelog-only',
859 default=b'filelog-only',
860 )
860 )
861 coreconfigitem(
861 coreconfigitem(
862 b'experimental',
862 b'experimental',
863 b'crecordtest',
863 b'crecordtest',
864 default=None,
864 default=None,
865 )
865 )
866 coreconfigitem(
866 coreconfigitem(
867 b'experimental',
867 b'experimental',
868 b'directaccess',
868 b'directaccess',
869 default=False,
869 default=False,
870 )
870 )
871 coreconfigitem(
871 coreconfigitem(
872 b'experimental',
872 b'experimental',
873 b'directaccess.revnums',
873 b'directaccess.revnums',
874 default=False,
874 default=False,
875 )
875 )
876 coreconfigitem(
876 coreconfigitem(
877 b'experimental',
877 b'experimental',
878 b'editortmpinhg',
878 b'editortmpinhg',
879 default=False,
879 default=False,
880 )
880 )
881 coreconfigitem(
881 coreconfigitem(
882 b'experimental',
882 b'experimental',
883 b'evolution',
883 b'evolution',
884 default=list,
884 default=list,
885 )
885 )
886 coreconfigitem(
886 coreconfigitem(
887 b'experimental',
887 b'experimental',
888 b'evolution.allowdivergence',
888 b'evolution.allowdivergence',
889 default=False,
889 default=False,
890 alias=[(b'experimental', b'allowdivergence')],
890 alias=[(b'experimental', b'allowdivergence')],
891 )
891 )
892 coreconfigitem(
892 coreconfigitem(
893 b'experimental',
893 b'experimental',
894 b'evolution.allowunstable',
894 b'evolution.allowunstable',
895 default=None,
895 default=None,
896 )
896 )
897 coreconfigitem(
897 coreconfigitem(
898 b'experimental',
898 b'experimental',
899 b'evolution.createmarkers',
899 b'evolution.createmarkers',
900 default=None,
900 default=None,
901 )
901 )
902 coreconfigitem(
902 coreconfigitem(
903 b'experimental',
903 b'experimental',
904 b'evolution.effect-flags',
904 b'evolution.effect-flags',
905 default=True,
905 default=True,
906 alias=[(b'experimental', b'effect-flags')],
906 alias=[(b'experimental', b'effect-flags')],
907 )
907 )
908 coreconfigitem(
908 coreconfigitem(
909 b'experimental',
909 b'experimental',
910 b'evolution.exchange',
910 b'evolution.exchange',
911 default=None,
911 default=None,
912 )
912 )
913 coreconfigitem(
913 coreconfigitem(
914 b'experimental',
914 b'experimental',
915 b'evolution.bundle-obsmarker',
915 b'evolution.bundle-obsmarker',
916 default=False,
916 default=False,
917 )
917 )
918 coreconfigitem(
918 coreconfigitem(
919 b'experimental',
919 b'experimental',
920 b'evolution.bundle-obsmarker:mandatory',
920 b'evolution.bundle-obsmarker:mandatory',
921 default=True,
921 default=True,
922 )
922 )
923 coreconfigitem(
923 coreconfigitem(
924 b'experimental',
924 b'experimental',
925 b'log.topo',
925 b'log.topo',
926 default=False,
926 default=False,
927 )
927 )
928 coreconfigitem(
928 coreconfigitem(
929 b'experimental',
929 b'experimental',
930 b'evolution.report-instabilities',
930 b'evolution.report-instabilities',
931 default=True,
931 default=True,
932 )
932 )
933 coreconfigitem(
933 coreconfigitem(
934 b'experimental',
934 b'experimental',
935 b'evolution.track-operation',
935 b'evolution.track-operation',
936 default=True,
936 default=True,
937 )
937 )
938 # repo-level config to exclude a revset visibility
938 # repo-level config to exclude a revset visibility
939 #
939 #
940 # The target use case is to use `share` to expose different subset of the same
940 # The target use case is to use `share` to expose different subset of the same
941 # repository, especially server side. See also `server.view`.
941 # repository, especially server side. See also `server.view`.
942 coreconfigitem(
942 coreconfigitem(
943 b'experimental',
943 b'experimental',
944 b'extra-filter-revs',
944 b'extra-filter-revs',
945 default=None,
945 default=None,
946 )
946 )
947 coreconfigitem(
947 coreconfigitem(
948 b'experimental',
948 b'experimental',
949 b'maxdeltachainspan',
949 b'maxdeltachainspan',
950 default=-1,
950 default=-1,
951 )
951 )
952 # tracks files which were undeleted (merge might delete them but we explicitly
952 # tracks files which were undeleted (merge might delete them but we explicitly
953 # kept/undeleted them) and creates new filenodes for them
953 # kept/undeleted them) and creates new filenodes for them
954 coreconfigitem(
954 coreconfigitem(
955 b'experimental',
955 b'experimental',
956 b'merge-track-salvaged',
956 b'merge-track-salvaged',
957 default=False,
957 default=False,
958 )
958 )
959 coreconfigitem(
959 coreconfigitem(
960 b'experimental',
960 b'experimental',
961 b'mergetempdirprefix',
961 b'mergetempdirprefix',
962 default=None,
962 default=None,
963 )
963 )
964 coreconfigitem(
964 coreconfigitem(
965 b'experimental',
965 b'experimental',
966 b'mmapindexthreshold',
966 b'mmapindexthreshold',
967 default=None,
967 default=None,
968 )
968 )
969 coreconfigitem(
969 coreconfigitem(
970 b'experimental',
970 b'experimental',
971 b'narrow',
971 b'narrow',
972 default=False,
972 default=False,
973 )
973 )
974 coreconfigitem(
974 coreconfigitem(
975 b'experimental',
975 b'experimental',
976 b'nonnormalparanoidcheck',
976 b'nonnormalparanoidcheck',
977 default=False,
977 default=False,
978 )
978 )
979 coreconfigitem(
979 coreconfigitem(
980 b'experimental',
980 b'experimental',
981 b'exportableenviron',
981 b'exportableenviron',
982 default=list,
982 default=list,
983 )
983 )
984 coreconfigitem(
984 coreconfigitem(
985 b'experimental',
985 b'experimental',
986 b'extendedheader.index',
986 b'extendedheader.index',
987 default=None,
987 default=None,
988 )
988 )
989 coreconfigitem(
989 coreconfigitem(
990 b'experimental',
990 b'experimental',
991 b'extendedheader.similarity',
991 b'extendedheader.similarity',
992 default=False,
992 default=False,
993 )
993 )
994 coreconfigitem(
994 coreconfigitem(
995 b'experimental',
995 b'experimental',
996 b'graphshorten',
996 b'graphshorten',
997 default=False,
997 default=False,
998 )
998 )
999 coreconfigitem(
999 coreconfigitem(
1000 b'experimental',
1000 b'experimental',
1001 b'graphstyle.parent',
1001 b'graphstyle.parent',
1002 default=dynamicdefault,
1002 default=dynamicdefault,
1003 )
1003 )
1004 coreconfigitem(
1004 coreconfigitem(
1005 b'experimental',
1005 b'experimental',
1006 b'graphstyle.missing',
1006 b'graphstyle.missing',
1007 default=dynamicdefault,
1007 default=dynamicdefault,
1008 )
1008 )
1009 coreconfigitem(
1009 coreconfigitem(
1010 b'experimental',
1010 b'experimental',
1011 b'graphstyle.grandparent',
1011 b'graphstyle.grandparent',
1012 default=dynamicdefault,
1012 default=dynamicdefault,
1013 )
1013 )
1014 coreconfigitem(
1014 coreconfigitem(
1015 b'experimental',
1015 b'experimental',
1016 b'hook-track-tags',
1016 b'hook-track-tags',
1017 default=False,
1017 default=False,
1018 )
1018 )
1019 coreconfigitem(
1019 coreconfigitem(
1020 b'experimental',
1020 b'experimental',
1021 b'httppeer.advertise-v2',
1021 b'httppeer.advertise-v2',
1022 default=False,
1022 default=False,
1023 )
1023 )
1024 coreconfigitem(
1024 coreconfigitem(
1025 b'experimental',
1025 b'experimental',
1026 b'httppeer.v2-encoder-order',
1026 b'httppeer.v2-encoder-order',
1027 default=None,
1027 default=None,
1028 )
1028 )
1029 coreconfigitem(
1029 coreconfigitem(
1030 b'experimental',
1030 b'experimental',
1031 b'httppostargs',
1031 b'httppostargs',
1032 default=False,
1032 default=False,
1033 )
1033 )
1034 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1034 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1035 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1035 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1036
1036
1037 coreconfigitem(
1037 coreconfigitem(
1038 b'experimental',
1038 b'experimental',
1039 b'obsmarkers-exchange-debug',
1039 b'obsmarkers-exchange-debug',
1040 default=False,
1040 default=False,
1041 )
1041 )
1042 coreconfigitem(
1042 coreconfigitem(
1043 b'experimental',
1043 b'experimental',
1044 b'remotenames',
1044 b'remotenames',
1045 default=False,
1045 default=False,
1046 )
1046 )
1047 coreconfigitem(
1047 coreconfigitem(
1048 b'experimental',
1048 b'experimental',
1049 b'removeemptydirs',
1049 b'removeemptydirs',
1050 default=True,
1050 default=True,
1051 )
1051 )
1052 coreconfigitem(
1052 coreconfigitem(
1053 b'experimental',
1053 b'experimental',
1054 b'revert.interactive.select-to-keep',
1054 b'revert.interactive.select-to-keep',
1055 default=False,
1055 default=False,
1056 )
1056 )
1057 coreconfigitem(
1057 coreconfigitem(
1058 b'experimental',
1058 b'experimental',
1059 b'revisions.prefixhexnode',
1059 b'revisions.prefixhexnode',
1060 default=False,
1060 default=False,
1061 )
1061 )
1062 coreconfigitem(
1062 coreconfigitem(
1063 b'experimental',
1063 b'experimental',
1064 b'revlogv2',
1064 b'revlogv2',
1065 default=None,
1065 default=None,
1066 )
1066 )
1067 coreconfigitem(
1067 coreconfigitem(
1068 b'experimental',
1068 b'experimental',
1069 b'revisions.disambiguatewithin',
1069 b'revisions.disambiguatewithin',
1070 default=None,
1070 default=None,
1071 )
1071 )
1072 coreconfigitem(
1072 coreconfigitem(
1073 b'experimental',
1073 b'experimental',
1074 b'rust.index',
1074 b'rust.index',
1075 default=False,
1075 default=False,
1076 )
1076 )
1077 coreconfigitem(
1077 coreconfigitem(
1078 b'experimental',
1078 b'experimental',
1079 b'server.filesdata.recommended-batch-size',
1079 b'server.filesdata.recommended-batch-size',
1080 default=50000,
1080 default=50000,
1081 )
1081 )
1082 coreconfigitem(
1082 coreconfigitem(
1083 b'experimental',
1083 b'experimental',
1084 b'server.manifestdata.recommended-batch-size',
1084 b'server.manifestdata.recommended-batch-size',
1085 default=100000,
1085 default=100000,
1086 )
1086 )
1087 coreconfigitem(
1087 coreconfigitem(
1088 b'experimental',
1088 b'experimental',
1089 b'server.stream-narrow-clones',
1089 b'server.stream-narrow-clones',
1090 default=False,
1090 default=False,
1091 )
1091 )
1092 coreconfigitem(
1092 coreconfigitem(
1093 b'experimental',
1093 b'experimental',
1094 b'sharesafe-auto-downgrade-shares',
1094 b'sharesafe-auto-downgrade-shares',
1095 default=False,
1095 default=False,
1096 )
1096 )
1097 coreconfigitem(
1097 coreconfigitem(
1098 b'experimental',
1098 b'experimental',
1099 b'sharesafe-auto-upgrade-shares',
1099 b'sharesafe-auto-upgrade-shares',
1100 default=False,
1100 default=False,
1101 )
1101 )
1102 coreconfigitem(
1102 coreconfigitem(
1103 b'experimental',
1103 b'experimental',
1104 b'sharesafe-auto-upgrade-fail-error',
1104 b'sharesafe-auto-upgrade-fail-error',
1105 default=False,
1105 default=False,
1106 )
1106 )
1107 coreconfigitem(
1107 coreconfigitem(
1108 b'experimental',
1108 b'experimental',
1109 b'sharesafe-warn-outdated-shares',
1109 b'sharesafe-warn-outdated-shares',
1110 default=True,
1110 default=True,
1111 )
1111 )
1112 coreconfigitem(
1112 coreconfigitem(
1113 b'experimental',
1113 b'experimental',
1114 b'single-head-per-branch',
1114 b'single-head-per-branch',
1115 default=False,
1115 default=False,
1116 )
1116 )
1117 coreconfigitem(
1117 coreconfigitem(
1118 b'experimental',
1118 b'experimental',
1119 b'single-head-per-branch:account-closed-heads',
1119 b'single-head-per-branch:account-closed-heads',
1120 default=False,
1120 default=False,
1121 )
1121 )
1122 coreconfigitem(
1122 coreconfigitem(
1123 b'experimental',
1123 b'experimental',
1124 b'single-head-per-branch:public-changes-only',
1124 b'single-head-per-branch:public-changes-only',
1125 default=False,
1125 default=False,
1126 )
1126 )
1127 coreconfigitem(
1127 coreconfigitem(
1128 b'experimental',
1128 b'experimental',
1129 b'sshserver.support-v2',
1129 b'sshserver.support-v2',
1130 default=False,
1130 default=False,
1131 )
1131 )
1132 coreconfigitem(
1132 coreconfigitem(
1133 b'experimental',
1133 b'experimental',
1134 b'sparse-read',
1134 b'sparse-read',
1135 default=False,
1135 default=False,
1136 )
1136 )
1137 coreconfigitem(
1137 coreconfigitem(
1138 b'experimental',
1138 b'experimental',
1139 b'sparse-read.density-threshold',
1139 b'sparse-read.density-threshold',
1140 default=0.50,
1140 default=0.50,
1141 )
1141 )
1142 coreconfigitem(
1142 coreconfigitem(
1143 b'experimental',
1143 b'experimental',
1144 b'sparse-read.min-gap-size',
1144 b'sparse-read.min-gap-size',
1145 default=b'65K',
1145 default=b'65K',
1146 )
1146 )
1147 coreconfigitem(
1147 coreconfigitem(
1148 b'experimental',
1148 b'experimental',
1149 b'treemanifest',
1149 b'treemanifest',
1150 default=False,
1150 default=False,
1151 )
1151 )
1152 coreconfigitem(
1152 coreconfigitem(
1153 b'experimental',
1153 b'experimental',
1154 b'update.atomic-file',
1154 b'update.atomic-file',
1155 default=False,
1155 default=False,
1156 )
1156 )
1157 coreconfigitem(
1157 coreconfigitem(
1158 b'experimental',
1158 b'experimental',
1159 b'sshpeer.advertise-v2',
1159 b'sshpeer.advertise-v2',
1160 default=False,
1160 default=False,
1161 )
1161 )
1162 coreconfigitem(
1162 coreconfigitem(
1163 b'experimental',
1163 b'experimental',
1164 b'web.apiserver',
1164 b'web.apiserver',
1165 default=False,
1165 default=False,
1166 )
1166 )
1167 coreconfigitem(
1167 coreconfigitem(
1168 b'experimental',
1168 b'experimental',
1169 b'web.api.http-v2',
1169 b'web.api.http-v2',
1170 default=False,
1170 default=False,
1171 )
1171 )
1172 coreconfigitem(
1172 coreconfigitem(
1173 b'experimental',
1173 b'experimental',
1174 b'web.api.debugreflect',
1174 b'web.api.debugreflect',
1175 default=False,
1175 default=False,
1176 )
1176 )
1177 coreconfigitem(
1177 coreconfigitem(
1178 b'experimental',
1178 b'experimental',
1179 b'worker.wdir-get-thread-safe',
1179 b'worker.wdir-get-thread-safe',
1180 default=False,
1180 default=False,
1181 )
1181 )
1182 coreconfigitem(
1182 coreconfigitem(
1183 b'experimental',
1183 b'experimental',
1184 b'worker.repository-upgrade',
1184 b'worker.repository-upgrade',
1185 default=False,
1185 default=False,
1186 )
1186 )
1187 coreconfigitem(
1187 coreconfigitem(
1188 b'experimental',
1188 b'experimental',
1189 b'xdiff',
1189 b'xdiff',
1190 default=False,
1190 default=False,
1191 )
1191 )
1192 coreconfigitem(
1192 coreconfigitem(
1193 b'extensions',
1193 b'extensions',
1194 b'.*',
1194 b'.*',
1195 default=None,
1195 default=None,
1196 generic=True,
1196 generic=True,
1197 )
1197 )
1198 coreconfigitem(
1198 coreconfigitem(
1199 b'extdata',
1199 b'extdata',
1200 b'.*',
1200 b'.*',
1201 default=None,
1201 default=None,
1202 generic=True,
1202 generic=True,
1203 )
1203 )
1204 coreconfigitem(
1204 coreconfigitem(
1205 b'format',
1205 b'format',
1206 b'bookmarks-in-store',
1206 b'bookmarks-in-store',
1207 default=False,
1207 default=False,
1208 )
1208 )
1209 coreconfigitem(
1209 coreconfigitem(
1210 b'format',
1210 b'format',
1211 b'chunkcachesize',
1211 b'chunkcachesize',
1212 default=None,
1212 default=None,
1213 experimental=True,
1213 experimental=True,
1214 )
1214 )
1215 coreconfigitem(
1215 coreconfigitem(
1216 b'format',
1216 b'format',
1217 b'dotencode',
1217 b'dotencode',
1218 default=True,
1218 default=True,
1219 )
1219 )
1220 coreconfigitem(
1220 coreconfigitem(
1221 b'format',
1221 b'format',
1222 b'generaldelta',
1222 b'generaldelta',
1223 default=False,
1223 default=False,
1224 experimental=True,
1224 experimental=True,
1225 )
1225 )
1226 coreconfigitem(
1226 coreconfigitem(
1227 b'format',
1227 b'format',
1228 b'manifestcachesize',
1228 b'manifestcachesize',
1229 default=None,
1229 default=None,
1230 experimental=True,
1230 experimental=True,
1231 )
1231 )
1232 coreconfigitem(
1232 coreconfigitem(
1233 b'format',
1233 b'format',
1234 b'maxchainlen',
1234 b'maxchainlen',
1235 default=dynamicdefault,
1235 default=dynamicdefault,
1236 experimental=True,
1236 experimental=True,
1237 )
1237 )
1238 coreconfigitem(
1238 coreconfigitem(
1239 b'format',
1239 b'format',
1240 b'obsstore-version',
1240 b'obsstore-version',
1241 default=None,
1241 default=None,
1242 )
1242 )
1243 coreconfigitem(
1243 coreconfigitem(
1244 b'format',
1244 b'format',
1245 b'sparse-revlog',
1245 b'sparse-revlog',
1246 default=True,
1246 default=True,
1247 )
1247 )
1248 coreconfigitem(
1248 coreconfigitem(
1249 b'format',
1249 b'format',
1250 b'revlog-compression',
1250 b'revlog-compression',
1251 default=lambda: [b'zlib'],
1251 default=lambda: [b'zlib'],
1252 alias=[(b'experimental', b'format.compression')],
1252 alias=[(b'experimental', b'format.compression')],
1253 )
1253 )
1254 coreconfigitem(
1254 coreconfigitem(
1255 b'format',
1255 b'format',
1256 b'usefncache',
1256 b'usefncache',
1257 default=True,
1257 default=True,
1258 )
1258 )
1259 coreconfigitem(
1259 coreconfigitem(
1260 b'format',
1260 b'format',
1261 b'usegeneraldelta',
1261 b'usegeneraldelta',
1262 default=True,
1262 default=True,
1263 )
1263 )
1264 coreconfigitem(
1264 coreconfigitem(
1265 b'format',
1265 b'format',
1266 b'usestore',
1266 b'usestore',
1267 default=True,
1267 default=True,
1268 )
1268 )
1269 # Right now, the only efficient implement of the nodemap logic is in Rust,
1269 # Right now, the only efficient implement of the nodemap logic is in Rust,
1270 #
1270 #
1271 # The case was discussed that the 5.6 sprint and the following was decided for
1271 # The case was discussed that the 5.6 sprint and the following was decided for
1272 # feature that have an optional fast implementation (and are a performance
1272 # feature that have an optional fast implementation (and are a performance
1273 # regression in the others)
1273 # regression in the others)
1274 #
1274 #
1275 # * If the fast implementation is not available, Mercurial will refuse to
1275 # * If the fast implementation is not available, Mercurial will refuse to
1276 # access repository that requires it. Pointing to proper documentation
1276 # access repository that requires it. Pointing to proper documentation
1277 #
1277 #
1278 # * An option exist to lift that limitation and allow repository access.
1278 # * An option exist to lift that limitation and allow repository access.
1279 #
1279 #
1280 # Such access will emit a warning unless configured not to.
1280 # Such access will emit a warning unless configured not to.
1281 #
1281 #
1282 # * When sufficiently mature, the feature can be enabled by default only for
1282 # * When sufficiently mature, the feature can be enabled by default only for
1283 # installation that supports it.
1283 # installation that supports it.
1284 coreconfigitem(
1284 coreconfigitem(
1285 b'format', b'use-persistent-nodemap', default=False, experimental=True
1285 b'format', b'use-persistent-nodemap', default=False, experimental=True
1286 )
1286 )
1287 coreconfigitem(
1287 coreconfigitem(
1288 b'format',
1288 b'format',
1289 b'exp-use-copies-side-data-changeset',
1289 b'exp-use-copies-side-data-changeset',
1290 default=False,
1290 default=False,
1291 experimental=True,
1291 experimental=True,
1292 )
1292 )
1293 coreconfigitem(
1293 coreconfigitem(
1294 b'format',
1294 b'format',
1295 b'exp-use-side-data',
1295 b'exp-use-side-data',
1296 default=False,
1296 default=False,
1297 experimental=True,
1297 experimental=True,
1298 )
1298 )
1299 coreconfigitem(
1299 coreconfigitem(
1300 b'format',
1300 b'format',
1301 b'exp-share-safe',
1301 b'exp-share-safe',
1302 default=False,
1302 default=False,
1303 experimental=True,
1303 experimental=True,
1304 )
1304 )
1305 coreconfigitem(
1305 coreconfigitem(
1306 b'format',
1306 b'format',
1307 b'internal-phase',
1307 b'internal-phase',
1308 default=False,
1308 default=False,
1309 experimental=True,
1309 experimental=True,
1310 )
1310 )
1311 coreconfigitem(
1311 coreconfigitem(
1312 b'fsmonitor',
1312 b'fsmonitor',
1313 b'warn_when_unused',
1313 b'warn_when_unused',
1314 default=True,
1314 default=True,
1315 )
1315 )
1316 coreconfigitem(
1316 coreconfigitem(
1317 b'fsmonitor',
1317 b'fsmonitor',
1318 b'warn_update_file_count',
1318 b'warn_update_file_count',
1319 default=50000,
1319 default=50000,
1320 )
1320 )
1321 coreconfigitem(
1321 coreconfigitem(
1322 b'fsmonitor',
1322 b'fsmonitor',
1323 b'warn_update_file_count_rust',
1323 b'warn_update_file_count_rust',
1324 default=400000,
1324 default=400000,
1325 )
1325 )
1326 coreconfigitem(
1326 coreconfigitem(
1327 b'help',
1327 b'help',
1328 br'hidden-command\..*',
1328 br'hidden-command\..*',
1329 default=False,
1329 default=False,
1330 generic=True,
1330 generic=True,
1331 )
1331 )
1332 coreconfigitem(
1332 coreconfigitem(
1333 b'help',
1333 b'help',
1334 br'hidden-topic\..*',
1334 br'hidden-topic\..*',
1335 default=False,
1335 default=False,
1336 generic=True,
1336 generic=True,
1337 )
1337 )
1338 coreconfigitem(
1338 coreconfigitem(
1339 b'hooks',
1339 b'hooks',
1340 b'.*',
1340 b'.*',
1341 default=dynamicdefault,
1341 default=dynamicdefault,
1342 generic=True,
1342 generic=True,
1343 )
1343 )
1344 coreconfigitem(
1344 coreconfigitem(
1345 b'hgweb-paths',
1345 b'hgweb-paths',
1346 b'.*',
1346 b'.*',
1347 default=list,
1347 default=list,
1348 generic=True,
1348 generic=True,
1349 )
1349 )
1350 coreconfigitem(
1350 coreconfigitem(
1351 b'hostfingerprints',
1351 b'hostfingerprints',
1352 b'.*',
1352 b'.*',
1353 default=list,
1353 default=list,
1354 generic=True,
1354 generic=True,
1355 )
1355 )
1356 coreconfigitem(
1356 coreconfigitem(
1357 b'hostsecurity',
1357 b'hostsecurity',
1358 b'ciphers',
1358 b'ciphers',
1359 default=None,
1359 default=None,
1360 )
1360 )
1361 coreconfigitem(
1361 coreconfigitem(
1362 b'hostsecurity',
1362 b'hostsecurity',
1363 b'minimumprotocol',
1363 b'minimumprotocol',
1364 default=dynamicdefault,
1364 default=dynamicdefault,
1365 )
1365 )
1366 coreconfigitem(
1366 coreconfigitem(
1367 b'hostsecurity',
1367 b'hostsecurity',
1368 b'.*:minimumprotocol$',
1368 b'.*:minimumprotocol$',
1369 default=dynamicdefault,
1369 default=dynamicdefault,
1370 generic=True,
1370 generic=True,
1371 )
1371 )
1372 coreconfigitem(
1372 coreconfigitem(
1373 b'hostsecurity',
1373 b'hostsecurity',
1374 b'.*:ciphers$',
1374 b'.*:ciphers$',
1375 default=dynamicdefault,
1375 default=dynamicdefault,
1376 generic=True,
1376 generic=True,
1377 )
1377 )
1378 coreconfigitem(
1378 coreconfigitem(
1379 b'hostsecurity',
1379 b'hostsecurity',
1380 b'.*:fingerprints$',
1380 b'.*:fingerprints$',
1381 default=list,
1381 default=list,
1382 generic=True,
1382 generic=True,
1383 )
1383 )
1384 coreconfigitem(
1384 coreconfigitem(
1385 b'hostsecurity',
1385 b'hostsecurity',
1386 b'.*:verifycertsfile$',
1386 b'.*:verifycertsfile$',
1387 default=None,
1387 default=None,
1388 generic=True,
1388 generic=True,
1389 )
1389 )
1390
1390
1391 coreconfigitem(
1391 coreconfigitem(
1392 b'http_proxy',
1392 b'http_proxy',
1393 b'always',
1393 b'always',
1394 default=False,
1394 default=False,
1395 )
1395 )
1396 coreconfigitem(
1396 coreconfigitem(
1397 b'http_proxy',
1397 b'http_proxy',
1398 b'host',
1398 b'host',
1399 default=None,
1399 default=None,
1400 )
1400 )
1401 coreconfigitem(
1401 coreconfigitem(
1402 b'http_proxy',
1402 b'http_proxy',
1403 b'no',
1403 b'no',
1404 default=list,
1404 default=list,
1405 )
1405 )
1406 coreconfigitem(
1406 coreconfigitem(
1407 b'http_proxy',
1407 b'http_proxy',
1408 b'passwd',
1408 b'passwd',
1409 default=None,
1409 default=None,
1410 )
1410 )
1411 coreconfigitem(
1411 coreconfigitem(
1412 b'http_proxy',
1412 b'http_proxy',
1413 b'user',
1413 b'user',
1414 default=None,
1414 default=None,
1415 )
1415 )
1416
1416
1417 coreconfigitem(
1417 coreconfigitem(
1418 b'http',
1418 b'http',
1419 b'timeout',
1419 b'timeout',
1420 default=None,
1420 default=None,
1421 )
1421 )
1422
1422
1423 coreconfigitem(
1423 coreconfigitem(
1424 b'logtoprocess',
1424 b'logtoprocess',
1425 b'commandexception',
1425 b'commandexception',
1426 default=None,
1426 default=None,
1427 )
1427 )
1428 coreconfigitem(
1428 coreconfigitem(
1429 b'logtoprocess',
1429 b'logtoprocess',
1430 b'commandfinish',
1430 b'commandfinish',
1431 default=None,
1431 default=None,
1432 )
1432 )
1433 coreconfigitem(
1433 coreconfigitem(
1434 b'logtoprocess',
1434 b'logtoprocess',
1435 b'command',
1435 b'command',
1436 default=None,
1436 default=None,
1437 )
1437 )
1438 coreconfigitem(
1438 coreconfigitem(
1439 b'logtoprocess',
1439 b'logtoprocess',
1440 b'develwarn',
1440 b'develwarn',
1441 default=None,
1441 default=None,
1442 )
1442 )
1443 coreconfigitem(
1443 coreconfigitem(
1444 b'logtoprocess',
1444 b'logtoprocess',
1445 b'uiblocked',
1445 b'uiblocked',
1446 default=None,
1446 default=None,
1447 )
1447 )
1448 coreconfigitem(
1448 coreconfigitem(
1449 b'merge',
1449 b'merge',
1450 b'checkunknown',
1450 b'checkunknown',
1451 default=b'abort',
1451 default=b'abort',
1452 )
1452 )
1453 coreconfigitem(
1453 coreconfigitem(
1454 b'merge',
1454 b'merge',
1455 b'checkignored',
1455 b'checkignored',
1456 default=b'abort',
1456 default=b'abort',
1457 )
1457 )
1458 coreconfigitem(
1458 coreconfigitem(
1459 b'experimental',
1459 b'experimental',
1460 b'merge.checkpathconflicts',
1460 b'merge.checkpathconflicts',
1461 default=False,
1461 default=False,
1462 )
1462 )
1463 coreconfigitem(
1463 coreconfigitem(
1464 b'merge',
1464 b'merge',
1465 b'followcopies',
1465 b'followcopies',
1466 default=True,
1466 default=True,
1467 )
1467 )
1468 coreconfigitem(
1468 coreconfigitem(
1469 b'merge',
1469 b'merge',
1470 b'on-failure',
1470 b'on-failure',
1471 default=b'continue',
1471 default=b'continue',
1472 )
1472 )
1473 coreconfigitem(
1473 coreconfigitem(
1474 b'merge',
1474 b'merge',
1475 b'preferancestor',
1475 b'preferancestor',
1476 default=lambda: [b'*'],
1476 default=lambda: [b'*'],
1477 experimental=True,
1477 experimental=True,
1478 )
1478 )
1479 coreconfigitem(
1479 coreconfigitem(
1480 b'merge',
1480 b'merge',
1481 b'strict-capability-check',
1481 b'strict-capability-check',
1482 default=False,
1482 default=False,
1483 )
1483 )
1484 coreconfigitem(
1484 coreconfigitem(
1485 b'merge-tools',
1485 b'merge-tools',
1486 b'.*',
1486 b'.*',
1487 default=None,
1487 default=None,
1488 generic=True,
1488 generic=True,
1489 )
1489 )
1490 coreconfigitem(
1490 coreconfigitem(
1491 b'merge-tools',
1491 b'merge-tools',
1492 br'.*\.args$',
1492 br'.*\.args$',
1493 default=b"$local $base $other",
1493 default=b"$local $base $other",
1494 generic=True,
1494 generic=True,
1495 priority=-1,
1495 priority=-1,
1496 )
1496 )
1497 coreconfigitem(
1497 coreconfigitem(
1498 b'merge-tools',
1498 b'merge-tools',
1499 br'.*\.binary$',
1499 br'.*\.binary$',
1500 default=False,
1500 default=False,
1501 generic=True,
1501 generic=True,
1502 priority=-1,
1502 priority=-1,
1503 )
1503 )
1504 coreconfigitem(
1504 coreconfigitem(
1505 b'merge-tools',
1505 b'merge-tools',
1506 br'.*\.check$',
1506 br'.*\.check$',
1507 default=list,
1507 default=list,
1508 generic=True,
1508 generic=True,
1509 priority=-1,
1509 priority=-1,
1510 )
1510 )
1511 coreconfigitem(
1511 coreconfigitem(
1512 b'merge-tools',
1512 b'merge-tools',
1513 br'.*\.checkchanged$',
1513 br'.*\.checkchanged$',
1514 default=False,
1514 default=False,
1515 generic=True,
1515 generic=True,
1516 priority=-1,
1516 priority=-1,
1517 )
1517 )
1518 coreconfigitem(
1518 coreconfigitem(
1519 b'merge-tools',
1519 b'merge-tools',
1520 br'.*\.executable$',
1520 br'.*\.executable$',
1521 default=dynamicdefault,
1521 default=dynamicdefault,
1522 generic=True,
1522 generic=True,
1523 priority=-1,
1523 priority=-1,
1524 )
1524 )
1525 coreconfigitem(
1525 coreconfigitem(
1526 b'merge-tools',
1526 b'merge-tools',
1527 br'.*\.fixeol$',
1527 br'.*\.fixeol$',
1528 default=False,
1528 default=False,
1529 generic=True,
1529 generic=True,
1530 priority=-1,
1530 priority=-1,
1531 )
1531 )
1532 coreconfigitem(
1532 coreconfigitem(
1533 b'merge-tools',
1533 b'merge-tools',
1534 br'.*\.gui$',
1534 br'.*\.gui$',
1535 default=False,
1535 default=False,
1536 generic=True,
1536 generic=True,
1537 priority=-1,
1537 priority=-1,
1538 )
1538 )
1539 coreconfigitem(
1539 coreconfigitem(
1540 b'merge-tools',
1540 b'merge-tools',
1541 br'.*\.mergemarkers$',
1541 br'.*\.mergemarkers$',
1542 default=b'basic',
1542 default=b'basic',
1543 generic=True,
1543 generic=True,
1544 priority=-1,
1544 priority=-1,
1545 )
1545 )
1546 coreconfigitem(
1546 coreconfigitem(
1547 b'merge-tools',
1547 b'merge-tools',
1548 br'.*\.mergemarkertemplate$',
1548 br'.*\.mergemarkertemplate$',
1549 default=dynamicdefault, # take from command-templates.mergemarker
1549 default=dynamicdefault, # take from command-templates.mergemarker
1550 generic=True,
1550 generic=True,
1551 priority=-1,
1551 priority=-1,
1552 )
1552 )
1553 coreconfigitem(
1553 coreconfigitem(
1554 b'merge-tools',
1554 b'merge-tools',
1555 br'.*\.priority$',
1555 br'.*\.priority$',
1556 default=0,
1556 default=0,
1557 generic=True,
1557 generic=True,
1558 priority=-1,
1558 priority=-1,
1559 )
1559 )
1560 coreconfigitem(
1560 coreconfigitem(
1561 b'merge-tools',
1561 b'merge-tools',
1562 br'.*\.premerge$',
1562 br'.*\.premerge$',
1563 default=dynamicdefault,
1563 default=dynamicdefault,
1564 generic=True,
1564 generic=True,
1565 priority=-1,
1565 priority=-1,
1566 )
1566 )
1567 coreconfigitem(
1567 coreconfigitem(
1568 b'merge-tools',
1568 b'merge-tools',
1569 br'.*\.symlink$',
1569 br'.*\.symlink$',
1570 default=False,
1570 default=False,
1571 generic=True,
1571 generic=True,
1572 priority=-1,
1572 priority=-1,
1573 )
1573 )
1574 coreconfigitem(
1574 coreconfigitem(
1575 b'pager',
1575 b'pager',
1576 b'attend-.*',
1576 b'attend-.*',
1577 default=dynamicdefault,
1577 default=dynamicdefault,
1578 generic=True,
1578 generic=True,
1579 )
1579 )
1580 coreconfigitem(
1580 coreconfigitem(
1581 b'pager',
1581 b'pager',
1582 b'ignore',
1582 b'ignore',
1583 default=list,
1583 default=list,
1584 )
1584 )
1585 coreconfigitem(
1585 coreconfigitem(
1586 b'pager',
1586 b'pager',
1587 b'pager',
1587 b'pager',
1588 default=dynamicdefault,
1588 default=dynamicdefault,
1589 )
1589 )
1590 coreconfigitem(
1590 coreconfigitem(
1591 b'patch',
1591 b'patch',
1592 b'eol',
1592 b'eol',
1593 default=b'strict',
1593 default=b'strict',
1594 )
1594 )
1595 coreconfigitem(
1595 coreconfigitem(
1596 b'patch',
1596 b'patch',
1597 b'fuzz',
1597 b'fuzz',
1598 default=2,
1598 default=2,
1599 )
1599 )
1600 coreconfigitem(
1600 coreconfigitem(
1601 b'paths',
1601 b'paths',
1602 b'default',
1602 b'default',
1603 default=None,
1603 default=None,
1604 )
1604 )
1605 coreconfigitem(
1605 coreconfigitem(
1606 b'paths',
1606 b'paths',
1607 b'default-push',
1607 b'default-push',
1608 default=None,
1608 default=None,
1609 )
1609 )
1610 coreconfigitem(
1610 coreconfigitem(
1611 b'paths',
1611 b'paths',
1612 b'.*',
1612 b'.*',
1613 default=None,
1613 default=None,
1614 generic=True,
1614 generic=True,
1615 )
1615 )
1616 coreconfigitem(
1616 coreconfigitem(
1617 b'phases',
1617 b'phases',
1618 b'checksubrepos',
1618 b'checksubrepos',
1619 default=b'follow',
1619 default=b'follow',
1620 )
1620 )
1621 coreconfigitem(
1621 coreconfigitem(
1622 b'phases',
1622 b'phases',
1623 b'new-commit',
1623 b'new-commit',
1624 default=b'draft',
1624 default=b'draft',
1625 )
1625 )
1626 coreconfigitem(
1626 coreconfigitem(
1627 b'phases',
1627 b'phases',
1628 b'publish',
1628 b'publish',
1629 default=True,
1629 default=True,
1630 )
1630 )
1631 coreconfigitem(
1631 coreconfigitem(
1632 b'profiling',
1632 b'profiling',
1633 b'enabled',
1633 b'enabled',
1634 default=False,
1634 default=False,
1635 )
1635 )
1636 coreconfigitem(
1636 coreconfigitem(
1637 b'profiling',
1637 b'profiling',
1638 b'format',
1638 b'format',
1639 default=b'text',
1639 default=b'text',
1640 )
1640 )
1641 coreconfigitem(
1641 coreconfigitem(
1642 b'profiling',
1642 b'profiling',
1643 b'freq',
1643 b'freq',
1644 default=1000,
1644 default=1000,
1645 )
1645 )
1646 coreconfigitem(
1646 coreconfigitem(
1647 b'profiling',
1647 b'profiling',
1648 b'limit',
1648 b'limit',
1649 default=30,
1649 default=30,
1650 )
1650 )
1651 coreconfigitem(
1651 coreconfigitem(
1652 b'profiling',
1652 b'profiling',
1653 b'nested',
1653 b'nested',
1654 default=0,
1654 default=0,
1655 )
1655 )
1656 coreconfigitem(
1656 coreconfigitem(
1657 b'profiling',
1657 b'profiling',
1658 b'output',
1658 b'output',
1659 default=None,
1659 default=None,
1660 )
1660 )
1661 coreconfigitem(
1661 coreconfigitem(
1662 b'profiling',
1662 b'profiling',
1663 b'showmax',
1663 b'showmax',
1664 default=0.999,
1664 default=0.999,
1665 )
1665 )
1666 coreconfigitem(
1666 coreconfigitem(
1667 b'profiling',
1667 b'profiling',
1668 b'showmin',
1668 b'showmin',
1669 default=dynamicdefault,
1669 default=dynamicdefault,
1670 )
1670 )
1671 coreconfigitem(
1671 coreconfigitem(
1672 b'profiling',
1672 b'profiling',
1673 b'showtime',
1673 b'showtime',
1674 default=True,
1674 default=True,
1675 )
1675 )
1676 coreconfigitem(
1676 coreconfigitem(
1677 b'profiling',
1677 b'profiling',
1678 b'sort',
1678 b'sort',
1679 default=b'inlinetime',
1679 default=b'inlinetime',
1680 )
1680 )
1681 coreconfigitem(
1681 coreconfigitem(
1682 b'profiling',
1682 b'profiling',
1683 b'statformat',
1683 b'statformat',
1684 default=b'hotpath',
1684 default=b'hotpath',
1685 )
1685 )
1686 coreconfigitem(
1686 coreconfigitem(
1687 b'profiling',
1687 b'profiling',
1688 b'time-track',
1688 b'time-track',
1689 default=dynamicdefault,
1689 default=dynamicdefault,
1690 )
1690 )
1691 coreconfigitem(
1691 coreconfigitem(
1692 b'profiling',
1692 b'profiling',
1693 b'type',
1693 b'type',
1694 default=b'stat',
1694 default=b'stat',
1695 )
1695 )
1696 coreconfigitem(
1696 coreconfigitem(
1697 b'progress',
1697 b'progress',
1698 b'assume-tty',
1698 b'assume-tty',
1699 default=False,
1699 default=False,
1700 )
1700 )
1701 coreconfigitem(
1701 coreconfigitem(
1702 b'progress',
1702 b'progress',
1703 b'changedelay',
1703 b'changedelay',
1704 default=1,
1704 default=1,
1705 )
1705 )
1706 coreconfigitem(
1706 coreconfigitem(
1707 b'progress',
1707 b'progress',
1708 b'clear-complete',
1708 b'clear-complete',
1709 default=True,
1709 default=True,
1710 )
1710 )
1711 coreconfigitem(
1711 coreconfigitem(
1712 b'progress',
1712 b'progress',
1713 b'debug',
1713 b'debug',
1714 default=False,
1714 default=False,
1715 )
1715 )
1716 coreconfigitem(
1716 coreconfigitem(
1717 b'progress',
1717 b'progress',
1718 b'delay',
1718 b'delay',
1719 default=3,
1719 default=3,
1720 )
1720 )
1721 coreconfigitem(
1721 coreconfigitem(
1722 b'progress',
1722 b'progress',
1723 b'disable',
1723 b'disable',
1724 default=False,
1724 default=False,
1725 )
1725 )
1726 coreconfigitem(
1726 coreconfigitem(
1727 b'progress',
1727 b'progress',
1728 b'estimateinterval',
1728 b'estimateinterval',
1729 default=60.0,
1729 default=60.0,
1730 )
1730 )
1731 coreconfigitem(
1731 coreconfigitem(
1732 b'progress',
1732 b'progress',
1733 b'format',
1733 b'format',
1734 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1734 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1735 )
1735 )
1736 coreconfigitem(
1736 coreconfigitem(
1737 b'progress',
1737 b'progress',
1738 b'refresh',
1738 b'refresh',
1739 default=0.1,
1739 default=0.1,
1740 )
1740 )
1741 coreconfigitem(
1741 coreconfigitem(
1742 b'progress',
1742 b'progress',
1743 b'width',
1743 b'width',
1744 default=dynamicdefault,
1744 default=dynamicdefault,
1745 )
1745 )
1746 coreconfigitem(
1746 coreconfigitem(
1747 b'pull',
1747 b'pull',
1748 b'confirm',
1748 b'confirm',
1749 default=False,
1749 default=False,
1750 )
1750 )
1751 coreconfigitem(
1751 coreconfigitem(
1752 b'push',
1752 b'push',
1753 b'pushvars.server',
1753 b'pushvars.server',
1754 default=False,
1754 default=False,
1755 )
1755 )
1756 coreconfigitem(
1756 coreconfigitem(
1757 b'rewrite',
1757 b'rewrite',
1758 b'backup-bundle',
1758 b'backup-bundle',
1759 default=True,
1759 default=True,
1760 alias=[(b'ui', b'history-editing-backup')],
1760 alias=[(b'ui', b'history-editing-backup')],
1761 )
1761 )
1762 coreconfigitem(
1762 coreconfigitem(
1763 b'rewrite',
1763 b'rewrite',
1764 b'update-timestamp',
1764 b'update-timestamp',
1765 default=False,
1765 default=False,
1766 )
1766 )
1767 coreconfigitem(
1767 coreconfigitem(
1768 b'rewrite',
1768 b'rewrite',
1769 b'empty-successor',
1769 b'empty-successor',
1770 default=b'skip',
1770 default=b'skip',
1771 experimental=True,
1771 experimental=True,
1772 )
1772 )
1773 coreconfigitem(
1773 coreconfigitem(
1774 b'storage',
1774 b'storage',
1775 b'new-repo-backend',
1775 b'new-repo-backend',
1776 default=b'revlogv1',
1776 default=b'revlogv1',
1777 experimental=True,
1777 experimental=True,
1778 )
1778 )
1779 coreconfigitem(
1779 coreconfigitem(
1780 b'storage',
1780 b'storage',
1781 b'revlog.optimize-delta-parent-choice',
1781 b'revlog.optimize-delta-parent-choice',
1782 default=True,
1782 default=True,
1783 alias=[(b'format', b'aggressivemergedeltas')],
1783 alias=[(b'format', b'aggressivemergedeltas')],
1784 )
1784 )
1785 # experimental as long as rust is experimental (or a C version is implemented)
1785 # experimental as long as rust is experimental (or a C version is implemented)
1786 coreconfigitem(
1786 coreconfigitem(
1787 b'storage', b'revlog.nodemap.mmap', default=True, experimental=True
1787 b'storage',
1788 b'revlog.persistent-nodemap.mmap',
1789 default=True,
1790 experimental=True,
1788 )
1791 )
1789 # experimental as long as format.use-persistent-nodemap is.
1792 # experimental as long as format.use-persistent-nodemap is.
1790 coreconfigitem(
1793 coreconfigitem(
1791 b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True
1794 b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True
1792 )
1795 )
1793 coreconfigitem(
1796 coreconfigitem(
1794 b'storage',
1797 b'storage',
1795 b'revlog.reuse-external-delta',
1798 b'revlog.reuse-external-delta',
1796 default=True,
1799 default=True,
1797 )
1800 )
1798 coreconfigitem(
1801 coreconfigitem(
1799 b'storage',
1802 b'storage',
1800 b'revlog.reuse-external-delta-parent',
1803 b'revlog.reuse-external-delta-parent',
1801 default=None,
1804 default=None,
1802 )
1805 )
1803 coreconfigitem(
1806 coreconfigitem(
1804 b'storage',
1807 b'storage',
1805 b'revlog.zlib.level',
1808 b'revlog.zlib.level',
1806 default=None,
1809 default=None,
1807 )
1810 )
1808 coreconfigitem(
1811 coreconfigitem(
1809 b'storage',
1812 b'storage',
1810 b'revlog.zstd.level',
1813 b'revlog.zstd.level',
1811 default=None,
1814 default=None,
1812 )
1815 )
1813 coreconfigitem(
1816 coreconfigitem(
1814 b'server',
1817 b'server',
1815 b'bookmarks-pushkey-compat',
1818 b'bookmarks-pushkey-compat',
1816 default=True,
1819 default=True,
1817 )
1820 )
1818 coreconfigitem(
1821 coreconfigitem(
1819 b'server',
1822 b'server',
1820 b'bundle1',
1823 b'bundle1',
1821 default=True,
1824 default=True,
1822 )
1825 )
1823 coreconfigitem(
1826 coreconfigitem(
1824 b'server',
1827 b'server',
1825 b'bundle1gd',
1828 b'bundle1gd',
1826 default=None,
1829 default=None,
1827 )
1830 )
1828 coreconfigitem(
1831 coreconfigitem(
1829 b'server',
1832 b'server',
1830 b'bundle1.pull',
1833 b'bundle1.pull',
1831 default=None,
1834 default=None,
1832 )
1835 )
1833 coreconfigitem(
1836 coreconfigitem(
1834 b'server',
1837 b'server',
1835 b'bundle1gd.pull',
1838 b'bundle1gd.pull',
1836 default=None,
1839 default=None,
1837 )
1840 )
1838 coreconfigitem(
1841 coreconfigitem(
1839 b'server',
1842 b'server',
1840 b'bundle1.push',
1843 b'bundle1.push',
1841 default=None,
1844 default=None,
1842 )
1845 )
1843 coreconfigitem(
1846 coreconfigitem(
1844 b'server',
1847 b'server',
1845 b'bundle1gd.push',
1848 b'bundle1gd.push',
1846 default=None,
1849 default=None,
1847 )
1850 )
1848 coreconfigitem(
1851 coreconfigitem(
1849 b'server',
1852 b'server',
1850 b'bundle2.stream',
1853 b'bundle2.stream',
1851 default=True,
1854 default=True,
1852 alias=[(b'experimental', b'bundle2.stream')],
1855 alias=[(b'experimental', b'bundle2.stream')],
1853 )
1856 )
1854 coreconfigitem(
1857 coreconfigitem(
1855 b'server',
1858 b'server',
1856 b'compressionengines',
1859 b'compressionengines',
1857 default=list,
1860 default=list,
1858 )
1861 )
1859 coreconfigitem(
1862 coreconfigitem(
1860 b'server',
1863 b'server',
1861 b'concurrent-push-mode',
1864 b'concurrent-push-mode',
1862 default=b'check-related',
1865 default=b'check-related',
1863 )
1866 )
1864 coreconfigitem(
1867 coreconfigitem(
1865 b'server',
1868 b'server',
1866 b'disablefullbundle',
1869 b'disablefullbundle',
1867 default=False,
1870 default=False,
1868 )
1871 )
1869 coreconfigitem(
1872 coreconfigitem(
1870 b'server',
1873 b'server',
1871 b'maxhttpheaderlen',
1874 b'maxhttpheaderlen',
1872 default=1024,
1875 default=1024,
1873 )
1876 )
1874 coreconfigitem(
1877 coreconfigitem(
1875 b'server',
1878 b'server',
1876 b'pullbundle',
1879 b'pullbundle',
1877 default=False,
1880 default=False,
1878 )
1881 )
1879 coreconfigitem(
1882 coreconfigitem(
1880 b'server',
1883 b'server',
1881 b'preferuncompressed',
1884 b'preferuncompressed',
1882 default=False,
1885 default=False,
1883 )
1886 )
1884 coreconfigitem(
1887 coreconfigitem(
1885 b'server',
1888 b'server',
1886 b'streamunbundle',
1889 b'streamunbundle',
1887 default=False,
1890 default=False,
1888 )
1891 )
1889 coreconfigitem(
1892 coreconfigitem(
1890 b'server',
1893 b'server',
1891 b'uncompressed',
1894 b'uncompressed',
1892 default=True,
1895 default=True,
1893 )
1896 )
1894 coreconfigitem(
1897 coreconfigitem(
1895 b'server',
1898 b'server',
1896 b'uncompressedallowsecret',
1899 b'uncompressedallowsecret',
1897 default=False,
1900 default=False,
1898 )
1901 )
1899 coreconfigitem(
1902 coreconfigitem(
1900 b'server',
1903 b'server',
1901 b'view',
1904 b'view',
1902 default=b'served',
1905 default=b'served',
1903 )
1906 )
1904 coreconfigitem(
1907 coreconfigitem(
1905 b'server',
1908 b'server',
1906 b'validate',
1909 b'validate',
1907 default=False,
1910 default=False,
1908 )
1911 )
1909 coreconfigitem(
1912 coreconfigitem(
1910 b'server',
1913 b'server',
1911 b'zliblevel',
1914 b'zliblevel',
1912 default=-1,
1915 default=-1,
1913 )
1916 )
1914 coreconfigitem(
1917 coreconfigitem(
1915 b'server',
1918 b'server',
1916 b'zstdlevel',
1919 b'zstdlevel',
1917 default=3,
1920 default=3,
1918 )
1921 )
1919 coreconfigitem(
1922 coreconfigitem(
1920 b'share',
1923 b'share',
1921 b'pool',
1924 b'pool',
1922 default=None,
1925 default=None,
1923 )
1926 )
1924 coreconfigitem(
1927 coreconfigitem(
1925 b'share',
1928 b'share',
1926 b'poolnaming',
1929 b'poolnaming',
1927 default=b'identity',
1930 default=b'identity',
1928 )
1931 )
1929 coreconfigitem(
1932 coreconfigitem(
1930 b'shelve',
1933 b'shelve',
1931 b'maxbackups',
1934 b'maxbackups',
1932 default=10,
1935 default=10,
1933 )
1936 )
1934 coreconfigitem(
1937 coreconfigitem(
1935 b'smtp',
1938 b'smtp',
1936 b'host',
1939 b'host',
1937 default=None,
1940 default=None,
1938 )
1941 )
1939 coreconfigitem(
1942 coreconfigitem(
1940 b'smtp',
1943 b'smtp',
1941 b'local_hostname',
1944 b'local_hostname',
1942 default=None,
1945 default=None,
1943 )
1946 )
1944 coreconfigitem(
1947 coreconfigitem(
1945 b'smtp',
1948 b'smtp',
1946 b'password',
1949 b'password',
1947 default=None,
1950 default=None,
1948 )
1951 )
1949 coreconfigitem(
1952 coreconfigitem(
1950 b'smtp',
1953 b'smtp',
1951 b'port',
1954 b'port',
1952 default=dynamicdefault,
1955 default=dynamicdefault,
1953 )
1956 )
1954 coreconfigitem(
1957 coreconfigitem(
1955 b'smtp',
1958 b'smtp',
1956 b'tls',
1959 b'tls',
1957 default=b'none',
1960 default=b'none',
1958 )
1961 )
1959 coreconfigitem(
1962 coreconfigitem(
1960 b'smtp',
1963 b'smtp',
1961 b'username',
1964 b'username',
1962 default=None,
1965 default=None,
1963 )
1966 )
1964 coreconfigitem(
1967 coreconfigitem(
1965 b'sparse',
1968 b'sparse',
1966 b'missingwarning',
1969 b'missingwarning',
1967 default=True,
1970 default=True,
1968 experimental=True,
1971 experimental=True,
1969 )
1972 )
1970 coreconfigitem(
1973 coreconfigitem(
1971 b'subrepos',
1974 b'subrepos',
1972 b'allowed',
1975 b'allowed',
1973 default=dynamicdefault, # to make backporting simpler
1976 default=dynamicdefault, # to make backporting simpler
1974 )
1977 )
1975 coreconfigitem(
1978 coreconfigitem(
1976 b'subrepos',
1979 b'subrepos',
1977 b'hg:allowed',
1980 b'hg:allowed',
1978 default=dynamicdefault,
1981 default=dynamicdefault,
1979 )
1982 )
1980 coreconfigitem(
1983 coreconfigitem(
1981 b'subrepos',
1984 b'subrepos',
1982 b'git:allowed',
1985 b'git:allowed',
1983 default=dynamicdefault,
1986 default=dynamicdefault,
1984 )
1987 )
1985 coreconfigitem(
1988 coreconfigitem(
1986 b'subrepos',
1989 b'subrepos',
1987 b'svn:allowed',
1990 b'svn:allowed',
1988 default=dynamicdefault,
1991 default=dynamicdefault,
1989 )
1992 )
1990 coreconfigitem(
1993 coreconfigitem(
1991 b'templates',
1994 b'templates',
1992 b'.*',
1995 b'.*',
1993 default=None,
1996 default=None,
1994 generic=True,
1997 generic=True,
1995 )
1998 )
1996 coreconfigitem(
1999 coreconfigitem(
1997 b'templateconfig',
2000 b'templateconfig',
1998 b'.*',
2001 b'.*',
1999 default=dynamicdefault,
2002 default=dynamicdefault,
2000 generic=True,
2003 generic=True,
2001 )
2004 )
2002 coreconfigitem(
2005 coreconfigitem(
2003 b'trusted',
2006 b'trusted',
2004 b'groups',
2007 b'groups',
2005 default=list,
2008 default=list,
2006 )
2009 )
2007 coreconfigitem(
2010 coreconfigitem(
2008 b'trusted',
2011 b'trusted',
2009 b'users',
2012 b'users',
2010 default=list,
2013 default=list,
2011 )
2014 )
2012 coreconfigitem(
2015 coreconfigitem(
2013 b'ui',
2016 b'ui',
2014 b'_usedassubrepo',
2017 b'_usedassubrepo',
2015 default=False,
2018 default=False,
2016 )
2019 )
2017 coreconfigitem(
2020 coreconfigitem(
2018 b'ui',
2021 b'ui',
2019 b'allowemptycommit',
2022 b'allowemptycommit',
2020 default=False,
2023 default=False,
2021 )
2024 )
2022 coreconfigitem(
2025 coreconfigitem(
2023 b'ui',
2026 b'ui',
2024 b'archivemeta',
2027 b'archivemeta',
2025 default=True,
2028 default=True,
2026 )
2029 )
2027 coreconfigitem(
2030 coreconfigitem(
2028 b'ui',
2031 b'ui',
2029 b'askusername',
2032 b'askusername',
2030 default=False,
2033 default=False,
2031 )
2034 )
2032 coreconfigitem(
2035 coreconfigitem(
2033 b'ui',
2036 b'ui',
2034 b'available-memory',
2037 b'available-memory',
2035 default=None,
2038 default=None,
2036 )
2039 )
2037
2040
2038 coreconfigitem(
2041 coreconfigitem(
2039 b'ui',
2042 b'ui',
2040 b'clonebundlefallback',
2043 b'clonebundlefallback',
2041 default=False,
2044 default=False,
2042 )
2045 )
2043 coreconfigitem(
2046 coreconfigitem(
2044 b'ui',
2047 b'ui',
2045 b'clonebundleprefers',
2048 b'clonebundleprefers',
2046 default=list,
2049 default=list,
2047 )
2050 )
2048 coreconfigitem(
2051 coreconfigitem(
2049 b'ui',
2052 b'ui',
2050 b'clonebundles',
2053 b'clonebundles',
2051 default=True,
2054 default=True,
2052 )
2055 )
2053 coreconfigitem(
2056 coreconfigitem(
2054 b'ui',
2057 b'ui',
2055 b'color',
2058 b'color',
2056 default=b'auto',
2059 default=b'auto',
2057 )
2060 )
2058 coreconfigitem(
2061 coreconfigitem(
2059 b'ui',
2062 b'ui',
2060 b'commitsubrepos',
2063 b'commitsubrepos',
2061 default=False,
2064 default=False,
2062 )
2065 )
2063 coreconfigitem(
2066 coreconfigitem(
2064 b'ui',
2067 b'ui',
2065 b'debug',
2068 b'debug',
2066 default=False,
2069 default=False,
2067 )
2070 )
2068 coreconfigitem(
2071 coreconfigitem(
2069 b'ui',
2072 b'ui',
2070 b'debugger',
2073 b'debugger',
2071 default=None,
2074 default=None,
2072 )
2075 )
2073 coreconfigitem(
2076 coreconfigitem(
2074 b'ui',
2077 b'ui',
2075 b'editor',
2078 b'editor',
2076 default=dynamicdefault,
2079 default=dynamicdefault,
2077 )
2080 )
2078 coreconfigitem(
2081 coreconfigitem(
2079 b'ui',
2082 b'ui',
2080 b'detailed-exit-code',
2083 b'detailed-exit-code',
2081 default=False,
2084 default=False,
2082 experimental=True,
2085 experimental=True,
2083 )
2086 )
2084 coreconfigitem(
2087 coreconfigitem(
2085 b'ui',
2088 b'ui',
2086 b'fallbackencoding',
2089 b'fallbackencoding',
2087 default=None,
2090 default=None,
2088 )
2091 )
2089 coreconfigitem(
2092 coreconfigitem(
2090 b'ui',
2093 b'ui',
2091 b'forcecwd',
2094 b'forcecwd',
2092 default=None,
2095 default=None,
2093 )
2096 )
2094 coreconfigitem(
2097 coreconfigitem(
2095 b'ui',
2098 b'ui',
2096 b'forcemerge',
2099 b'forcemerge',
2097 default=None,
2100 default=None,
2098 )
2101 )
2099 coreconfigitem(
2102 coreconfigitem(
2100 b'ui',
2103 b'ui',
2101 b'formatdebug',
2104 b'formatdebug',
2102 default=False,
2105 default=False,
2103 )
2106 )
2104 coreconfigitem(
2107 coreconfigitem(
2105 b'ui',
2108 b'ui',
2106 b'formatjson',
2109 b'formatjson',
2107 default=False,
2110 default=False,
2108 )
2111 )
2109 coreconfigitem(
2112 coreconfigitem(
2110 b'ui',
2113 b'ui',
2111 b'formatted',
2114 b'formatted',
2112 default=None,
2115 default=None,
2113 )
2116 )
2114 coreconfigitem(
2117 coreconfigitem(
2115 b'ui',
2118 b'ui',
2116 b'interactive',
2119 b'interactive',
2117 default=None,
2120 default=None,
2118 )
2121 )
2119 coreconfigitem(
2122 coreconfigitem(
2120 b'ui',
2123 b'ui',
2121 b'interface',
2124 b'interface',
2122 default=None,
2125 default=None,
2123 )
2126 )
2124 coreconfigitem(
2127 coreconfigitem(
2125 b'ui',
2128 b'ui',
2126 b'interface.chunkselector',
2129 b'interface.chunkselector',
2127 default=None,
2130 default=None,
2128 )
2131 )
2129 coreconfigitem(
2132 coreconfigitem(
2130 b'ui',
2133 b'ui',
2131 b'large-file-limit',
2134 b'large-file-limit',
2132 default=10000000,
2135 default=10000000,
2133 )
2136 )
2134 coreconfigitem(
2137 coreconfigitem(
2135 b'ui',
2138 b'ui',
2136 b'logblockedtimes',
2139 b'logblockedtimes',
2137 default=False,
2140 default=False,
2138 )
2141 )
2139 coreconfigitem(
2142 coreconfigitem(
2140 b'ui',
2143 b'ui',
2141 b'merge',
2144 b'merge',
2142 default=None,
2145 default=None,
2143 )
2146 )
2144 coreconfigitem(
2147 coreconfigitem(
2145 b'ui',
2148 b'ui',
2146 b'mergemarkers',
2149 b'mergemarkers',
2147 default=b'basic',
2150 default=b'basic',
2148 )
2151 )
2149 coreconfigitem(
2152 coreconfigitem(
2150 b'ui',
2153 b'ui',
2151 b'message-output',
2154 b'message-output',
2152 default=b'stdio',
2155 default=b'stdio',
2153 )
2156 )
2154 coreconfigitem(
2157 coreconfigitem(
2155 b'ui',
2158 b'ui',
2156 b'nontty',
2159 b'nontty',
2157 default=False,
2160 default=False,
2158 )
2161 )
2159 coreconfigitem(
2162 coreconfigitem(
2160 b'ui',
2163 b'ui',
2161 b'origbackuppath',
2164 b'origbackuppath',
2162 default=None,
2165 default=None,
2163 )
2166 )
2164 coreconfigitem(
2167 coreconfigitem(
2165 b'ui',
2168 b'ui',
2166 b'paginate',
2169 b'paginate',
2167 default=True,
2170 default=True,
2168 )
2171 )
2169 coreconfigitem(
2172 coreconfigitem(
2170 b'ui',
2173 b'ui',
2171 b'patch',
2174 b'patch',
2172 default=None,
2175 default=None,
2173 )
2176 )
2174 coreconfigitem(
2177 coreconfigitem(
2175 b'ui',
2178 b'ui',
2176 b'portablefilenames',
2179 b'portablefilenames',
2177 default=b'warn',
2180 default=b'warn',
2178 )
2181 )
2179 coreconfigitem(
2182 coreconfigitem(
2180 b'ui',
2183 b'ui',
2181 b'promptecho',
2184 b'promptecho',
2182 default=False,
2185 default=False,
2183 )
2186 )
2184 coreconfigitem(
2187 coreconfigitem(
2185 b'ui',
2188 b'ui',
2186 b'quiet',
2189 b'quiet',
2187 default=False,
2190 default=False,
2188 )
2191 )
2189 coreconfigitem(
2192 coreconfigitem(
2190 b'ui',
2193 b'ui',
2191 b'quietbookmarkmove',
2194 b'quietbookmarkmove',
2192 default=False,
2195 default=False,
2193 )
2196 )
2194 coreconfigitem(
2197 coreconfigitem(
2195 b'ui',
2198 b'ui',
2196 b'relative-paths',
2199 b'relative-paths',
2197 default=b'legacy',
2200 default=b'legacy',
2198 )
2201 )
2199 coreconfigitem(
2202 coreconfigitem(
2200 b'ui',
2203 b'ui',
2201 b'remotecmd',
2204 b'remotecmd',
2202 default=b'hg',
2205 default=b'hg',
2203 )
2206 )
2204 coreconfigitem(
2207 coreconfigitem(
2205 b'ui',
2208 b'ui',
2206 b'report_untrusted',
2209 b'report_untrusted',
2207 default=True,
2210 default=True,
2208 )
2211 )
2209 coreconfigitem(
2212 coreconfigitem(
2210 b'ui',
2213 b'ui',
2211 b'rollback',
2214 b'rollback',
2212 default=True,
2215 default=True,
2213 )
2216 )
2214 coreconfigitem(
2217 coreconfigitem(
2215 b'ui',
2218 b'ui',
2216 b'signal-safe-lock',
2219 b'signal-safe-lock',
2217 default=True,
2220 default=True,
2218 )
2221 )
2219 coreconfigitem(
2222 coreconfigitem(
2220 b'ui',
2223 b'ui',
2221 b'slash',
2224 b'slash',
2222 default=False,
2225 default=False,
2223 )
2226 )
2224 coreconfigitem(
2227 coreconfigitem(
2225 b'ui',
2228 b'ui',
2226 b'ssh',
2229 b'ssh',
2227 default=b'ssh',
2230 default=b'ssh',
2228 )
2231 )
2229 coreconfigitem(
2232 coreconfigitem(
2230 b'ui',
2233 b'ui',
2231 b'ssherrorhint',
2234 b'ssherrorhint',
2232 default=None,
2235 default=None,
2233 )
2236 )
2234 coreconfigitem(
2237 coreconfigitem(
2235 b'ui',
2238 b'ui',
2236 b'statuscopies',
2239 b'statuscopies',
2237 default=False,
2240 default=False,
2238 )
2241 )
2239 coreconfigitem(
2242 coreconfigitem(
2240 b'ui',
2243 b'ui',
2241 b'strict',
2244 b'strict',
2242 default=False,
2245 default=False,
2243 )
2246 )
2244 coreconfigitem(
2247 coreconfigitem(
2245 b'ui',
2248 b'ui',
2246 b'style',
2249 b'style',
2247 default=b'',
2250 default=b'',
2248 )
2251 )
2249 coreconfigitem(
2252 coreconfigitem(
2250 b'ui',
2253 b'ui',
2251 b'supportcontact',
2254 b'supportcontact',
2252 default=None,
2255 default=None,
2253 )
2256 )
2254 coreconfigitem(
2257 coreconfigitem(
2255 b'ui',
2258 b'ui',
2256 b'textwidth',
2259 b'textwidth',
2257 default=78,
2260 default=78,
2258 )
2261 )
2259 coreconfigitem(
2262 coreconfigitem(
2260 b'ui',
2263 b'ui',
2261 b'timeout',
2264 b'timeout',
2262 default=b'600',
2265 default=b'600',
2263 )
2266 )
2264 coreconfigitem(
2267 coreconfigitem(
2265 b'ui',
2268 b'ui',
2266 b'timeout.warn',
2269 b'timeout.warn',
2267 default=0,
2270 default=0,
2268 )
2271 )
2269 coreconfigitem(
2272 coreconfigitem(
2270 b'ui',
2273 b'ui',
2271 b'timestamp-output',
2274 b'timestamp-output',
2272 default=False,
2275 default=False,
2273 )
2276 )
2274 coreconfigitem(
2277 coreconfigitem(
2275 b'ui',
2278 b'ui',
2276 b'traceback',
2279 b'traceback',
2277 default=False,
2280 default=False,
2278 )
2281 )
2279 coreconfigitem(
2282 coreconfigitem(
2280 b'ui',
2283 b'ui',
2281 b'tweakdefaults',
2284 b'tweakdefaults',
2282 default=False,
2285 default=False,
2283 )
2286 )
2284 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2287 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2285 coreconfigitem(
2288 coreconfigitem(
2286 b'ui',
2289 b'ui',
2287 b'verbose',
2290 b'verbose',
2288 default=False,
2291 default=False,
2289 )
2292 )
2290 coreconfigitem(
2293 coreconfigitem(
2291 b'verify',
2294 b'verify',
2292 b'skipflags',
2295 b'skipflags',
2293 default=None,
2296 default=None,
2294 )
2297 )
2295 coreconfigitem(
2298 coreconfigitem(
2296 b'web',
2299 b'web',
2297 b'allowbz2',
2300 b'allowbz2',
2298 default=False,
2301 default=False,
2299 )
2302 )
2300 coreconfigitem(
2303 coreconfigitem(
2301 b'web',
2304 b'web',
2302 b'allowgz',
2305 b'allowgz',
2303 default=False,
2306 default=False,
2304 )
2307 )
2305 coreconfigitem(
2308 coreconfigitem(
2306 b'web',
2309 b'web',
2307 b'allow-pull',
2310 b'allow-pull',
2308 alias=[(b'web', b'allowpull')],
2311 alias=[(b'web', b'allowpull')],
2309 default=True,
2312 default=True,
2310 )
2313 )
2311 coreconfigitem(
2314 coreconfigitem(
2312 b'web',
2315 b'web',
2313 b'allow-push',
2316 b'allow-push',
2314 alias=[(b'web', b'allow_push')],
2317 alias=[(b'web', b'allow_push')],
2315 default=list,
2318 default=list,
2316 )
2319 )
2317 coreconfigitem(
2320 coreconfigitem(
2318 b'web',
2321 b'web',
2319 b'allowzip',
2322 b'allowzip',
2320 default=False,
2323 default=False,
2321 )
2324 )
2322 coreconfigitem(
2325 coreconfigitem(
2323 b'web',
2326 b'web',
2324 b'archivesubrepos',
2327 b'archivesubrepos',
2325 default=False,
2328 default=False,
2326 )
2329 )
2327 coreconfigitem(
2330 coreconfigitem(
2328 b'web',
2331 b'web',
2329 b'cache',
2332 b'cache',
2330 default=True,
2333 default=True,
2331 )
2334 )
2332 coreconfigitem(
2335 coreconfigitem(
2333 b'web',
2336 b'web',
2334 b'comparisoncontext',
2337 b'comparisoncontext',
2335 default=5,
2338 default=5,
2336 )
2339 )
2337 coreconfigitem(
2340 coreconfigitem(
2338 b'web',
2341 b'web',
2339 b'contact',
2342 b'contact',
2340 default=None,
2343 default=None,
2341 )
2344 )
2342 coreconfigitem(
2345 coreconfigitem(
2343 b'web',
2346 b'web',
2344 b'deny_push',
2347 b'deny_push',
2345 default=list,
2348 default=list,
2346 )
2349 )
2347 coreconfigitem(
2350 coreconfigitem(
2348 b'web',
2351 b'web',
2349 b'guessmime',
2352 b'guessmime',
2350 default=False,
2353 default=False,
2351 )
2354 )
2352 coreconfigitem(
2355 coreconfigitem(
2353 b'web',
2356 b'web',
2354 b'hidden',
2357 b'hidden',
2355 default=False,
2358 default=False,
2356 )
2359 )
2357 coreconfigitem(
2360 coreconfigitem(
2358 b'web',
2361 b'web',
2359 b'labels',
2362 b'labels',
2360 default=list,
2363 default=list,
2361 )
2364 )
2362 coreconfigitem(
2365 coreconfigitem(
2363 b'web',
2366 b'web',
2364 b'logoimg',
2367 b'logoimg',
2365 default=b'hglogo.png',
2368 default=b'hglogo.png',
2366 )
2369 )
2367 coreconfigitem(
2370 coreconfigitem(
2368 b'web',
2371 b'web',
2369 b'logourl',
2372 b'logourl',
2370 default=b'https://mercurial-scm.org/',
2373 default=b'https://mercurial-scm.org/',
2371 )
2374 )
2372 coreconfigitem(
2375 coreconfigitem(
2373 b'web',
2376 b'web',
2374 b'accesslog',
2377 b'accesslog',
2375 default=b'-',
2378 default=b'-',
2376 )
2379 )
2377 coreconfigitem(
2380 coreconfigitem(
2378 b'web',
2381 b'web',
2379 b'address',
2382 b'address',
2380 default=b'',
2383 default=b'',
2381 )
2384 )
2382 coreconfigitem(
2385 coreconfigitem(
2383 b'web',
2386 b'web',
2384 b'allow-archive',
2387 b'allow-archive',
2385 alias=[(b'web', b'allow_archive')],
2388 alias=[(b'web', b'allow_archive')],
2386 default=list,
2389 default=list,
2387 )
2390 )
2388 coreconfigitem(
2391 coreconfigitem(
2389 b'web',
2392 b'web',
2390 b'allow_read',
2393 b'allow_read',
2391 default=list,
2394 default=list,
2392 )
2395 )
2393 coreconfigitem(
2396 coreconfigitem(
2394 b'web',
2397 b'web',
2395 b'baseurl',
2398 b'baseurl',
2396 default=None,
2399 default=None,
2397 )
2400 )
2398 coreconfigitem(
2401 coreconfigitem(
2399 b'web',
2402 b'web',
2400 b'cacerts',
2403 b'cacerts',
2401 default=None,
2404 default=None,
2402 )
2405 )
2403 coreconfigitem(
2406 coreconfigitem(
2404 b'web',
2407 b'web',
2405 b'certificate',
2408 b'certificate',
2406 default=None,
2409 default=None,
2407 )
2410 )
2408 coreconfigitem(
2411 coreconfigitem(
2409 b'web',
2412 b'web',
2410 b'collapse',
2413 b'collapse',
2411 default=False,
2414 default=False,
2412 )
2415 )
2413 coreconfigitem(
2416 coreconfigitem(
2414 b'web',
2417 b'web',
2415 b'csp',
2418 b'csp',
2416 default=None,
2419 default=None,
2417 )
2420 )
2418 coreconfigitem(
2421 coreconfigitem(
2419 b'web',
2422 b'web',
2420 b'deny_read',
2423 b'deny_read',
2421 default=list,
2424 default=list,
2422 )
2425 )
2423 coreconfigitem(
2426 coreconfigitem(
2424 b'web',
2427 b'web',
2425 b'descend',
2428 b'descend',
2426 default=True,
2429 default=True,
2427 )
2430 )
2428 coreconfigitem(
2431 coreconfigitem(
2429 b'web',
2432 b'web',
2430 b'description',
2433 b'description',
2431 default=b"",
2434 default=b"",
2432 )
2435 )
2433 coreconfigitem(
2436 coreconfigitem(
2434 b'web',
2437 b'web',
2435 b'encoding',
2438 b'encoding',
2436 default=lambda: encoding.encoding,
2439 default=lambda: encoding.encoding,
2437 )
2440 )
2438 coreconfigitem(
2441 coreconfigitem(
2439 b'web',
2442 b'web',
2440 b'errorlog',
2443 b'errorlog',
2441 default=b'-',
2444 default=b'-',
2442 )
2445 )
2443 coreconfigitem(
2446 coreconfigitem(
2444 b'web',
2447 b'web',
2445 b'ipv6',
2448 b'ipv6',
2446 default=False,
2449 default=False,
2447 )
2450 )
2448 coreconfigitem(
2451 coreconfigitem(
2449 b'web',
2452 b'web',
2450 b'maxchanges',
2453 b'maxchanges',
2451 default=10,
2454 default=10,
2452 )
2455 )
2453 coreconfigitem(
2456 coreconfigitem(
2454 b'web',
2457 b'web',
2455 b'maxfiles',
2458 b'maxfiles',
2456 default=10,
2459 default=10,
2457 )
2460 )
2458 coreconfigitem(
2461 coreconfigitem(
2459 b'web',
2462 b'web',
2460 b'maxshortchanges',
2463 b'maxshortchanges',
2461 default=60,
2464 default=60,
2462 )
2465 )
2463 coreconfigitem(
2466 coreconfigitem(
2464 b'web',
2467 b'web',
2465 b'motd',
2468 b'motd',
2466 default=b'',
2469 default=b'',
2467 )
2470 )
2468 coreconfigitem(
2471 coreconfigitem(
2469 b'web',
2472 b'web',
2470 b'name',
2473 b'name',
2471 default=dynamicdefault,
2474 default=dynamicdefault,
2472 )
2475 )
2473 coreconfigitem(
2476 coreconfigitem(
2474 b'web',
2477 b'web',
2475 b'port',
2478 b'port',
2476 default=8000,
2479 default=8000,
2477 )
2480 )
2478 coreconfigitem(
2481 coreconfigitem(
2479 b'web',
2482 b'web',
2480 b'prefix',
2483 b'prefix',
2481 default=b'',
2484 default=b'',
2482 )
2485 )
2483 coreconfigitem(
2486 coreconfigitem(
2484 b'web',
2487 b'web',
2485 b'push_ssl',
2488 b'push_ssl',
2486 default=True,
2489 default=True,
2487 )
2490 )
2488 coreconfigitem(
2491 coreconfigitem(
2489 b'web',
2492 b'web',
2490 b'refreshinterval',
2493 b'refreshinterval',
2491 default=20,
2494 default=20,
2492 )
2495 )
2493 coreconfigitem(
2496 coreconfigitem(
2494 b'web',
2497 b'web',
2495 b'server-header',
2498 b'server-header',
2496 default=None,
2499 default=None,
2497 )
2500 )
2498 coreconfigitem(
2501 coreconfigitem(
2499 b'web',
2502 b'web',
2500 b'static',
2503 b'static',
2501 default=None,
2504 default=None,
2502 )
2505 )
2503 coreconfigitem(
2506 coreconfigitem(
2504 b'web',
2507 b'web',
2505 b'staticurl',
2508 b'staticurl',
2506 default=None,
2509 default=None,
2507 )
2510 )
2508 coreconfigitem(
2511 coreconfigitem(
2509 b'web',
2512 b'web',
2510 b'stripes',
2513 b'stripes',
2511 default=1,
2514 default=1,
2512 )
2515 )
2513 coreconfigitem(
2516 coreconfigitem(
2514 b'web',
2517 b'web',
2515 b'style',
2518 b'style',
2516 default=b'paper',
2519 default=b'paper',
2517 )
2520 )
2518 coreconfigitem(
2521 coreconfigitem(
2519 b'web',
2522 b'web',
2520 b'templates',
2523 b'templates',
2521 default=None,
2524 default=None,
2522 )
2525 )
2523 coreconfigitem(
2526 coreconfigitem(
2524 b'web',
2527 b'web',
2525 b'view',
2528 b'view',
2526 default=b'served',
2529 default=b'served',
2527 experimental=True,
2530 experimental=True,
2528 )
2531 )
2529 coreconfigitem(
2532 coreconfigitem(
2530 b'worker',
2533 b'worker',
2531 b'backgroundclose',
2534 b'backgroundclose',
2532 default=dynamicdefault,
2535 default=dynamicdefault,
2533 )
2536 )
2534 # Windows defaults to a limit of 512 open files. A buffer of 128
2537 # Windows defaults to a limit of 512 open files. A buffer of 128
2535 # should give us enough headway.
2538 # should give us enough headway.
2536 coreconfigitem(
2539 coreconfigitem(
2537 b'worker',
2540 b'worker',
2538 b'backgroundclosemaxqueue',
2541 b'backgroundclosemaxqueue',
2539 default=384,
2542 default=384,
2540 )
2543 )
2541 coreconfigitem(
2544 coreconfigitem(
2542 b'worker',
2545 b'worker',
2543 b'backgroundcloseminfilecount',
2546 b'backgroundcloseminfilecount',
2544 default=2048,
2547 default=2048,
2545 )
2548 )
2546 coreconfigitem(
2549 coreconfigitem(
2547 b'worker',
2550 b'worker',
2548 b'backgroundclosethreadcount',
2551 b'backgroundclosethreadcount',
2549 default=4,
2552 default=4,
2550 )
2553 )
2551 coreconfigitem(
2554 coreconfigitem(
2552 b'worker',
2555 b'worker',
2553 b'enabled',
2556 b'enabled',
2554 default=True,
2557 default=True,
2555 )
2558 )
2556 coreconfigitem(
2559 coreconfigitem(
2557 b'worker',
2560 b'worker',
2558 b'numcpus',
2561 b'numcpus',
2559 default=None,
2562 default=None,
2560 )
2563 )
2561
2564
2562 # Rebase related configuration moved to core because other extension are doing
2565 # Rebase related configuration moved to core because other extension are doing
2563 # strange things. For example, shelve import the extensions to reuse some bit
2566 # strange things. For example, shelve import the extensions to reuse some bit
2564 # without formally loading it.
2567 # without formally loading it.
2565 coreconfigitem(
2568 coreconfigitem(
2566 b'commands',
2569 b'commands',
2567 b'rebase.requiredest',
2570 b'rebase.requiredest',
2568 default=False,
2571 default=False,
2569 )
2572 )
2570 coreconfigitem(
2573 coreconfigitem(
2571 b'experimental',
2574 b'experimental',
2572 b'rebaseskipobsolete',
2575 b'rebaseskipobsolete',
2573 default=True,
2576 default=True,
2574 )
2577 )
2575 coreconfigitem(
2578 coreconfigitem(
2576 b'rebase',
2579 b'rebase',
2577 b'singletransaction',
2580 b'singletransaction',
2578 default=False,
2581 default=False,
2579 )
2582 )
2580 coreconfigitem(
2583 coreconfigitem(
2581 b'rebase',
2584 b'rebase',
2582 b'experimental.inmemory',
2585 b'experimental.inmemory',
2583 default=False,
2586 default=False,
2584 )
2587 )
@@ -1,3619 +1,3619 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import functools
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 delattr,
27 delattr,
28 getattr,
28 getattr,
29 )
29 )
30 from . import (
30 from . import (
31 bookmarks,
31 bookmarks,
32 branchmap,
32 branchmap,
33 bundle2,
33 bundle2,
34 bundlecaches,
34 bundlecaches,
35 changegroup,
35 changegroup,
36 color,
36 color,
37 commit,
37 commit,
38 context,
38 context,
39 dirstate,
39 dirstate,
40 dirstateguard,
40 dirstateguard,
41 discovery,
41 discovery,
42 encoding,
42 encoding,
43 error,
43 error,
44 exchange,
44 exchange,
45 extensions,
45 extensions,
46 filelog,
46 filelog,
47 hook,
47 hook,
48 lock as lockmod,
48 lock as lockmod,
49 match as matchmod,
49 match as matchmod,
50 mergestate as mergestatemod,
50 mergestate as mergestatemod,
51 mergeutil,
51 mergeutil,
52 namespaces,
52 namespaces,
53 narrowspec,
53 narrowspec,
54 obsolete,
54 obsolete,
55 pathutil,
55 pathutil,
56 phases,
56 phases,
57 pushkey,
57 pushkey,
58 pycompat,
58 pycompat,
59 rcutil,
59 rcutil,
60 repoview,
60 repoview,
61 requirements as requirementsmod,
61 requirements as requirementsmod,
62 revset,
62 revset,
63 revsetlang,
63 revsetlang,
64 scmutil,
64 scmutil,
65 sparse,
65 sparse,
66 store as storemod,
66 store as storemod,
67 subrepoutil,
67 subrepoutil,
68 tags as tagsmod,
68 tags as tagsmod,
69 transaction,
69 transaction,
70 txnutil,
70 txnutil,
71 util,
71 util,
72 vfs as vfsmod,
72 vfs as vfsmod,
73 )
73 )
74
74
75 from .interfaces import (
75 from .interfaces import (
76 repository,
76 repository,
77 util as interfaceutil,
77 util as interfaceutil,
78 )
78 )
79
79
80 from .utils import (
80 from .utils import (
81 hashutil,
81 hashutil,
82 procutil,
82 procutil,
83 stringutil,
83 stringutil,
84 )
84 )
85
85
86 from .revlogutils import constants as revlogconst
86 from .revlogutils import constants as revlogconst
87
87
88 release = lockmod.release
88 release = lockmod.release
89 urlerr = util.urlerr
89 urlerr = util.urlerr
90 urlreq = util.urlreq
90 urlreq = util.urlreq
91
91
92 # set of (path, vfs-location) tuples. vfs-location is:
92 # set of (path, vfs-location) tuples. vfs-location is:
93 # - 'plain for vfs relative paths
93 # - 'plain for vfs relative paths
94 # - '' for svfs relative paths
94 # - '' for svfs relative paths
95 _cachedfiles = set()
95 _cachedfiles = set()
96
96
97
97
98 class _basefilecache(scmutil.filecache):
98 class _basefilecache(scmutil.filecache):
99 """All filecache usage on repo are done for logic that should be unfiltered"""
99 """All filecache usage on repo are done for logic that should be unfiltered"""
100
100
101 def __get__(self, repo, type=None):
101 def __get__(self, repo, type=None):
102 if repo is None:
102 if repo is None:
103 return self
103 return self
104 # proxy to unfiltered __dict__ since filtered repo has no entry
104 # proxy to unfiltered __dict__ since filtered repo has no entry
105 unfi = repo.unfiltered()
105 unfi = repo.unfiltered()
106 try:
106 try:
107 return unfi.__dict__[self.sname]
107 return unfi.__dict__[self.sname]
108 except KeyError:
108 except KeyError:
109 pass
109 pass
110 return super(_basefilecache, self).__get__(unfi, type)
110 return super(_basefilecache, self).__get__(unfi, type)
111
111
112 def set(self, repo, value):
112 def set(self, repo, value):
113 return super(_basefilecache, self).set(repo.unfiltered(), value)
113 return super(_basefilecache, self).set(repo.unfiltered(), value)
114
114
115
115
116 class repofilecache(_basefilecache):
116 class repofilecache(_basefilecache):
117 """filecache for files in .hg but outside of .hg/store"""
117 """filecache for files in .hg but outside of .hg/store"""
118
118
119 def __init__(self, *paths):
119 def __init__(self, *paths):
120 super(repofilecache, self).__init__(*paths)
120 super(repofilecache, self).__init__(*paths)
121 for path in paths:
121 for path in paths:
122 _cachedfiles.add((path, b'plain'))
122 _cachedfiles.add((path, b'plain'))
123
123
124 def join(self, obj, fname):
124 def join(self, obj, fname):
125 return obj.vfs.join(fname)
125 return obj.vfs.join(fname)
126
126
127
127
128 class storecache(_basefilecache):
128 class storecache(_basefilecache):
129 """filecache for files in the store"""
129 """filecache for files in the store"""
130
130
131 def __init__(self, *paths):
131 def __init__(self, *paths):
132 super(storecache, self).__init__(*paths)
132 super(storecache, self).__init__(*paths)
133 for path in paths:
133 for path in paths:
134 _cachedfiles.add((path, b''))
134 _cachedfiles.add((path, b''))
135
135
136 def join(self, obj, fname):
136 def join(self, obj, fname):
137 return obj.sjoin(fname)
137 return obj.sjoin(fname)
138
138
139
139
140 class mixedrepostorecache(_basefilecache):
140 class mixedrepostorecache(_basefilecache):
141 """filecache for a mix files in .hg/store and outside"""
141 """filecache for a mix files in .hg/store and outside"""
142
142
143 def __init__(self, *pathsandlocations):
143 def __init__(self, *pathsandlocations):
144 # scmutil.filecache only uses the path for passing back into our
144 # scmutil.filecache only uses the path for passing back into our
145 # join(), so we can safely pass a list of paths and locations
145 # join(), so we can safely pass a list of paths and locations
146 super(mixedrepostorecache, self).__init__(*pathsandlocations)
146 super(mixedrepostorecache, self).__init__(*pathsandlocations)
147 _cachedfiles.update(pathsandlocations)
147 _cachedfiles.update(pathsandlocations)
148
148
149 def join(self, obj, fnameandlocation):
149 def join(self, obj, fnameandlocation):
150 fname, location = fnameandlocation
150 fname, location = fnameandlocation
151 if location == b'plain':
151 if location == b'plain':
152 return obj.vfs.join(fname)
152 return obj.vfs.join(fname)
153 else:
153 else:
154 if location != b'':
154 if location != b'':
155 raise error.ProgrammingError(
155 raise error.ProgrammingError(
156 b'unexpected location: %s' % location
156 b'unexpected location: %s' % location
157 )
157 )
158 return obj.sjoin(fname)
158 return obj.sjoin(fname)
159
159
160
160
161 def isfilecached(repo, name):
161 def isfilecached(repo, name):
162 """check if a repo has already cached "name" filecache-ed property
162 """check if a repo has already cached "name" filecache-ed property
163
163
164 This returns (cachedobj-or-None, iscached) tuple.
164 This returns (cachedobj-or-None, iscached) tuple.
165 """
165 """
166 cacheentry = repo.unfiltered()._filecache.get(name, None)
166 cacheentry = repo.unfiltered()._filecache.get(name, None)
167 if not cacheentry:
167 if not cacheentry:
168 return None, False
168 return None, False
169 return cacheentry.obj, True
169 return cacheentry.obj, True
170
170
171
171
172 class unfilteredpropertycache(util.propertycache):
172 class unfilteredpropertycache(util.propertycache):
173 """propertycache that apply to unfiltered repo only"""
173 """propertycache that apply to unfiltered repo only"""
174
174
175 def __get__(self, repo, type=None):
175 def __get__(self, repo, type=None):
176 unfi = repo.unfiltered()
176 unfi = repo.unfiltered()
177 if unfi is repo:
177 if unfi is repo:
178 return super(unfilteredpropertycache, self).__get__(unfi)
178 return super(unfilteredpropertycache, self).__get__(unfi)
179 return getattr(unfi, self.name)
179 return getattr(unfi, self.name)
180
180
181
181
182 class filteredpropertycache(util.propertycache):
182 class filteredpropertycache(util.propertycache):
183 """propertycache that must take filtering in account"""
183 """propertycache that must take filtering in account"""
184
184
185 def cachevalue(self, obj, value):
185 def cachevalue(self, obj, value):
186 object.__setattr__(obj, self.name, value)
186 object.__setattr__(obj, self.name, value)
187
187
188
188
189 def hasunfilteredcache(repo, name):
189 def hasunfilteredcache(repo, name):
190 """check if a repo has an unfilteredpropertycache value for <name>"""
190 """check if a repo has an unfilteredpropertycache value for <name>"""
191 return name in vars(repo.unfiltered())
191 return name in vars(repo.unfiltered())
192
192
193
193
194 def unfilteredmethod(orig):
194 def unfilteredmethod(orig):
195 """decorate method that always need to be run on unfiltered version"""
195 """decorate method that always need to be run on unfiltered version"""
196
196
197 @functools.wraps(orig)
197 @functools.wraps(orig)
198 def wrapper(repo, *args, **kwargs):
198 def wrapper(repo, *args, **kwargs):
199 return orig(repo.unfiltered(), *args, **kwargs)
199 return orig(repo.unfiltered(), *args, **kwargs)
200
200
201 return wrapper
201 return wrapper
202
202
203
203
204 moderncaps = {
204 moderncaps = {
205 b'lookup',
205 b'lookup',
206 b'branchmap',
206 b'branchmap',
207 b'pushkey',
207 b'pushkey',
208 b'known',
208 b'known',
209 b'getbundle',
209 b'getbundle',
210 b'unbundle',
210 b'unbundle',
211 }
211 }
212 legacycaps = moderncaps.union({b'changegroupsubset'})
212 legacycaps = moderncaps.union({b'changegroupsubset'})
213
213
214
214
215 @interfaceutil.implementer(repository.ipeercommandexecutor)
215 @interfaceutil.implementer(repository.ipeercommandexecutor)
216 class localcommandexecutor(object):
216 class localcommandexecutor(object):
217 def __init__(self, peer):
217 def __init__(self, peer):
218 self._peer = peer
218 self._peer = peer
219 self._sent = False
219 self._sent = False
220 self._closed = False
220 self._closed = False
221
221
222 def __enter__(self):
222 def __enter__(self):
223 return self
223 return self
224
224
225 def __exit__(self, exctype, excvalue, exctb):
225 def __exit__(self, exctype, excvalue, exctb):
226 self.close()
226 self.close()
227
227
228 def callcommand(self, command, args):
228 def callcommand(self, command, args):
229 if self._sent:
229 if self._sent:
230 raise error.ProgrammingError(
230 raise error.ProgrammingError(
231 b'callcommand() cannot be used after sendcommands()'
231 b'callcommand() cannot be used after sendcommands()'
232 )
232 )
233
233
234 if self._closed:
234 if self._closed:
235 raise error.ProgrammingError(
235 raise error.ProgrammingError(
236 b'callcommand() cannot be used after close()'
236 b'callcommand() cannot be used after close()'
237 )
237 )
238
238
239 # We don't need to support anything fancy. Just call the named
239 # We don't need to support anything fancy. Just call the named
240 # method on the peer and return a resolved future.
240 # method on the peer and return a resolved future.
241 fn = getattr(self._peer, pycompat.sysstr(command))
241 fn = getattr(self._peer, pycompat.sysstr(command))
242
242
243 f = pycompat.futures.Future()
243 f = pycompat.futures.Future()
244
244
245 try:
245 try:
246 result = fn(**pycompat.strkwargs(args))
246 result = fn(**pycompat.strkwargs(args))
247 except Exception:
247 except Exception:
248 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
248 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
249 else:
249 else:
250 f.set_result(result)
250 f.set_result(result)
251
251
252 return f
252 return f
253
253
254 def sendcommands(self):
254 def sendcommands(self):
255 self._sent = True
255 self._sent = True
256
256
257 def close(self):
257 def close(self):
258 self._closed = True
258 self._closed = True
259
259
260
260
261 @interfaceutil.implementer(repository.ipeercommands)
261 @interfaceutil.implementer(repository.ipeercommands)
262 class localpeer(repository.peer):
262 class localpeer(repository.peer):
263 '''peer for a local repo; reflects only the most recent API'''
263 '''peer for a local repo; reflects only the most recent API'''
264
264
265 def __init__(self, repo, caps=None):
265 def __init__(self, repo, caps=None):
266 super(localpeer, self).__init__()
266 super(localpeer, self).__init__()
267
267
268 if caps is None:
268 if caps is None:
269 caps = moderncaps.copy()
269 caps = moderncaps.copy()
270 self._repo = repo.filtered(b'served')
270 self._repo = repo.filtered(b'served')
271 self.ui = repo.ui
271 self.ui = repo.ui
272 self._caps = repo._restrictcapabilities(caps)
272 self._caps = repo._restrictcapabilities(caps)
273
273
274 # Begin of _basepeer interface.
274 # Begin of _basepeer interface.
275
275
276 def url(self):
276 def url(self):
277 return self._repo.url()
277 return self._repo.url()
278
278
279 def local(self):
279 def local(self):
280 return self._repo
280 return self._repo
281
281
282 def peer(self):
282 def peer(self):
283 return self
283 return self
284
284
285 def canpush(self):
285 def canpush(self):
286 return True
286 return True
287
287
288 def close(self):
288 def close(self):
289 self._repo.close()
289 self._repo.close()
290
290
291 # End of _basepeer interface.
291 # End of _basepeer interface.
292
292
293 # Begin of _basewirecommands interface.
293 # Begin of _basewirecommands interface.
294
294
295 def branchmap(self):
295 def branchmap(self):
296 return self._repo.branchmap()
296 return self._repo.branchmap()
297
297
298 def capabilities(self):
298 def capabilities(self):
299 return self._caps
299 return self._caps
300
300
301 def clonebundles(self):
301 def clonebundles(self):
302 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
302 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
303
303
304 def debugwireargs(self, one, two, three=None, four=None, five=None):
304 def debugwireargs(self, one, two, three=None, four=None, five=None):
305 """Used to test argument passing over the wire"""
305 """Used to test argument passing over the wire"""
306 return b"%s %s %s %s %s" % (
306 return b"%s %s %s %s %s" % (
307 one,
307 one,
308 two,
308 two,
309 pycompat.bytestr(three),
309 pycompat.bytestr(three),
310 pycompat.bytestr(four),
310 pycompat.bytestr(four),
311 pycompat.bytestr(five),
311 pycompat.bytestr(five),
312 )
312 )
313
313
314 def getbundle(
314 def getbundle(
315 self, source, heads=None, common=None, bundlecaps=None, **kwargs
315 self, source, heads=None, common=None, bundlecaps=None, **kwargs
316 ):
316 ):
317 chunks = exchange.getbundlechunks(
317 chunks = exchange.getbundlechunks(
318 self._repo,
318 self._repo,
319 source,
319 source,
320 heads=heads,
320 heads=heads,
321 common=common,
321 common=common,
322 bundlecaps=bundlecaps,
322 bundlecaps=bundlecaps,
323 **kwargs
323 **kwargs
324 )[1]
324 )[1]
325 cb = util.chunkbuffer(chunks)
325 cb = util.chunkbuffer(chunks)
326
326
327 if exchange.bundle2requested(bundlecaps):
327 if exchange.bundle2requested(bundlecaps):
328 # When requesting a bundle2, getbundle returns a stream to make the
328 # When requesting a bundle2, getbundle returns a stream to make the
329 # wire level function happier. We need to build a proper object
329 # wire level function happier. We need to build a proper object
330 # from it in local peer.
330 # from it in local peer.
331 return bundle2.getunbundler(self.ui, cb)
331 return bundle2.getunbundler(self.ui, cb)
332 else:
332 else:
333 return changegroup.getunbundler(b'01', cb, None)
333 return changegroup.getunbundler(b'01', cb, None)
334
334
335 def heads(self):
335 def heads(self):
336 return self._repo.heads()
336 return self._repo.heads()
337
337
338 def known(self, nodes):
338 def known(self, nodes):
339 return self._repo.known(nodes)
339 return self._repo.known(nodes)
340
340
341 def listkeys(self, namespace):
341 def listkeys(self, namespace):
342 return self._repo.listkeys(namespace)
342 return self._repo.listkeys(namespace)
343
343
344 def lookup(self, key):
344 def lookup(self, key):
345 return self._repo.lookup(key)
345 return self._repo.lookup(key)
346
346
347 def pushkey(self, namespace, key, old, new):
347 def pushkey(self, namespace, key, old, new):
348 return self._repo.pushkey(namespace, key, old, new)
348 return self._repo.pushkey(namespace, key, old, new)
349
349
350 def stream_out(self):
350 def stream_out(self):
351 raise error.Abort(_(b'cannot perform stream clone against local peer'))
351 raise error.Abort(_(b'cannot perform stream clone against local peer'))
352
352
353 def unbundle(self, bundle, heads, url):
353 def unbundle(self, bundle, heads, url):
354 """apply a bundle on a repo
354 """apply a bundle on a repo
355
355
356 This function handles the repo locking itself."""
356 This function handles the repo locking itself."""
357 try:
357 try:
358 try:
358 try:
359 bundle = exchange.readbundle(self.ui, bundle, None)
359 bundle = exchange.readbundle(self.ui, bundle, None)
360 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
360 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
361 if util.safehasattr(ret, b'getchunks'):
361 if util.safehasattr(ret, b'getchunks'):
362 # This is a bundle20 object, turn it into an unbundler.
362 # This is a bundle20 object, turn it into an unbundler.
363 # This little dance should be dropped eventually when the
363 # This little dance should be dropped eventually when the
364 # API is finally improved.
364 # API is finally improved.
365 stream = util.chunkbuffer(ret.getchunks())
365 stream = util.chunkbuffer(ret.getchunks())
366 ret = bundle2.getunbundler(self.ui, stream)
366 ret = bundle2.getunbundler(self.ui, stream)
367 return ret
367 return ret
368 except Exception as exc:
368 except Exception as exc:
369 # If the exception contains output salvaged from a bundle2
369 # If the exception contains output salvaged from a bundle2
370 # reply, we need to make sure it is printed before continuing
370 # reply, we need to make sure it is printed before continuing
371 # to fail. So we build a bundle2 with such output and consume
371 # to fail. So we build a bundle2 with such output and consume
372 # it directly.
372 # it directly.
373 #
373 #
374 # This is not very elegant but allows a "simple" solution for
374 # This is not very elegant but allows a "simple" solution for
375 # issue4594
375 # issue4594
376 output = getattr(exc, '_bundle2salvagedoutput', ())
376 output = getattr(exc, '_bundle2salvagedoutput', ())
377 if output:
377 if output:
378 bundler = bundle2.bundle20(self._repo.ui)
378 bundler = bundle2.bundle20(self._repo.ui)
379 for out in output:
379 for out in output:
380 bundler.addpart(out)
380 bundler.addpart(out)
381 stream = util.chunkbuffer(bundler.getchunks())
381 stream = util.chunkbuffer(bundler.getchunks())
382 b = bundle2.getunbundler(self.ui, stream)
382 b = bundle2.getunbundler(self.ui, stream)
383 bundle2.processbundle(self._repo, b)
383 bundle2.processbundle(self._repo, b)
384 raise
384 raise
385 except error.PushRaced as exc:
385 except error.PushRaced as exc:
386 raise error.ResponseError(
386 raise error.ResponseError(
387 _(b'push failed:'), stringutil.forcebytestr(exc)
387 _(b'push failed:'), stringutil.forcebytestr(exc)
388 )
388 )
389
389
390 # End of _basewirecommands interface.
390 # End of _basewirecommands interface.
391
391
392 # Begin of peer interface.
392 # Begin of peer interface.
393
393
394 def commandexecutor(self):
394 def commandexecutor(self):
395 return localcommandexecutor(self)
395 return localcommandexecutor(self)
396
396
397 # End of peer interface.
397 # End of peer interface.
398
398
399
399
400 @interfaceutil.implementer(repository.ipeerlegacycommands)
400 @interfaceutil.implementer(repository.ipeerlegacycommands)
401 class locallegacypeer(localpeer):
401 class locallegacypeer(localpeer):
402 """peer extension which implements legacy methods too; used for tests with
402 """peer extension which implements legacy methods too; used for tests with
403 restricted capabilities"""
403 restricted capabilities"""
404
404
405 def __init__(self, repo):
405 def __init__(self, repo):
406 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
406 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
407
407
408 # Begin of baselegacywirecommands interface.
408 # Begin of baselegacywirecommands interface.
409
409
410 def between(self, pairs):
410 def between(self, pairs):
411 return self._repo.between(pairs)
411 return self._repo.between(pairs)
412
412
413 def branches(self, nodes):
413 def branches(self, nodes):
414 return self._repo.branches(nodes)
414 return self._repo.branches(nodes)
415
415
416 def changegroup(self, nodes, source):
416 def changegroup(self, nodes, source):
417 outgoing = discovery.outgoing(
417 outgoing = discovery.outgoing(
418 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
418 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
419 )
419 )
420 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
420 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
421
421
422 def changegroupsubset(self, bases, heads, source):
422 def changegroupsubset(self, bases, heads, source):
423 outgoing = discovery.outgoing(
423 outgoing = discovery.outgoing(
424 self._repo, missingroots=bases, ancestorsof=heads
424 self._repo, missingroots=bases, ancestorsof=heads
425 )
425 )
426 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
426 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
427
427
428 # End of baselegacywirecommands interface.
428 # End of baselegacywirecommands interface.
429
429
430
430
431 # Functions receiving (ui, features) that extensions can register to impact
431 # Functions receiving (ui, features) that extensions can register to impact
432 # the ability to load repositories with custom requirements. Only
432 # the ability to load repositories with custom requirements. Only
433 # functions defined in loaded extensions are called.
433 # functions defined in loaded extensions are called.
434 #
434 #
435 # The function receives a set of requirement strings that the repository
435 # The function receives a set of requirement strings that the repository
436 # is capable of opening. Functions will typically add elements to the
436 # is capable of opening. Functions will typically add elements to the
437 # set to reflect that the extension knows how to handle that requirements.
437 # set to reflect that the extension knows how to handle that requirements.
438 featuresetupfuncs = set()
438 featuresetupfuncs = set()
439
439
440
440
441 def _getsharedvfs(hgvfs, requirements):
441 def _getsharedvfs(hgvfs, requirements):
442 """returns the vfs object pointing to root of shared source
442 """returns the vfs object pointing to root of shared source
443 repo for a shared repository
443 repo for a shared repository
444
444
445 hgvfs is vfs pointing at .hg/ of current repo (shared one)
445 hgvfs is vfs pointing at .hg/ of current repo (shared one)
446 requirements is a set of requirements of current repo (shared one)
446 requirements is a set of requirements of current repo (shared one)
447 """
447 """
448 # The ``shared`` or ``relshared`` requirements indicate the
448 # The ``shared`` or ``relshared`` requirements indicate the
449 # store lives in the path contained in the ``.hg/sharedpath`` file.
449 # store lives in the path contained in the ``.hg/sharedpath`` file.
450 # This is an absolute path for ``shared`` and relative to
450 # This is an absolute path for ``shared`` and relative to
451 # ``.hg/`` for ``relshared``.
451 # ``.hg/`` for ``relshared``.
452 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
452 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
453 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
453 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
454 sharedpath = hgvfs.join(sharedpath)
454 sharedpath = hgvfs.join(sharedpath)
455
455
456 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
456 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
457
457
458 if not sharedvfs.exists():
458 if not sharedvfs.exists():
459 raise error.RepoError(
459 raise error.RepoError(
460 _(b'.hg/sharedpath points to nonexistent directory %s')
460 _(b'.hg/sharedpath points to nonexistent directory %s')
461 % sharedvfs.base
461 % sharedvfs.base
462 )
462 )
463 return sharedvfs
463 return sharedvfs
464
464
465
465
466 def _readrequires(vfs, allowmissing):
466 def _readrequires(vfs, allowmissing):
467 """reads the require file present at root of this vfs
467 """reads the require file present at root of this vfs
468 and return a set of requirements
468 and return a set of requirements
469
469
470 If allowmissing is True, we suppress ENOENT if raised"""
470 If allowmissing is True, we suppress ENOENT if raised"""
471 # requires file contains a newline-delimited list of
471 # requires file contains a newline-delimited list of
472 # features/capabilities the opener (us) must have in order to use
472 # features/capabilities the opener (us) must have in order to use
473 # the repository. This file was introduced in Mercurial 0.9.2,
473 # the repository. This file was introduced in Mercurial 0.9.2,
474 # which means very old repositories may not have one. We assume
474 # which means very old repositories may not have one. We assume
475 # a missing file translates to no requirements.
475 # a missing file translates to no requirements.
476 try:
476 try:
477 requirements = set(vfs.read(b'requires').splitlines())
477 requirements = set(vfs.read(b'requires').splitlines())
478 except IOError as e:
478 except IOError as e:
479 if not (allowmissing and e.errno == errno.ENOENT):
479 if not (allowmissing and e.errno == errno.ENOENT):
480 raise
480 raise
481 requirements = set()
481 requirements = set()
482 return requirements
482 return requirements
483
483
484
484
485 def makelocalrepository(baseui, path, intents=None):
485 def makelocalrepository(baseui, path, intents=None):
486 """Create a local repository object.
486 """Create a local repository object.
487
487
488 Given arguments needed to construct a local repository, this function
488 Given arguments needed to construct a local repository, this function
489 performs various early repository loading functionality (such as
489 performs various early repository loading functionality (such as
490 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
490 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
491 the repository can be opened, derives a type suitable for representing
491 the repository can be opened, derives a type suitable for representing
492 that repository, and returns an instance of it.
492 that repository, and returns an instance of it.
493
493
494 The returned object conforms to the ``repository.completelocalrepository``
494 The returned object conforms to the ``repository.completelocalrepository``
495 interface.
495 interface.
496
496
497 The repository type is derived by calling a series of factory functions
497 The repository type is derived by calling a series of factory functions
498 for each aspect/interface of the final repository. These are defined by
498 for each aspect/interface of the final repository. These are defined by
499 ``REPO_INTERFACES``.
499 ``REPO_INTERFACES``.
500
500
501 Each factory function is called to produce a type implementing a specific
501 Each factory function is called to produce a type implementing a specific
502 interface. The cumulative list of returned types will be combined into a
502 interface. The cumulative list of returned types will be combined into a
503 new type and that type will be instantiated to represent the local
503 new type and that type will be instantiated to represent the local
504 repository.
504 repository.
505
505
506 The factory functions each receive various state that may be consulted
506 The factory functions each receive various state that may be consulted
507 as part of deriving a type.
507 as part of deriving a type.
508
508
509 Extensions should wrap these factory functions to customize repository type
509 Extensions should wrap these factory functions to customize repository type
510 creation. Note that an extension's wrapped function may be called even if
510 creation. Note that an extension's wrapped function may be called even if
511 that extension is not loaded for the repo being constructed. Extensions
511 that extension is not loaded for the repo being constructed. Extensions
512 should check if their ``__name__`` appears in the
512 should check if their ``__name__`` appears in the
513 ``extensionmodulenames`` set passed to the factory function and no-op if
513 ``extensionmodulenames`` set passed to the factory function and no-op if
514 not.
514 not.
515 """
515 """
516 ui = baseui.copy()
516 ui = baseui.copy()
517 # Prevent copying repo configuration.
517 # Prevent copying repo configuration.
518 ui.copy = baseui.copy
518 ui.copy = baseui.copy
519
519
520 # Working directory VFS rooted at repository root.
520 # Working directory VFS rooted at repository root.
521 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
521 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
522
522
523 # Main VFS for .hg/ directory.
523 # Main VFS for .hg/ directory.
524 hgpath = wdirvfs.join(b'.hg')
524 hgpath = wdirvfs.join(b'.hg')
525 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
525 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
526 # Whether this repository is shared one or not
526 # Whether this repository is shared one or not
527 shared = False
527 shared = False
528 # If this repository is shared, vfs pointing to shared repo
528 # If this repository is shared, vfs pointing to shared repo
529 sharedvfs = None
529 sharedvfs = None
530
530
531 # The .hg/ path should exist and should be a directory. All other
531 # The .hg/ path should exist and should be a directory. All other
532 # cases are errors.
532 # cases are errors.
533 if not hgvfs.isdir():
533 if not hgvfs.isdir():
534 try:
534 try:
535 hgvfs.stat()
535 hgvfs.stat()
536 except OSError as e:
536 except OSError as e:
537 if e.errno != errno.ENOENT:
537 if e.errno != errno.ENOENT:
538 raise
538 raise
539 except ValueError as e:
539 except ValueError as e:
540 # Can be raised on Python 3.8 when path is invalid.
540 # Can be raised on Python 3.8 when path is invalid.
541 raise error.Abort(
541 raise error.Abort(
542 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
542 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
543 )
543 )
544
544
545 raise error.RepoError(_(b'repository %s not found') % path)
545 raise error.RepoError(_(b'repository %s not found') % path)
546
546
547 requirements = _readrequires(hgvfs, True)
547 requirements = _readrequires(hgvfs, True)
548 shared = (
548 shared = (
549 requirementsmod.SHARED_REQUIREMENT in requirements
549 requirementsmod.SHARED_REQUIREMENT in requirements
550 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
550 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
551 )
551 )
552 storevfs = None
552 storevfs = None
553 if shared:
553 if shared:
554 # This is a shared repo
554 # This is a shared repo
555 sharedvfs = _getsharedvfs(hgvfs, requirements)
555 sharedvfs = _getsharedvfs(hgvfs, requirements)
556 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
556 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
557 else:
557 else:
558 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
558 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
559
559
560 # if .hg/requires contains the sharesafe requirement, it means
560 # if .hg/requires contains the sharesafe requirement, it means
561 # there exists a `.hg/store/requires` too and we should read it
561 # there exists a `.hg/store/requires` too and we should read it
562 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
562 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
563 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
563 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
564 # is not present, refer checkrequirementscompat() for that
564 # is not present, refer checkrequirementscompat() for that
565 #
565 #
566 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
566 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
567 # repository was shared the old way. We check the share source .hg/requires
567 # repository was shared the old way. We check the share source .hg/requires
568 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
568 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
569 # to be reshared
569 # to be reshared
570 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
570 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
571
571
572 if (
572 if (
573 shared
573 shared
574 and requirementsmod.SHARESAFE_REQUIREMENT
574 and requirementsmod.SHARESAFE_REQUIREMENT
575 not in _readrequires(sharedvfs, True)
575 not in _readrequires(sharedvfs, True)
576 ):
576 ):
577 if ui.configbool(
577 if ui.configbool(
578 b'experimental', b'sharesafe-auto-downgrade-shares'
578 b'experimental', b'sharesafe-auto-downgrade-shares'
579 ):
579 ):
580 # prevent cyclic import localrepo -> upgrade -> localrepo
580 # prevent cyclic import localrepo -> upgrade -> localrepo
581 from . import upgrade
581 from . import upgrade
582
582
583 upgrade.downgrade_share_to_non_safe(
583 upgrade.downgrade_share_to_non_safe(
584 ui,
584 ui,
585 hgvfs,
585 hgvfs,
586 sharedvfs,
586 sharedvfs,
587 requirements,
587 requirements,
588 )
588 )
589 else:
589 else:
590 raise error.Abort(
590 raise error.Abort(
591 _(
591 _(
592 b"share source does not support exp-sharesafe requirement"
592 b"share source does not support exp-sharesafe requirement"
593 )
593 )
594 )
594 )
595 else:
595 else:
596 requirements |= _readrequires(storevfs, False)
596 requirements |= _readrequires(storevfs, False)
597 elif shared:
597 elif shared:
598 sourcerequires = _readrequires(sharedvfs, False)
598 sourcerequires = _readrequires(sharedvfs, False)
599 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
599 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
600 if ui.configbool(b'experimental', b'sharesafe-auto-upgrade-shares'):
600 if ui.configbool(b'experimental', b'sharesafe-auto-upgrade-shares'):
601 # prevent cyclic import localrepo -> upgrade -> localrepo
601 # prevent cyclic import localrepo -> upgrade -> localrepo
602 from . import upgrade
602 from . import upgrade
603
603
604 upgrade.upgrade_share_to_safe(
604 upgrade.upgrade_share_to_safe(
605 ui,
605 ui,
606 hgvfs,
606 hgvfs,
607 storevfs,
607 storevfs,
608 requirements,
608 requirements,
609 )
609 )
610 elif ui.configbool(
610 elif ui.configbool(
611 b'experimental', b'sharesafe-warn-outdated-shares'
611 b'experimental', b'sharesafe-warn-outdated-shares'
612 ):
612 ):
613 ui.warn(
613 ui.warn(
614 _(
614 _(
615 b'warning: source repository supports share-safe functionality.'
615 b'warning: source repository supports share-safe functionality.'
616 b' Reshare to upgrade.\n'
616 b' Reshare to upgrade.\n'
617 )
617 )
618 )
618 )
619
619
620 # The .hg/hgrc file may load extensions or contain config options
620 # The .hg/hgrc file may load extensions or contain config options
621 # that influence repository construction. Attempt to load it and
621 # that influence repository construction. Attempt to load it and
622 # process any new extensions that it may have pulled in.
622 # process any new extensions that it may have pulled in.
623 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
623 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
624 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
624 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
625 extensions.loadall(ui)
625 extensions.loadall(ui)
626 extensions.populateui(ui)
626 extensions.populateui(ui)
627
627
628 # Set of module names of extensions loaded for this repository.
628 # Set of module names of extensions loaded for this repository.
629 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
629 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
630
630
631 supportedrequirements = gathersupportedrequirements(ui)
631 supportedrequirements = gathersupportedrequirements(ui)
632
632
633 # We first validate the requirements are known.
633 # We first validate the requirements are known.
634 ensurerequirementsrecognized(requirements, supportedrequirements)
634 ensurerequirementsrecognized(requirements, supportedrequirements)
635
635
636 # Then we validate that the known set is reasonable to use together.
636 # Then we validate that the known set is reasonable to use together.
637 ensurerequirementscompatible(ui, requirements)
637 ensurerequirementscompatible(ui, requirements)
638
638
639 # TODO there are unhandled edge cases related to opening repositories with
639 # TODO there are unhandled edge cases related to opening repositories with
640 # shared storage. If storage is shared, we should also test for requirements
640 # shared storage. If storage is shared, we should also test for requirements
641 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
641 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
642 # that repo, as that repo may load extensions needed to open it. This is a
642 # that repo, as that repo may load extensions needed to open it. This is a
643 # bit complicated because we don't want the other hgrc to overwrite settings
643 # bit complicated because we don't want the other hgrc to overwrite settings
644 # in this hgrc.
644 # in this hgrc.
645 #
645 #
646 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
646 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
647 # file when sharing repos. But if a requirement is added after the share is
647 # file when sharing repos. But if a requirement is added after the share is
648 # performed, thereby introducing a new requirement for the opener, we may
648 # performed, thereby introducing a new requirement for the opener, we may
649 # will not see that and could encounter a run-time error interacting with
649 # will not see that and could encounter a run-time error interacting with
650 # that shared store since it has an unknown-to-us requirement.
650 # that shared store since it has an unknown-to-us requirement.
651
651
652 # At this point, we know we should be capable of opening the repository.
652 # At this point, we know we should be capable of opening the repository.
653 # Now get on with doing that.
653 # Now get on with doing that.
654
654
655 features = set()
655 features = set()
656
656
657 # The "store" part of the repository holds versioned data. How it is
657 # The "store" part of the repository holds versioned data. How it is
658 # accessed is determined by various requirements. If `shared` or
658 # accessed is determined by various requirements. If `shared` or
659 # `relshared` requirements are present, this indicates current repository
659 # `relshared` requirements are present, this indicates current repository
660 # is a share and store exists in path mentioned in `.hg/sharedpath`
660 # is a share and store exists in path mentioned in `.hg/sharedpath`
661 if shared:
661 if shared:
662 storebasepath = sharedvfs.base
662 storebasepath = sharedvfs.base
663 cachepath = sharedvfs.join(b'cache')
663 cachepath = sharedvfs.join(b'cache')
664 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
664 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
665 else:
665 else:
666 storebasepath = hgvfs.base
666 storebasepath = hgvfs.base
667 cachepath = hgvfs.join(b'cache')
667 cachepath = hgvfs.join(b'cache')
668 wcachepath = hgvfs.join(b'wcache')
668 wcachepath = hgvfs.join(b'wcache')
669
669
670 # The store has changed over time and the exact layout is dictated by
670 # The store has changed over time and the exact layout is dictated by
671 # requirements. The store interface abstracts differences across all
671 # requirements. The store interface abstracts differences across all
672 # of them.
672 # of them.
673 store = makestore(
673 store = makestore(
674 requirements,
674 requirements,
675 storebasepath,
675 storebasepath,
676 lambda base: vfsmod.vfs(base, cacheaudited=True),
676 lambda base: vfsmod.vfs(base, cacheaudited=True),
677 )
677 )
678 hgvfs.createmode = store.createmode
678 hgvfs.createmode = store.createmode
679
679
680 storevfs = store.vfs
680 storevfs = store.vfs
681 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
681 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
682
682
683 # The cache vfs is used to manage cache files.
683 # The cache vfs is used to manage cache files.
684 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
684 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
685 cachevfs.createmode = store.createmode
685 cachevfs.createmode = store.createmode
686 # The cache vfs is used to manage cache files related to the working copy
686 # The cache vfs is used to manage cache files related to the working copy
687 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
687 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
688 wcachevfs.createmode = store.createmode
688 wcachevfs.createmode = store.createmode
689
689
690 # Now resolve the type for the repository object. We do this by repeatedly
690 # Now resolve the type for the repository object. We do this by repeatedly
691 # calling a factory function to produces types for specific aspects of the
691 # calling a factory function to produces types for specific aspects of the
692 # repo's operation. The aggregate returned types are used as base classes
692 # repo's operation. The aggregate returned types are used as base classes
693 # for a dynamically-derived type, which will represent our new repository.
693 # for a dynamically-derived type, which will represent our new repository.
694
694
695 bases = []
695 bases = []
696 extrastate = {}
696 extrastate = {}
697
697
698 for iface, fn in REPO_INTERFACES:
698 for iface, fn in REPO_INTERFACES:
699 # We pass all potentially useful state to give extensions tons of
699 # We pass all potentially useful state to give extensions tons of
700 # flexibility.
700 # flexibility.
701 typ = fn()(
701 typ = fn()(
702 ui=ui,
702 ui=ui,
703 intents=intents,
703 intents=intents,
704 requirements=requirements,
704 requirements=requirements,
705 features=features,
705 features=features,
706 wdirvfs=wdirvfs,
706 wdirvfs=wdirvfs,
707 hgvfs=hgvfs,
707 hgvfs=hgvfs,
708 store=store,
708 store=store,
709 storevfs=storevfs,
709 storevfs=storevfs,
710 storeoptions=storevfs.options,
710 storeoptions=storevfs.options,
711 cachevfs=cachevfs,
711 cachevfs=cachevfs,
712 wcachevfs=wcachevfs,
712 wcachevfs=wcachevfs,
713 extensionmodulenames=extensionmodulenames,
713 extensionmodulenames=extensionmodulenames,
714 extrastate=extrastate,
714 extrastate=extrastate,
715 baseclasses=bases,
715 baseclasses=bases,
716 )
716 )
717
717
718 if not isinstance(typ, type):
718 if not isinstance(typ, type):
719 raise error.ProgrammingError(
719 raise error.ProgrammingError(
720 b'unable to construct type for %s' % iface
720 b'unable to construct type for %s' % iface
721 )
721 )
722
722
723 bases.append(typ)
723 bases.append(typ)
724
724
725 # type() allows you to use characters in type names that wouldn't be
725 # type() allows you to use characters in type names that wouldn't be
726 # recognized as Python symbols in source code. We abuse that to add
726 # recognized as Python symbols in source code. We abuse that to add
727 # rich information about our constructed repo.
727 # rich information about our constructed repo.
728 name = pycompat.sysstr(
728 name = pycompat.sysstr(
729 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
729 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
730 )
730 )
731
731
732 cls = type(name, tuple(bases), {})
732 cls = type(name, tuple(bases), {})
733
733
734 return cls(
734 return cls(
735 baseui=baseui,
735 baseui=baseui,
736 ui=ui,
736 ui=ui,
737 origroot=path,
737 origroot=path,
738 wdirvfs=wdirvfs,
738 wdirvfs=wdirvfs,
739 hgvfs=hgvfs,
739 hgvfs=hgvfs,
740 requirements=requirements,
740 requirements=requirements,
741 supportedrequirements=supportedrequirements,
741 supportedrequirements=supportedrequirements,
742 sharedpath=storebasepath,
742 sharedpath=storebasepath,
743 store=store,
743 store=store,
744 cachevfs=cachevfs,
744 cachevfs=cachevfs,
745 wcachevfs=wcachevfs,
745 wcachevfs=wcachevfs,
746 features=features,
746 features=features,
747 intents=intents,
747 intents=intents,
748 )
748 )
749
749
750
750
def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is vfs object pointing to source repo if the current one is a
    shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    # Build the ordered list of (path, root) config sources. The share
    # source's hgrc is read first (when share-safe is active) so that the
    # local repository's own settings take precedence over it.
    sources = []
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        sources.append((sharedvfs.join(b'hgrc'), sharedvfs.base))
    sources.append((hgvfs.join(b'hgrc'), wdirvfs.base))
    sources.append((hgvfs.join(b'hgrc-not-shared'), wdirvfs.base))

    loaded = False
    for filename, root in sources:
        try:
            ui.readconfig(filename, root=root)
            loaded = True
        except IOError:
            # A missing config file is not an error; just skip it.
            pass

    return loaded
791
791
792
792
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Maps a requirement name to the extensions that must be loaded
    # automatically whenever that requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement in sorted(autoextensions):
        if requirement not in requirements:
            continue

        for extname in autoextensions[requirement]:
            # Respect an explicit user setting (including a disable);
            # only inject the extension when it is entirely unconfigured.
            if ui.hasconfig(b'extensions', extname):
                continue
            ui.setconfig(b'extensions', extname, b'', source=b'autoload')
818
818
819
819
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Requirements supported directly by this module.
    supported = set(localrepository._basesupported)

    # Run ``featuresetupfuncs`` entries, but only those registered by an
    # extension that is actually loaded for this ui instance.
    loadedmodules = {mod.__name__ for name, mod in extensions.extensions(ui)}

    for setupfn in featuresetupfuncs:
        if setupfn.__module__ in loadedmodules:
            setupfn(ui, supported)

    # Compression engines that can write a revlog header contribute
    # derived requirement names.
    for engname in util.compengines:
        engine = util.compengines[engname]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % engname)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')

    return supported
842
842
843
843
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError``
    if any requirement in that set is unknown to the currently loaded
    code, or if the requirements data looks corrupt.

    Returns None on success.
    """
    # NOTE: the docstring previously claimed this returned "a set of
    # supported requirements", which was never true; it has only ever
    # validated and raised.
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        # An empty entry or one not starting with an alphanumeric byte is
        # almost certainly a corrupt .hg/requires file rather than a
        # legitimately unknown feature.
        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
873
873
874
874
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    needs_sparse = requirementsmod.SPARSE_REQUIREMENT in requirements
    if needs_sparse and not sparse.enabled:
        # The repo format requires the sparse extension, which is not
        # active in this configuration.
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )
899
899
900
900
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository.

    The concrete store class is chosen from the requirements: the legacy
    plain layout when the 'store' requirement is absent, the
    filename-encoded layout when it is present, and the fncache variant
    when 'fncache' is also present.
    """
    if b'store' not in requirements:
        return storemod.basicstore(path, vfstype)

    if b'fncache' not in requirements:
        return storemod.encodedstore(path, vfstype)

    dotencode = b'dotencode' in requirements
    return storemod.fncachestore(path, vfstype, dotencode)
912
912
913
913
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    cachesize = ui.configint(b'format', b'manifestcachesize')
    if cachesize is not None:
        options[b'manifestcachesize'] = cachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    has_revlogv1 = b'revlogv1' in requirements
    has_revlogv2 = requirementsmod.REVLOGV2_REQUIREMENT in requirements
    if has_revlogv1 or has_revlogv2:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:
        # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        if writecopiesto in (b'changeset-only', b'compatibility'):
            options[b'copies-storage'] = b'extra'

    return options
951
951
952
952
953 def resolverevlogstorevfsoptions(ui, requirements, features):
953 def resolverevlogstorevfsoptions(ui, requirements, features):
954 """Resolve opener options specific to revlogs."""
954 """Resolve opener options specific to revlogs."""
955
955
956 options = {}
956 options = {}
957 options[b'flagprocessors'] = {}
957 options[b'flagprocessors'] = {}
958
958
959 if b'revlogv1' in requirements:
959 if b'revlogv1' in requirements:
960 options[b'revlogv1'] = True
960 options[b'revlogv1'] = True
961 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
961 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
962 options[b'revlogv2'] = True
962 options[b'revlogv2'] = True
963
963
964 if b'generaldelta' in requirements:
964 if b'generaldelta' in requirements:
965 options[b'generaldelta'] = True
965 options[b'generaldelta'] = True
966
966
967 # experimental config: format.chunkcachesize
967 # experimental config: format.chunkcachesize
968 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
968 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
969 if chunkcachesize is not None:
969 if chunkcachesize is not None:
970 options[b'chunkcachesize'] = chunkcachesize
970 options[b'chunkcachesize'] = chunkcachesize
971
971
972 deltabothparents = ui.configbool(
972 deltabothparents = ui.configbool(
973 b'storage', b'revlog.optimize-delta-parent-choice'
973 b'storage', b'revlog.optimize-delta-parent-choice'
974 )
974 )
975 options[b'deltabothparents'] = deltabothparents
975 options[b'deltabothparents'] = deltabothparents
976
976
977 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
977 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
978 lazydeltabase = False
978 lazydeltabase = False
979 if lazydelta:
979 if lazydelta:
980 lazydeltabase = ui.configbool(
980 lazydeltabase = ui.configbool(
981 b'storage', b'revlog.reuse-external-delta-parent'
981 b'storage', b'revlog.reuse-external-delta-parent'
982 )
982 )
983 if lazydeltabase is None:
983 if lazydeltabase is None:
984 lazydeltabase = not scmutil.gddeltaconfig(ui)
984 lazydeltabase = not scmutil.gddeltaconfig(ui)
985 options[b'lazydelta'] = lazydelta
985 options[b'lazydelta'] = lazydelta
986 options[b'lazydeltabase'] = lazydeltabase
986 options[b'lazydeltabase'] = lazydeltabase
987
987
988 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
988 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
989 if 0 <= chainspan:
989 if 0 <= chainspan:
990 options[b'maxdeltachainspan'] = chainspan
990 options[b'maxdeltachainspan'] = chainspan
991
991
992 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
992 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
993 if mmapindexthreshold is not None:
993 if mmapindexthreshold is not None:
994 options[b'mmapindexthreshold'] = mmapindexthreshold
994 options[b'mmapindexthreshold'] = mmapindexthreshold
995
995
996 withsparseread = ui.configbool(b'experimental', b'sparse-read')
996 withsparseread = ui.configbool(b'experimental', b'sparse-read')
997 srdensitythres = float(
997 srdensitythres = float(
998 ui.config(b'experimental', b'sparse-read.density-threshold')
998 ui.config(b'experimental', b'sparse-read.density-threshold')
999 )
999 )
1000 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1000 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1001 options[b'with-sparse-read'] = withsparseread
1001 options[b'with-sparse-read'] = withsparseread
1002 options[b'sparse-read-density-threshold'] = srdensitythres
1002 options[b'sparse-read-density-threshold'] = srdensitythres
1003 options[b'sparse-read-min-gap-size'] = srmingapsize
1003 options[b'sparse-read-min-gap-size'] = srmingapsize
1004
1004
1005 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1005 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1006 options[b'sparse-revlog'] = sparserevlog
1006 options[b'sparse-revlog'] = sparserevlog
1007 if sparserevlog:
1007 if sparserevlog:
1008 options[b'generaldelta'] = True
1008 options[b'generaldelta'] = True
1009
1009
1010 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
1010 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
1011 options[b'side-data'] = sidedata
1011 options[b'side-data'] = sidedata
1012
1012
1013 maxchainlen = None
1013 maxchainlen = None
1014 if sparserevlog:
1014 if sparserevlog:
1015 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1015 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1016 # experimental config: format.maxchainlen
1016 # experimental config: format.maxchainlen
1017 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1017 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1018 if maxchainlen is not None:
1018 if maxchainlen is not None:
1019 options[b'maxchainlen'] = maxchainlen
1019 options[b'maxchainlen'] = maxchainlen
1020
1020
1021 for r in requirements:
1021 for r in requirements:
1022 # we allow multiple compression engine requirement to co-exist because
1022 # we allow multiple compression engine requirement to co-exist because
1023 # strickly speaking, revlog seems to support mixed compression style.
1023 # strickly speaking, revlog seems to support mixed compression style.
1024 #
1024 #
1025 # The compression used for new entries will be "the last one"
1025 # The compression used for new entries will be "the last one"
1026 prefix = r.startswith
1026 prefix = r.startswith
1027 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1027 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1028 options[b'compengine'] = r.split(b'-', 2)[2]
1028 options[b'compengine'] = r.split(b'-', 2)[2]
1029
1029
1030 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1030 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1031 if options[b'zlib.level'] is not None:
1031 if options[b'zlib.level'] is not None:
1032 if not (0 <= options[b'zlib.level'] <= 9):
1032 if not (0 <= options[b'zlib.level'] <= 9):
1033 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1033 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1034 raise error.Abort(msg % options[b'zlib.level'])
1034 raise error.Abort(msg % options[b'zlib.level'])
1035 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1035 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1036 if options[b'zstd.level'] is not None:
1036 if options[b'zstd.level'] is not None:
1037 if not (0 <= options[b'zstd.level'] <= 22):
1037 if not (0 <= options[b'zstd.level'] <= 22):
1038 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1038 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1039 raise error.Abort(msg % options[b'zstd.level'])
1039 raise error.Abort(msg % options[b'zstd.level'])
1040
1040
1041 if requirementsmod.NARROW_REQUIREMENT in requirements:
1041 if requirementsmod.NARROW_REQUIREMENT in requirements:
1042 options[b'enableellipsis'] = True
1042 options[b'enableellipsis'] = True
1043
1043
1044 if ui.configbool(b'experimental', b'rust.index'):
1044 if ui.configbool(b'experimental', b'rust.index'):
1045 options[b'rust.index'] = True
1045 options[b'rust.index'] = True
1046 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1046 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1047 options[b'persistent-nodemap'] = True
1047 options[b'persistent-nodemap'] = True
1048 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
1048 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1049 options[b'persistent-nodemap.mmap'] = True
1049 options[b'persistent-nodemap.mmap'] = True
1050 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
1050 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
1051 options[b'persistent-nodemap.mode'] = epnm
1051 options[b'persistent-nodemap.mode'] = epnm
1052 if ui.configbool(b'devel', b'persistent-nodemap'):
1052 if ui.configbool(b'devel', b'persistent-nodemap'):
1053 options[b'devel-force-nodemap'] = True
1053 options[b'devel-force-nodemap'] = True
1054
1054
1055 return options
1055 return options
1056
1056
1057
1057
1058 def makemain(**kwargs):
1058 def makemain(**kwargs):
1059 """Produce a type conforming to ``ilocalrepositorymain``."""
1059 """Produce a type conforming to ``ilocalrepositorymain``."""
1060 return localrepository
1060 return localrepository
1061
1061
1062
1062
1063 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1063 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1064 class revlogfilestorage(object):
1064 class revlogfilestorage(object):
1065 """File storage when using revlogs."""
1065 """File storage when using revlogs."""
1066
1066
1067 def file(self, path):
1067 def file(self, path):
1068 if path[0] == b'/':
1068 if path[0] == b'/':
1069 path = path[1:]
1069 path = path[1:]
1070
1070
1071 return filelog.filelog(self.svfs, path)
1071 return filelog.filelog(self.svfs, path)
1072
1072
1073
1073
1074 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1074 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1075 class revlognarrowfilestorage(object):
1075 class revlognarrowfilestorage(object):
1076 """File storage when using revlogs and narrow files."""
1076 """File storage when using revlogs and narrow files."""
1077
1077
1078 def file(self, path):
1078 def file(self, path):
1079 if path[0] == b'/':
1079 if path[0] == b'/':
1080 path = path[1:]
1080 path = path[1:]
1081
1081
1082 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1082 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1083
1083
1084
1084
1085 def makefilestorage(requirements, features, **kwargs):
1085 def makefilestorage(requirements, features, **kwargs):
1086 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1086 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1087 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1087 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1088 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1088 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1089
1089
1090 if requirementsmod.NARROW_REQUIREMENT in requirements:
1090 if requirementsmod.NARROW_REQUIREMENT in requirements:
1091 return revlognarrowfilestorage
1091 return revlognarrowfilestorage
1092 else:
1092 else:
1093 return revlogfilestorage
1093 return revlogfilestorage
1094
1094
1095
1095
1096 # List of repository interfaces and factory functions for them. Each
1096 # List of repository interfaces and factory functions for them. Each
1097 # will be called in order during ``makelocalrepository()`` to iteratively
1097 # will be called in order during ``makelocalrepository()`` to iteratively
1098 # derive the final type for a local repository instance. We capture the
1098 # derive the final type for a local repository instance. We capture the
1099 # function as a lambda so we don't hold a reference and the module-level
1099 # function as a lambda so we don't hold a reference and the module-level
1100 # functions can be wrapped.
1100 # functions can be wrapped.
1101 REPO_INTERFACES = [
1101 REPO_INTERFACES = [
1102 (repository.ilocalrepositorymain, lambda: makemain),
1102 (repository.ilocalrepositorymain, lambda: makemain),
1103 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1103 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1104 ]
1104 ]
1105
1105
1106
1106
1107 @interfaceutil.implementer(repository.ilocalrepositorymain)
1107 @interfaceutil.implementer(repository.ilocalrepositorymain)
1108 class localrepository(object):
1108 class localrepository(object):
1109 """Main class for representing local repositories.
1109 """Main class for representing local repositories.
1110
1110
1111 All local repositories are instances of this class.
1111 All local repositories are instances of this class.
1112
1112
1113 Constructed on its own, instances of this class are not usable as
1113 Constructed on its own, instances of this class are not usable as
1114 repository objects. To obtain a usable repository object, call
1114 repository objects. To obtain a usable repository object, call
1115 ``hg.repository()``, ``localrepo.instance()``, or
1115 ``hg.repository()``, ``localrepo.instance()``, or
1116 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1116 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1117 ``instance()`` adds support for creating new repositories.
1117 ``instance()`` adds support for creating new repositories.
1118 ``hg.repository()`` adds more extension integration, including calling
1118 ``hg.repository()`` adds more extension integration, including calling
1119 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1119 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1120 used.
1120 used.
1121 """
1121 """
1122
1122
1123 # obsolete experimental requirements:
1123 # obsolete experimental requirements:
1124 # - manifestv2: An experimental new manifest format that allowed
1124 # - manifestv2: An experimental new manifest format that allowed
1125 # for stem compression of long paths. Experiment ended up not
1125 # for stem compression of long paths. Experiment ended up not
1126 # being successful (repository sizes went up due to worse delta
1126 # being successful (repository sizes went up due to worse delta
1127 # chains), and the code was deleted in 4.6.
1127 # chains), and the code was deleted in 4.6.
1128 supportedformats = {
1128 supportedformats = {
1129 b'revlogv1',
1129 b'revlogv1',
1130 b'generaldelta',
1130 b'generaldelta',
1131 requirementsmod.TREEMANIFEST_REQUIREMENT,
1131 requirementsmod.TREEMANIFEST_REQUIREMENT,
1132 requirementsmod.COPIESSDC_REQUIREMENT,
1132 requirementsmod.COPIESSDC_REQUIREMENT,
1133 requirementsmod.REVLOGV2_REQUIREMENT,
1133 requirementsmod.REVLOGV2_REQUIREMENT,
1134 requirementsmod.SIDEDATA_REQUIREMENT,
1134 requirementsmod.SIDEDATA_REQUIREMENT,
1135 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1135 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1136 requirementsmod.NODEMAP_REQUIREMENT,
1136 requirementsmod.NODEMAP_REQUIREMENT,
1137 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1137 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1138 requirementsmod.SHARESAFE_REQUIREMENT,
1138 requirementsmod.SHARESAFE_REQUIREMENT,
1139 }
1139 }
1140 _basesupported = supportedformats | {
1140 _basesupported = supportedformats | {
1141 b'store',
1141 b'store',
1142 b'fncache',
1142 b'fncache',
1143 requirementsmod.SHARED_REQUIREMENT,
1143 requirementsmod.SHARED_REQUIREMENT,
1144 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1144 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1145 b'dotencode',
1145 b'dotencode',
1146 requirementsmod.SPARSE_REQUIREMENT,
1146 requirementsmod.SPARSE_REQUIREMENT,
1147 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1147 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1148 }
1148 }
1149
1149
1150 # list of prefix for file which can be written without 'wlock'
1150 # list of prefix for file which can be written without 'wlock'
1151 # Extensions should extend this list when needed
1151 # Extensions should extend this list when needed
1152 _wlockfreeprefix = {
1152 _wlockfreeprefix = {
1153 # We migh consider requiring 'wlock' for the next
1153 # We migh consider requiring 'wlock' for the next
1154 # two, but pretty much all the existing code assume
1154 # two, but pretty much all the existing code assume
1155 # wlock is not needed so we keep them excluded for
1155 # wlock is not needed so we keep them excluded for
1156 # now.
1156 # now.
1157 b'hgrc',
1157 b'hgrc',
1158 b'requires',
1158 b'requires',
1159 # XXX cache is a complicatged business someone
1159 # XXX cache is a complicatged business someone
1160 # should investigate this in depth at some point
1160 # should investigate this in depth at some point
1161 b'cache/',
1161 b'cache/',
1162 # XXX shouldn't be dirstate covered by the wlock?
1162 # XXX shouldn't be dirstate covered by the wlock?
1163 b'dirstate',
1163 b'dirstate',
1164 # XXX bisect was still a bit too messy at the time
1164 # XXX bisect was still a bit too messy at the time
1165 # this changeset was introduced. Someone should fix
1165 # this changeset was introduced. Someone should fix
1166 # the remainig bit and drop this line
1166 # the remainig bit and drop this line
1167 b'bisect.state',
1167 b'bisect.state',
1168 }
1168 }
1169
1169
1170 def __init__(
1170 def __init__(
1171 self,
1171 self,
1172 baseui,
1172 baseui,
1173 ui,
1173 ui,
1174 origroot,
1174 origroot,
1175 wdirvfs,
1175 wdirvfs,
1176 hgvfs,
1176 hgvfs,
1177 requirements,
1177 requirements,
1178 supportedrequirements,
1178 supportedrequirements,
1179 sharedpath,
1179 sharedpath,
1180 store,
1180 store,
1181 cachevfs,
1181 cachevfs,
1182 wcachevfs,
1182 wcachevfs,
1183 features,
1183 features,
1184 intents=None,
1184 intents=None,
1185 ):
1185 ):
1186 """Create a new local repository instance.
1186 """Create a new local repository instance.
1187
1187
1188 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1188 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1189 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1189 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1190 object.
1190 object.
1191
1191
1192 Arguments:
1192 Arguments:
1193
1193
1194 baseui
1194 baseui
1195 ``ui.ui`` instance that ``ui`` argument was based off of.
1195 ``ui.ui`` instance that ``ui`` argument was based off of.
1196
1196
1197 ui
1197 ui
1198 ``ui.ui`` instance for use by the repository.
1198 ``ui.ui`` instance for use by the repository.
1199
1199
1200 origroot
1200 origroot
1201 ``bytes`` path to working directory root of this repository.
1201 ``bytes`` path to working directory root of this repository.
1202
1202
1203 wdirvfs
1203 wdirvfs
1204 ``vfs.vfs`` rooted at the working directory.
1204 ``vfs.vfs`` rooted at the working directory.
1205
1205
1206 hgvfs
1206 hgvfs
1207 ``vfs.vfs`` rooted at .hg/
1207 ``vfs.vfs`` rooted at .hg/
1208
1208
1209 requirements
1209 requirements
1210 ``set`` of bytestrings representing repository opening requirements.
1210 ``set`` of bytestrings representing repository opening requirements.
1211
1211
1212 supportedrequirements
1212 supportedrequirements
1213 ``set`` of bytestrings representing repository requirements that we
1213 ``set`` of bytestrings representing repository requirements that we
1214 know how to open. May be a supetset of ``requirements``.
1214 know how to open. May be a supetset of ``requirements``.
1215
1215
1216 sharedpath
1216 sharedpath
1217 ``bytes`` Defining path to storage base directory. Points to a
1217 ``bytes`` Defining path to storage base directory. Points to a
1218 ``.hg/`` directory somewhere.
1218 ``.hg/`` directory somewhere.
1219
1219
1220 store
1220 store
1221 ``store.basicstore`` (or derived) instance providing access to
1221 ``store.basicstore`` (or derived) instance providing access to
1222 versioned storage.
1222 versioned storage.
1223
1223
1224 cachevfs
1224 cachevfs
1225 ``vfs.vfs`` used for cache files.
1225 ``vfs.vfs`` used for cache files.
1226
1226
1227 wcachevfs
1227 wcachevfs
1228 ``vfs.vfs`` used for cache files related to the working copy.
1228 ``vfs.vfs`` used for cache files related to the working copy.
1229
1229
1230 features
1230 features
1231 ``set`` of bytestrings defining features/capabilities of this
1231 ``set`` of bytestrings defining features/capabilities of this
1232 instance.
1232 instance.
1233
1233
1234 intents
1234 intents
1235 ``set`` of system strings indicating what this repo will be used
1235 ``set`` of system strings indicating what this repo will be used
1236 for.
1236 for.
1237 """
1237 """
1238 self.baseui = baseui
1238 self.baseui = baseui
1239 self.ui = ui
1239 self.ui = ui
1240 self.origroot = origroot
1240 self.origroot = origroot
1241 # vfs rooted at working directory.
1241 # vfs rooted at working directory.
1242 self.wvfs = wdirvfs
1242 self.wvfs = wdirvfs
1243 self.root = wdirvfs.base
1243 self.root = wdirvfs.base
1244 # vfs rooted at .hg/. Used to access most non-store paths.
1244 # vfs rooted at .hg/. Used to access most non-store paths.
1245 self.vfs = hgvfs
1245 self.vfs = hgvfs
1246 self.path = hgvfs.base
1246 self.path = hgvfs.base
1247 self.requirements = requirements
1247 self.requirements = requirements
1248 self.supported = supportedrequirements
1248 self.supported = supportedrequirements
1249 self.sharedpath = sharedpath
1249 self.sharedpath = sharedpath
1250 self.store = store
1250 self.store = store
1251 self.cachevfs = cachevfs
1251 self.cachevfs = cachevfs
1252 self.wcachevfs = wcachevfs
1252 self.wcachevfs = wcachevfs
1253 self.features = features
1253 self.features = features
1254
1254
1255 self.filtername = None
1255 self.filtername = None
1256
1256
1257 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1257 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1258 b'devel', b'check-locks'
1258 b'devel', b'check-locks'
1259 ):
1259 ):
1260 self.vfs.audit = self._getvfsward(self.vfs.audit)
1260 self.vfs.audit = self._getvfsward(self.vfs.audit)
1261 # A list of callback to shape the phase if no data were found.
1261 # A list of callback to shape the phase if no data were found.
1262 # Callback are in the form: func(repo, roots) --> processed root.
1262 # Callback are in the form: func(repo, roots) --> processed root.
1263 # This list it to be filled by extension during repo setup
1263 # This list it to be filled by extension during repo setup
1264 self._phasedefaults = []
1264 self._phasedefaults = []
1265
1265
1266 color.setup(self.ui)
1266 color.setup(self.ui)
1267
1267
1268 self.spath = self.store.path
1268 self.spath = self.store.path
1269 self.svfs = self.store.vfs
1269 self.svfs = self.store.vfs
1270 self.sjoin = self.store.join
1270 self.sjoin = self.store.join
1271 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1271 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1272 b'devel', b'check-locks'
1272 b'devel', b'check-locks'
1273 ):
1273 ):
1274 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1274 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1275 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1275 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1276 else: # standard vfs
1276 else: # standard vfs
1277 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1277 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1278
1278
1279 self._dirstatevalidatewarned = False
1279 self._dirstatevalidatewarned = False
1280
1280
1281 self._branchcaches = branchmap.BranchMapCache()
1281 self._branchcaches = branchmap.BranchMapCache()
1282 self._revbranchcache = None
1282 self._revbranchcache = None
1283 self._filterpats = {}
1283 self._filterpats = {}
1284 self._datafilters = {}
1284 self._datafilters = {}
1285 self._transref = self._lockref = self._wlockref = None
1285 self._transref = self._lockref = self._wlockref = None
1286
1286
1287 # A cache for various files under .hg/ that tracks file changes,
1287 # A cache for various files under .hg/ that tracks file changes,
1288 # (used by the filecache decorator)
1288 # (used by the filecache decorator)
1289 #
1289 #
1290 # Maps a property name to its util.filecacheentry
1290 # Maps a property name to its util.filecacheentry
1291 self._filecache = {}
1291 self._filecache = {}
1292
1292
1293 # hold sets of revision to be filtered
1293 # hold sets of revision to be filtered
1294 # should be cleared when something might have changed the filter value:
1294 # should be cleared when something might have changed the filter value:
1295 # - new changesets,
1295 # - new changesets,
1296 # - phase change,
1296 # - phase change,
1297 # - new obsolescence marker,
1297 # - new obsolescence marker,
1298 # - working directory parent change,
1298 # - working directory parent change,
1299 # - bookmark changes
1299 # - bookmark changes
1300 self.filteredrevcache = {}
1300 self.filteredrevcache = {}
1301
1301
1302 # post-dirstate-status hooks
1302 # post-dirstate-status hooks
1303 self._postdsstatus = []
1303 self._postdsstatus = []
1304
1304
1305 # generic mapping between names and nodes
1305 # generic mapping between names and nodes
1306 self.names = namespaces.namespaces()
1306 self.names = namespaces.namespaces()
1307
1307
1308 # Key to signature value.
1308 # Key to signature value.
1309 self._sparsesignaturecache = {}
1309 self._sparsesignaturecache = {}
1310 # Signature to cached matcher instance.
1310 # Signature to cached matcher instance.
1311 self._sparsematchercache = {}
1311 self._sparsematchercache = {}
1312
1312
1313 self._extrafilterid = repoview.extrafilter(ui)
1313 self._extrafilterid = repoview.extrafilter(ui)
1314
1314
1315 self.filecopiesmode = None
1315 self.filecopiesmode = None
1316 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1316 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1317 self.filecopiesmode = b'changeset-sidedata'
1317 self.filecopiesmode = b'changeset-sidedata'
1318
1318
1319 def _getvfsward(self, origfunc):
1319 def _getvfsward(self, origfunc):
1320 """build a ward for self.vfs"""
1320 """build a ward for self.vfs"""
1321 rref = weakref.ref(self)
1321 rref = weakref.ref(self)
1322
1322
1323 def checkvfs(path, mode=None):
1323 def checkvfs(path, mode=None):
1324 ret = origfunc(path, mode=mode)
1324 ret = origfunc(path, mode=mode)
1325 repo = rref()
1325 repo = rref()
1326 if (
1326 if (
1327 repo is None
1327 repo is None
1328 or not util.safehasattr(repo, b'_wlockref')
1328 or not util.safehasattr(repo, b'_wlockref')
1329 or not util.safehasattr(repo, b'_lockref')
1329 or not util.safehasattr(repo, b'_lockref')
1330 ):
1330 ):
1331 return
1331 return
1332 if mode in (None, b'r', b'rb'):
1332 if mode in (None, b'r', b'rb'):
1333 return
1333 return
1334 if path.startswith(repo.path):
1334 if path.startswith(repo.path):
1335 # truncate name relative to the repository (.hg)
1335 # truncate name relative to the repository (.hg)
1336 path = path[len(repo.path) + 1 :]
1336 path = path[len(repo.path) + 1 :]
1337 if path.startswith(b'cache/'):
1337 if path.startswith(b'cache/'):
1338 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1338 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1339 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1339 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1340 # path prefixes covered by 'lock'
1340 # path prefixes covered by 'lock'
1341 vfs_path_prefixes = (
1341 vfs_path_prefixes = (
1342 b'journal.',
1342 b'journal.',
1343 b'undo.',
1343 b'undo.',
1344 b'strip-backup/',
1344 b'strip-backup/',
1345 b'cache/',
1345 b'cache/',
1346 )
1346 )
1347 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1347 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1348 if repo._currentlock(repo._lockref) is None:
1348 if repo._currentlock(repo._lockref) is None:
1349 repo.ui.develwarn(
1349 repo.ui.develwarn(
1350 b'write with no lock: "%s"' % path,
1350 b'write with no lock: "%s"' % path,
1351 stacklevel=3,
1351 stacklevel=3,
1352 config=b'check-locks',
1352 config=b'check-locks',
1353 )
1353 )
1354 elif repo._currentlock(repo._wlockref) is None:
1354 elif repo._currentlock(repo._wlockref) is None:
1355 # rest of vfs files are covered by 'wlock'
1355 # rest of vfs files are covered by 'wlock'
1356 #
1356 #
1357 # exclude special files
1357 # exclude special files
1358 for prefix in self._wlockfreeprefix:
1358 for prefix in self._wlockfreeprefix:
1359 if path.startswith(prefix):
1359 if path.startswith(prefix):
1360 return
1360 return
1361 repo.ui.develwarn(
1361 repo.ui.develwarn(
1362 b'write with no wlock: "%s"' % path,
1362 b'write with no wlock: "%s"' % path,
1363 stacklevel=3,
1363 stacklevel=3,
1364 config=b'check-locks',
1364 config=b'check-locks',
1365 )
1365 )
1366 return ret
1366 return ret
1367
1367
1368 return checkvfs
1368 return checkvfs
1369
1369
1370 def _getsvfsward(self, origfunc):
1370 def _getsvfsward(self, origfunc):
1371 """build a ward for self.svfs"""
1371 """build a ward for self.svfs"""
1372 rref = weakref.ref(self)
1372 rref = weakref.ref(self)
1373
1373
1374 def checksvfs(path, mode=None):
1374 def checksvfs(path, mode=None):
1375 ret = origfunc(path, mode=mode)
1375 ret = origfunc(path, mode=mode)
1376 repo = rref()
1376 repo = rref()
1377 if repo is None or not util.safehasattr(repo, b'_lockref'):
1377 if repo is None or not util.safehasattr(repo, b'_lockref'):
1378 return
1378 return
1379 if mode in (None, b'r', b'rb'):
1379 if mode in (None, b'r', b'rb'):
1380 return
1380 return
1381 if path.startswith(repo.sharedpath):
1381 if path.startswith(repo.sharedpath):
1382 # truncate name relative to the repository (.hg)
1382 # truncate name relative to the repository (.hg)
1383 path = path[len(repo.sharedpath) + 1 :]
1383 path = path[len(repo.sharedpath) + 1 :]
1384 if repo._currentlock(repo._lockref) is None:
1384 if repo._currentlock(repo._lockref) is None:
1385 repo.ui.develwarn(
1385 repo.ui.develwarn(
1386 b'write with no lock: "%s"' % path, stacklevel=4
1386 b'write with no lock: "%s"' % path, stacklevel=4
1387 )
1387 )
1388 return ret
1388 return ret
1389
1389
1390 return checksvfs
1390 return checksvfs
1391
1391
1392 def close(self):
1392 def close(self):
1393 self._writecaches()
1393 self._writecaches()
1394
1394
1395 def _writecaches(self):
1395 def _writecaches(self):
1396 if self._revbranchcache:
1396 if self._revbranchcache:
1397 self._revbranchcache.write()
1397 self._revbranchcache.write()
1398
1398
1399 def _restrictcapabilities(self, caps):
1399 def _restrictcapabilities(self, caps):
1400 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1400 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1401 caps = set(caps)
1401 caps = set(caps)
1402 capsblob = bundle2.encodecaps(
1402 capsblob = bundle2.encodecaps(
1403 bundle2.getrepocaps(self, role=b'client')
1403 bundle2.getrepocaps(self, role=b'client')
1404 )
1404 )
1405 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1405 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1406 return caps
1406 return caps
1407
1407
1408 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1408 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1409 # self -> auditor -> self._checknested -> self
1409 # self -> auditor -> self._checknested -> self
1410
1410
1411 @property
1411 @property
1412 def auditor(self):
1412 def auditor(self):
1413 # This is only used by context.workingctx.match in order to
1413 # This is only used by context.workingctx.match in order to
1414 # detect files in subrepos.
1414 # detect files in subrepos.
1415 return pathutil.pathauditor(self.root, callback=self._checknested)
1415 return pathutil.pathauditor(self.root, callback=self._checknested)
1416
1416
1417 @property
1417 @property
1418 def nofsauditor(self):
1418 def nofsauditor(self):
1419 # This is only used by context.basectx.match in order to detect
1419 # This is only used by context.basectx.match in order to detect
1420 # files in subrepos.
1420 # files in subrepos.
1421 return pathutil.pathauditor(
1421 return pathutil.pathauditor(
1422 self.root, callback=self._checknested, realfs=False, cached=True
1422 self.root, callback=self._checknested, realfs=False, cached=True
1423 )
1423 )
1424
1424
1425 def _checknested(self, path):
1425 def _checknested(self, path):
1426 """Determine if path is a legal nested repository."""
1426 """Determine if path is a legal nested repository."""
1427 if not path.startswith(self.root):
1427 if not path.startswith(self.root):
1428 return False
1428 return False
1429 subpath = path[len(self.root) + 1 :]
1429 subpath = path[len(self.root) + 1 :]
1430 normsubpath = util.pconvert(subpath)
1430 normsubpath = util.pconvert(subpath)
1431
1431
1432 # XXX: Checking against the current working copy is wrong in
1432 # XXX: Checking against the current working copy is wrong in
1433 # the sense that it can reject things like
1433 # the sense that it can reject things like
1434 #
1434 #
1435 # $ hg cat -r 10 sub/x.txt
1435 # $ hg cat -r 10 sub/x.txt
1436 #
1436 #
1437 # if sub/ is no longer a subrepository in the working copy
1437 # if sub/ is no longer a subrepository in the working copy
1438 # parent revision.
1438 # parent revision.
1439 #
1439 #
1440 # However, it can of course also allow things that would have
1440 # However, it can of course also allow things that would have
1441 # been rejected before, such as the above cat command if sub/
1441 # been rejected before, such as the above cat command if sub/
1442 # is a subrepository now, but was a normal directory before.
1442 # is a subrepository now, but was a normal directory before.
1443 # The old path auditor would have rejected by mistake since it
1443 # The old path auditor would have rejected by mistake since it
1444 # panics when it sees sub/.hg/.
1444 # panics when it sees sub/.hg/.
1445 #
1445 #
1446 # All in all, checking against the working copy seems sensible
1446 # All in all, checking against the working copy seems sensible
1447 # since we want to prevent access to nested repositories on
1447 # since we want to prevent access to nested repositories on
1448 # the filesystem *now*.
1448 # the filesystem *now*.
1449 ctx = self[None]
1449 ctx = self[None]
1450 parts = util.splitpath(subpath)
1450 parts = util.splitpath(subpath)
1451 while parts:
1451 while parts:
1452 prefix = b'/'.join(parts)
1452 prefix = b'/'.join(parts)
1453 if prefix in ctx.substate:
1453 if prefix in ctx.substate:
1454 if prefix == normsubpath:
1454 if prefix == normsubpath:
1455 return True
1455 return True
1456 else:
1456 else:
1457 sub = ctx.sub(prefix)
1457 sub = ctx.sub(prefix)
1458 return sub.checknested(subpath[len(prefix) + 1 :])
1458 return sub.checknested(subpath[len(prefix) + 1 :])
1459 else:
1459 else:
1460 parts.pop()
1460 parts.pop()
1461 return False
1461 return False
1462
1462
1463 def peer(self):
1463 def peer(self):
1464 return localpeer(self) # not cached to avoid reference cycle
1464 return localpeer(self) # not cached to avoid reference cycle
1465
1465
1466 def unfiltered(self):
1466 def unfiltered(self):
1467 """Return unfiltered version of the repository
1467 """Return unfiltered version of the repository
1468
1468
1469 Intended to be overwritten by filtered repo."""
1469 Intended to be overwritten by filtered repo."""
1470 return self
1470 return self
1471
1471
1472 def filtered(self, name, visibilityexceptions=None):
1472 def filtered(self, name, visibilityexceptions=None):
1473 """Return a filtered version of a repository
1473 """Return a filtered version of a repository
1474
1474
1475 The `name` parameter is the identifier of the requested view. This
1475 The `name` parameter is the identifier of the requested view. This
1476 will return a repoview object set "exactly" to the specified view.
1476 will return a repoview object set "exactly" to the specified view.
1477
1477
1478 This function does not apply recursive filtering to a repository. For
1478 This function does not apply recursive filtering to a repository. For
1479 example calling `repo.filtered("served")` will return a repoview using
1479 example calling `repo.filtered("served")` will return a repoview using
1480 the "served" view, regardless of the initial view used by `repo`.
1480 the "served" view, regardless of the initial view used by `repo`.
1481
1481
1482 In other word, there is always only one level of `repoview` "filtering".
1482 In other word, there is always only one level of `repoview` "filtering".
1483 """
1483 """
1484 if self._extrafilterid is not None and b'%' not in name:
1484 if self._extrafilterid is not None and b'%' not in name:
1485 name = name + b'%' + self._extrafilterid
1485 name = name + b'%' + self._extrafilterid
1486
1486
1487 cls = repoview.newtype(self.unfiltered().__class__)
1487 cls = repoview.newtype(self.unfiltered().__class__)
1488 return cls(self, name, visibilityexceptions)
1488 return cls(self, name, visibilityexceptions)
1489
1489
@mixedrepostorecache(
    (b'bookmarks', b'plain'),
    (b'bookmarks.current', b'plain'),
    (b'bookmarks', b''),
    (b'00changelog.i', b''),
)
def _bookmarks(self):
    """The bookmark store for this repository (filecache-managed)."""
    # Since the multiple files involved in the transaction cannot be
    # written atomically (with current repository format), there is a race
    # condition here.
    #
    # 1) changelog content A is read
    # 2) outside transaction update changelog to content B
    # 3) outside transaction update bookmark file referring to content B
    # 4) bookmarks file content is read and filtered against changelog-A
    #
    # When this happens, bookmarks against nodes missing from A are dropped.
    #
    # Having this happening during read is not great, but it becomes worse
    # when this happens during write because the bookmarks to the "unknown"
    # nodes will be dropped for good. However, writes happen within locks.
    # This locking makes it possible to have a race free consistent read.
    # For this purpose data read from disc before locking are
    # "invalidated" right after the locks are taken. These invalidations are
    # "light", the `filecache` mechanism keeps the data in memory and will
    # reuse them if the underlying files did not change. Not parsing the
    # same data multiple times helps performances.
    #
    # Unfortunately in the case described above, the files tracked by the
    # bookmarks file cache might not have changed, but the in-memory
    # content is still "wrong" because we used an older changelog content
    # to process the on-disk data. So after locking, the changelog would be
    # refreshed but `_bookmarks` would be preserved.
    # Adding `00changelog.i` to the list of tracked files is not
    # enough, because at the time we build the content for `_bookmarks` in
    # (4), the changelog file has already diverged from the content used
    # for loading `changelog` in (1)
    #
    # To prevent the issue, we force the changelog to be explicitly
    # reloaded while computing `_bookmarks`. The data race can still happen
    # without the lock (with a narrower window), but it would no longer go
    # undetected during the lock time refresh.
    #
    # The new schedule is as follow
    #
    # 1) filecache logic detect that `_bookmarks` needs to be computed
    # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
    # 3) We force `changelog` filecache to be tested
    # 4) cachestat for `changelog` are captured (for changelog)
    # 5) `_bookmarks` is computed and cached
    #
    # The step in (3) ensures we have a changelog at least as recent as the
    # cache stat computed in (1). As a result at locking time:
    #  * if the changelog did not change since (1) -> we can reuse the data
    #  * otherwise -> the bookmarks get refreshed.
    self._refreshchangelog()
    return bookmarks.bmstore(self)
1547
1547
def _refreshchangelog(self):
    """Drop the cached changelog so it is reloaded from disk on next use.

    Skipped while a transaction is open, where pending in-memory data
    must be preserved.
    """
    outside_transaction = self.currenttransaction() is None
    if outside_transaction and 'changelog' in vars(self):
        del self.changelog
1552
1552
@property
def _activebookmark(self):
    """Name of the currently active bookmark, or None."""
    return self._bookmarks.active
1556
1556
# _phasesets depends on the changelog: ideally _phasecache.invalidate()
# would run whenever '00changelog.i' changes, but that cannot easily be
# expressed with the filecache mechanism, so the changelog file is simply
# listed as an extra dependency here.
@storecache(b'phaseroots', b'00changelog.i')
def _phasecache(self):
    """Phase information for this repository (filecache-managed)."""
    return phases.phasecache(self, self._phasedefaults)
1563
1563
@storecache(b'obsstore')
def obsstore(self):
    """The obsolescence-marker store for this repository."""
    return obsolete.makestore(self.ui, self)
1567
1567
@storecache(b'00changelog.i')
def changelog(self):
    """The repository changelog (filecache-managed)."""
    # Loading the dirstate before the changelog avoids a race,
    # see issue6303.
    self.dirstate.prefetch_parents()
    return self.store.changelog(txnutil.mayhavepending(self.root))
1573
1573
@storecache(b'00manifest.i')
def manifestlog(self):
    """A manifestlog bound to this repository's store and narrow match."""
    return self.store.manifestlog(self, self._storenarrowmatch)
1577
1577
@repofilecache(b'dirstate')
def dirstate(self):
    """The working-directory state (filecache-managed)."""
    return self._makedirstate()
1581
1581
def _makedirstate(self):
    """Extension point for wrapping the dirstate per-repo."""

    def sparse_matcher():
        # computed lazily so the sparse config is read on demand
        return sparse.matcher(self)

    return dirstate.dirstate(
        self.vfs, self.ui, self.root, self._dirstatevalidate, sparse_matcher
    )
1589
1589
def _dirstatevalidate(self, node):
    """Return ``node`` when it is a known changeset, ``nullid`` otherwise.

    Emits a warning (once per repository) when the recorded working
    directory parent is unknown to the changelog.
    """
    try:
        self.changelog.rev(node)
    except error.LookupError:
        if not self._dirstatevalidatewarned:
            # warn only once to avoid flooding the output
            self._dirstatevalidatewarned = True
            self.ui.warn(
                _(b"warning: ignoring unknown working parent %s!\n")
                % short(node)
            )
        return nullid
    else:
        return node
1602
1602
@storecache(narrowspec.FILENAME)
def narrowpats(self):
    """The ``(includes, excludes)`` pattern pair from this repository's
    narrowspec."""
    return narrowspec.load(self)
1610
1610
@storecache(narrowspec.FILENAME)
def _storenarrowmatch(self):
    """Narrowspec matcher for store content (no working-copy check)."""
    if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
        # non-narrow repositories match everything
        return matchmod.always()
    include, exclude = self.narrowpats
    return narrowspec.match(self.root, include=include, exclude=exclude)
1617
1617
@storecache(narrowspec.FILENAME)
def _narrowmatch(self):
    """Narrowspec matcher, validated against the working copy."""
    if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
        return matchmod.always()
    # make sure the on-disk spec and the working copy spec agree
    narrowspec.checkworkingcopynarrowspec(self)
    include, exclude = self.narrowpats
    return narrowspec.match(self.root, include=include, exclude=exclude)
1625
1625
def narrowmatch(self, match=None, includeexact=False):
    """Matcher corresponding to the repo's narrowspec.

    When ``match`` is given, it is intersected with the narrow matcher.

    When ``includeexact`` is True, exact matches from ``match`` are kept
    even when they fall outside the narrowspec.
    """
    if not match:
        return self._narrowmatch
    narrow = self._narrowmatch
    if includeexact and not narrow.always():
        # do not exclude explicitly-specified paths so that they can
        # be warned about later instead of silently dropped
        exact = matchmod.exact(match.files())
        narrow = matchmod.unionmatcher([narrow, exact])
    return matchmod.intersectmatchers(match, narrow)
1644
1644
def setnarrowpats(self, newincludes, newexcludes):
    """Persist a new narrowspec and invalidate every cached matcher."""
    narrowspec.save(self, newincludes, newexcludes)
    self.invalidate(clearfilecache=True)
1648
1648
@unfilteredpropertycache
def _quick_access_changeid_null(self):
    """Fast-path lookup table resolving the null revision aliases."""
    null_pair = (nullrev, nullid)
    return {
        b'null': null_pair,
        nullrev: null_pair,
        nullid: null_pair,
    }
1656
1656
@unfilteredpropertycache
def _quick_access_changeid_wc(self):
    # Fast-path table that also covers the working copy parents (and
    # their parents). Only usable for filters that guarantee the working
    # copy parents are visible.
    quick = self._quick_access_changeid_null.copy()
    cl = self.unfiltered().changelog
    for node in self.dirstate.parents():
        if node == nullid:
            continue
        rev = cl.index.get_rev(node)
        if rev is None:
            # unknown working copy parent case:
            #
            # skip the fast path and let higher code deal with it
            continue
        pair = (rev, node)
        quick[rev] = pair
        quick[node] = pair
        # also add the parents of the parents
        for r in cl.parentrevs(rev):
            if r == nullrev:
                continue
            n = cl.node(r)
            pair = (r, n)
            quick[r] = pair
            quick[n] = pair
    # map b'.' to the first parent when it is not null
    p1node = self.dirstate.p1()
    if p1node != nullid:
        quick[b'.'] = quick[p1node]
    return quick
1687
1687
@unfilteredmethod
def _quick_access_changeid_invalidate(self):
    """Drop the cached working-copy fast-path table, if present."""
    self.__dict__.pop('_quick_access_changeid_wc', None)
1692
1692
@property
def _quick_access_changeid(self):
    """A helper dictionary for ``__getitem__`` calls.

    Maps a handful of symbols that can be recognised right away,
    without further processing, to their ``(rev, node)`` pair.
    """
    if self.filtername in repoview.filter_has_wc:
        return self._quick_access_changeid_wc
    return self._quick_access_changeid_null
1703
1703
def __getitem__(self, changeid):
    """Return the context for ``changeid``.

    ``changeid`` may be None (working context), a context, a slice of
    revisions, an integer revision, a 20-byte binary node, a 40-byte hex
    node, or one of the recognised symbols (b'.', b'tip', b'null', ...).
    """
    # dealing with special cases
    if changeid is None:
        return context.workingctx(self)
    if isinstance(changeid, context.basectx):
        return changeid

    # dealing with multiple revisions
    if isinstance(changeid, slice):
        # wdirrev isn't contiguous so the slice shouldn't include it
        return [
            self[i]
            for i in pycompat.xrange(*changeid.indices(len(self)))
            if i not in self.changelog.filteredrevs
        ]

    # dealing with some special values
    quick_access = self._quick_access_changeid.get(changeid)
    if quick_access is not None:
        rev, node = quick_access
        return context.changectx(self, rev, node, maybe_filtered=False)
    if changeid == b'tip':
        node = self.changelog.tip()
        rev = self.changelog.rev(node)
        return context.changectx(self, rev, node)

    # dealing with arbitrary values
    try:
        if isinstance(changeid, int):
            node = self.changelog.node(changeid)
            rev = changeid
        elif changeid == b'.':
            # this is a hack to delay/avoid loading obsmarkers
            # when we know that '.' won't be hidden
            node = self.dirstate.p1()
            rev = self.unfiltered().changelog.rev(node)
        elif len(changeid) == 20:
            try:
                node = changeid
                rev = self.changelog.rev(changeid)
            except error.FilteredLookupError:
                changeid = hex(changeid)  # for the error message
                raise
            except LookupError:
                # check if it might have come from damaged dirstate
                #
                # XXX we could avoid the unfiltered if we had a recognizable
                # exception for filtered changeset access
                if (
                    self.local()
                    and changeid in self.unfiltered().dirstate.parents()
                ):
                    msg = _(b"working directory has unknown parent '%s'!")
                    raise error.Abort(msg % short(changeid))
                changeid = hex(changeid)  # for the error message
                raise

        elif len(changeid) == 40:
            node = bin(changeid)
            rev = self.changelog.rev(node)
        else:
            raise error.ProgrammingError(
                b"unsupported changeid '%s' of type %s"
                % (changeid, pycompat.bytestr(type(changeid)))
            )

        return context.changectx(self, rev, node)

    except (error.FilteredIndexError, error.FilteredLookupError):
        raise error.FilteredRepoLookupError(
            _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
        )
    except (IndexError, LookupError):
        raise error.RepoLookupError(
            _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
        )
    except error.WdirUnsupported:
        return context.workingctx(self)
1782
1782
def __contains__(self, changeid):
    """True if the given changeid exists"""
    try:
        self[changeid]
    except error.RepoLookupError:
        return False
    else:
        return True
1790
1790
def __nonzero__(self):
    # a repository object is always truthy
    return True

# Python 3 spelling of the truthiness hook
__bool__ = __nonzero__
1795
1795
def __len__(self):
    # count on the unfiltered changelog: computing the filtered view
    # just to get a length would be needlessly expensive
    return len(self.unfiltered().changelog)
1800
1800
def __iter__(self):
    """Iterate over the (possibly filtered) revision numbers."""
    return iter(self.changelog)
1803
1803
def revs(self, expr, *args):
    """Find revisions matching a revset.

    The revset is specified as a string ``expr`` that may contain
    %-formatting to escape certain types. See ``revsetlang.formatspec``.

    Revset aliases from the configuration are not expanded. To expand
    user aliases, consider calling ``scmutil.revrange()`` or
    ``repo.anyrevs([expr], user=True)``.

    Returns a smartset.abstractsmartset, which is a list-like interface
    that contains integer revisions.
    """
    spec_tree = revsetlang.spectree(expr, *args)
    matcher = revset.makematcher(spec_tree)
    return matcher(self)
1819
1819
def set(self, expr, *args):
    """Find revisions matching a revset and emit changectx instances.

    This is a convenience wrapper around ``revs()`` that iterates the
    result and is a generator of changectx instances.

    Revset aliases from the configuration are not expanded. To expand
    user aliases, consider calling ``scmutil.revrange()``.
    """
    for rev in self.revs(expr, *args):
        yield self[rev]
1831
1831
def anyrevs(self, specs, user=False, localalias=None):
    """Find revisions matching one of the given revsets.

    Revset aliases from the configuration are not expanded by default. To
    expand user aliases, specify ``user=True``. To provide some local
    definitions overriding user aliases, set ``localalias`` to
    ``{name: definitionstring}``.
    """
    # fast paths for the two most common single-spec queries
    if specs == [b'null']:
        return revset.baseset([nullrev])
    if specs == [b'.']:
        quick_data = self._quick_access_changeid.get(b'.')
        if quick_data is not None:
            return revset.baseset([quick_data[0]])
    if user:
        matcher = revset.matchany(
            self.ui,
            specs,
            lookup=revset.lookupfn(self),
            localalias=localalias,
        )
    else:
        matcher = revset.matchany(None, specs, localalias=localalias)
    return matcher(self)
1856
1856
def url(self):
    """Return the URL of this (local, file-based) repository."""
    return b'file:' + self.root
1859
1859
def hook(self, name, throw=False, **args):
    """Invoke the hook ``name``, passing this repo instance.

    A convenience method to aid invoking hooks. Extensions likely
    won't call this unless they have registered a custom hook or are
    replacing code that is expected to call a hook.
    """
    return hook.hook(self.ui, self, name, throw, **args)
1868
1868
@filteredpropertycache
def _tagscache(self):
    """Returns a tagscache object that contains various tags related
    caches."""

    # A plain namespace object: having a single decorated function (this
    # one) own the cache simplifies its management; everything else just
    # reads attributes off the returned object.
    class tagscache(object):
        def __init__(self):
            # tags maps tag name to node; tagtypes maps tag name to
            # 'global' or 'local'. (Global tags are defined by .hgtags
            # across all heads, and local tags are defined in
            # .hg/localtags.) They constitute the in-memory cache of tags.
            self.tags = self.tagtypes = None

            # derived caches, populated lazily by tagslist()/nodetags()
            self.nodetagscache = self.tagslist = None

    cache = tagscache()
    cache.tags, cache.tagtypes = self._findtags()
    return cache
1891
1891
def tags(self):
    '''return a mapping of tag to node'''
    if self.changelog.filteredrevs:
        # some revisions are hidden: recompute instead of trusting the
        # cache built against the unfiltered changelog
        alltags, _tagtypes = self._findtags()
    else:
        alltags = self._tagscache.tags
    rev = self.changelog.rev
    result = {}
    for name, node in pycompat.iteritems(alltags):
        try:
            # raises for tags pointing at unknown nodes; skip those
            rev(node)
        except (error.LookupError, ValueError):
            continue
        result[name] = node
    return result
1908
1908
def _findtags(self):
    """Do the hard work of finding tags. Return a pair of dicts
    (tags, tagtypes) where tags maps tag name to node, and tagtypes
    maps tag name to a string like 'global' or 'local'.
    Subclasses or extensions are free to add their own tags, but
    should be aware that the returned dicts will be retained for the
    duration of the localrepo object."""

    # XXX what tagtype should subclasses/extensions use? Currently
    # mq and bookmarks add tags, but do not set the tagtype at all.
    # Should each extension invent its own tag type? Should there
    # be one tagtype for all such "virtual" tags? Or is the status
    # quo fine?

    # map tag name to (node, hist)
    alltags = tagsmod.findglobaltags(self.ui, self)
    # map tag name to tag type
    tagtypes = {tag: b'global' for tag in alltags}

    tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

    # Build the return dicts. Tag names have to be re-encoded because
    # the tags module always uses UTF-8 (in order not to lose info
    # writing to the cache), while the rest of Mercurial wants them in
    # the local encoding. Tags on the null node are dropped.
    tags = {
        encoding.tolocal(name): node
        for name, (node, hist) in pycompat.iteritems(alltags)
        if node != nullid
    }
    tags[b'tip'] = self.changelog.tip()
    tagtypes = {
        encoding.tolocal(name): value
        for name, value in pycompat.iteritems(tagtypes)
    }
    return (tags, tagtypes)
1944
1944
def tagtype(self, tagname):
    """
    return the type of the given tag. result can be:

    'local'  : a local tag
    'global' : a global tag
    None     : tag does not exist
    """
    return self._tagscache.tagtypes.get(tagname)
1955
1955
1956 def tagslist(self):
1956 def tagslist(self):
1957 '''return a list of tags ordered by revision'''
1957 '''return a list of tags ordered by revision'''
1958 if not self._tagscache.tagslist:
1958 if not self._tagscache.tagslist:
1959 l = []
1959 l = []
1960 for t, n in pycompat.iteritems(self.tags()):
1960 for t, n in pycompat.iteritems(self.tags()):
1961 l.append((self.changelog.rev(n), t, n))
1961 l.append((self.changelog.rev(n), t, n))
1962 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1962 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1963
1963
1964 return self._tagscache.tagslist
1964 return self._tagscache.tagslist
1965
1965
1966 def nodetags(self, node):
1966 def nodetags(self, node):
1967 '''return the tags associated with a node'''
1967 '''return the tags associated with a node'''
1968 if not self._tagscache.nodetagscache:
1968 if not self._tagscache.nodetagscache:
1969 nodetagscache = {}
1969 nodetagscache = {}
1970 for t, n in pycompat.iteritems(self._tagscache.tags):
1970 for t, n in pycompat.iteritems(self._tagscache.tags):
1971 nodetagscache.setdefault(n, []).append(t)
1971 nodetagscache.setdefault(n, []).append(t)
1972 for tags in pycompat.itervalues(nodetagscache):
1972 for tags in pycompat.itervalues(nodetagscache):
1973 tags.sort()
1973 tags.sort()
1974 self._tagscache.nodetagscache = nodetagscache
1974 self._tagscache.nodetagscache = nodetagscache
1975 return self._tagscache.nodetagscache.get(node, [])
1975 return self._tagscache.nodetagscache.get(node, [])
1976
1976
1977 def nodebookmarks(self, node):
1977 def nodebookmarks(self, node):
1978 """return the list of bookmarks pointing to the specified node"""
1978 """return the list of bookmarks pointing to the specified node"""
1979 return self._bookmarks.names(node)
1979 return self._bookmarks.names(node)
1980
1980
1981 def branchmap(self):
1981 def branchmap(self):
1982 """returns a dictionary {branch: [branchheads]} with branchheads
1982 """returns a dictionary {branch: [branchheads]} with branchheads
1983 ordered by increasing revision number"""
1983 ordered by increasing revision number"""
1984 return self._branchcaches[self]
1984 return self._branchcaches[self]
1985
1985
1986 @unfilteredmethod
1986 @unfilteredmethod
1987 def revbranchcache(self):
1987 def revbranchcache(self):
1988 if not self._revbranchcache:
1988 if not self._revbranchcache:
1989 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1989 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1990 return self._revbranchcache
1990 return self._revbranchcache
1991
1991
1992 def branchtip(self, branch, ignoremissing=False):
1992 def branchtip(self, branch, ignoremissing=False):
1993 """return the tip node for a given branch
1993 """return the tip node for a given branch
1994
1994
1995 If ignoremissing is True, then this method will not raise an error.
1995 If ignoremissing is True, then this method will not raise an error.
1996 This is helpful for callers that only expect None for a missing branch
1996 This is helpful for callers that only expect None for a missing branch
1997 (e.g. namespace).
1997 (e.g. namespace).
1998
1998
1999 """
1999 """
2000 try:
2000 try:
2001 return self.branchmap().branchtip(branch)
2001 return self.branchmap().branchtip(branch)
2002 except KeyError:
2002 except KeyError:
2003 if not ignoremissing:
2003 if not ignoremissing:
2004 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2004 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2005 else:
2005 else:
2006 pass
2006 pass
2007
2007
2008 def lookup(self, key):
2008 def lookup(self, key):
2009 node = scmutil.revsymbol(self, key).node()
2009 node = scmutil.revsymbol(self, key).node()
2010 if node is None:
2010 if node is None:
2011 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2011 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2012 return node
2012 return node
2013
2013
2014 def lookupbranch(self, key):
2014 def lookupbranch(self, key):
2015 if self.branchmap().hasbranch(key):
2015 if self.branchmap().hasbranch(key):
2016 return key
2016 return key
2017
2017
2018 return scmutil.revsymbol(self, key).branch()
2018 return scmutil.revsymbol(self, key).branch()
2019
2019
2020 def known(self, nodes):
2020 def known(self, nodes):
2021 cl = self.changelog
2021 cl = self.changelog
2022 get_rev = cl.index.get_rev
2022 get_rev = cl.index.get_rev
2023 filtered = cl.filteredrevs
2023 filtered = cl.filteredrevs
2024 result = []
2024 result = []
2025 for n in nodes:
2025 for n in nodes:
2026 r = get_rev(n)
2026 r = get_rev(n)
2027 resp = not (r is None or r in filtered)
2027 resp = not (r is None or r in filtered)
2028 result.append(resp)
2028 result.append(resp)
2029 return result
2029 return result
2030
2030
2031 def local(self):
2031 def local(self):
2032 return self
2032 return self
2033
2033
2034 def publishing(self):
2034 def publishing(self):
2035 # it's safe (and desirable) to trust the publish flag unconditionally
2035 # it's safe (and desirable) to trust the publish flag unconditionally
2036 # so that we don't finalize changes shared between users via ssh or nfs
2036 # so that we don't finalize changes shared between users via ssh or nfs
2037 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2037 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2038
2038
2039 def cancopy(self):
2039 def cancopy(self):
2040 # so statichttprepo's override of local() works
2040 # so statichttprepo's override of local() works
2041 if not self.local():
2041 if not self.local():
2042 return False
2042 return False
2043 if not self.publishing():
2043 if not self.publishing():
2044 return True
2044 return True
2045 # if publishing we can't copy if there is filtered content
2045 # if publishing we can't copy if there is filtered content
2046 return not self.filtered(b'visible').changelog.filteredrevs
2046 return not self.filtered(b'visible').changelog.filteredrevs
2047
2047
2048 def shared(self):
2048 def shared(self):
2049 '''the type of shared repository (None if not shared)'''
2049 '''the type of shared repository (None if not shared)'''
2050 if self.sharedpath != self.path:
2050 if self.sharedpath != self.path:
2051 return b'store'
2051 return b'store'
2052 return None
2052 return None
2053
2053
2054 def wjoin(self, f, *insidef):
2054 def wjoin(self, f, *insidef):
2055 return self.vfs.reljoin(self.root, f, *insidef)
2055 return self.vfs.reljoin(self.root, f, *insidef)
2056
2056
2057 def setparents(self, p1, p2=nullid):
2057 def setparents(self, p1, p2=nullid):
2058 self[None].setparents(p1, p2)
2058 self[None].setparents(p1, p2)
2059 self._quick_access_changeid_invalidate()
2059 self._quick_access_changeid_invalidate()
2060
2060
2061 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2061 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2062 """changeid must be a changeset revision, if specified.
2062 """changeid must be a changeset revision, if specified.
2063 fileid can be a file revision or node."""
2063 fileid can be a file revision or node."""
2064 return context.filectx(
2064 return context.filectx(
2065 self, path, changeid, fileid, changectx=changectx
2065 self, path, changeid, fileid, changectx=changectx
2066 )
2066 )
2067
2067
2068 def getcwd(self):
2068 def getcwd(self):
2069 return self.dirstate.getcwd()
2069 return self.dirstate.getcwd()
2070
2070
2071 def pathto(self, f, cwd=None):
2071 def pathto(self, f, cwd=None):
2072 return self.dirstate.pathto(f, cwd)
2072 return self.dirstate.pathto(f, cwd)
2073
2073
    def _loadfilter(self, filter):
        """Load and cache the filter patterns from config section
        ``filter`` (e.g. b'encode' or b'decode').

        Returns a list of ``(matcher, filterfn, params)`` tuples where
        ``matcher`` matches file names, ``filterfn`` transforms file data
        and ``params`` is the remainder of the configured command string.
        """
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    # b'!' explicitly disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        # registered in-process data filter; the rest of
                        # the command string becomes its parameters
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    # no registered filter matched: run the configured
                    # command as an external filter process
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
2099
2099
2100 def _filter(self, filterpats, filename, data):
2100 def _filter(self, filterpats, filename, data):
2101 for mf, fn, cmd in filterpats:
2101 for mf, fn, cmd in filterpats:
2102 if mf(filename):
2102 if mf(filename):
2103 self.ui.debug(
2103 self.ui.debug(
2104 b"filtering %s through %s\n"
2104 b"filtering %s through %s\n"
2105 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2105 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2106 )
2106 )
2107 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2107 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2108 break
2108 break
2109
2109
2110 return data
2110 return data
2111
2111
2112 @unfilteredpropertycache
2112 @unfilteredpropertycache
2113 def _encodefilterpats(self):
2113 def _encodefilterpats(self):
2114 return self._loadfilter(b'encode')
2114 return self._loadfilter(b'encode')
2115
2115
2116 @unfilteredpropertycache
2116 @unfilteredpropertycache
2117 def _decodefilterpats(self):
2117 def _decodefilterpats(self):
2118 return self._loadfilter(b'decode')
2118 return self._loadfilter(b'decode')
2119
2119
2120 def adddatafilter(self, name, filter):
2120 def adddatafilter(self, name, filter):
2121 self._datafilters[name] = filter
2121 self._datafilters[name] = filter
2122
2122
2123 def wread(self, filename):
2123 def wread(self, filename):
2124 if self.wvfs.islink(filename):
2124 if self.wvfs.islink(filename):
2125 data = self.wvfs.readlink(filename)
2125 data = self.wvfs.readlink(filename)
2126 else:
2126 else:
2127 data = self.wvfs.read(filename)
2127 data = self.wvfs.read(filename)
2128 return self._filter(self._encodefilterpats, filename, data)
2128 return self._filter(self._encodefilterpats, filename, data)
2129
2129
2130 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2130 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2131 """write ``data`` into ``filename`` in the working directory
2131 """write ``data`` into ``filename`` in the working directory
2132
2132
2133 This returns length of written (maybe decoded) data.
2133 This returns length of written (maybe decoded) data.
2134 """
2134 """
2135 data = self._filter(self._decodefilterpats, filename, data)
2135 data = self._filter(self._decodefilterpats, filename, data)
2136 if b'l' in flags:
2136 if b'l' in flags:
2137 self.wvfs.symlink(data, filename)
2137 self.wvfs.symlink(data, filename)
2138 else:
2138 else:
2139 self.wvfs.write(
2139 self.wvfs.write(
2140 filename, data, backgroundclose=backgroundclose, **kwargs
2140 filename, data, backgroundclose=backgroundclose, **kwargs
2141 )
2141 )
2142 if b'x' in flags:
2142 if b'x' in flags:
2143 self.wvfs.setflags(filename, False, True)
2143 self.wvfs.setflags(filename, False, True)
2144 else:
2144 else:
2145 self.wvfs.setflags(filename, False, False)
2145 self.wvfs.setflags(filename, False, False)
2146 return len(data)
2146 return len(data)
2147
2147
2148 def wwritedata(self, filename, data):
2148 def wwritedata(self, filename, data):
2149 return self._filter(self._decodefilterpats, filename, data)
2149 return self._filter(self._decodefilterpats, filename, data)
2150
2150
2151 def currenttransaction(self):
2151 def currenttransaction(self):
2152 """return the current transaction or None if non exists"""
2152 """return the current transaction or None if non exists"""
2153 if self._transref:
2153 if self._transref:
2154 tr = self._transref()
2154 tr = self._transref()
2155 else:
2155 else:
2156 tr = None
2156 tr = None
2157
2157
2158 if tr and tr.running():
2158 if tr and tr.running():
2159 return tr
2159 return tr
2160 return None
2160 return None
2161
2161
    def transaction(self, desc, report=None):
        """Open a new transaction, or nest into the one already running.

        ``desc`` is a short byte-string description (e.g. b'commit',
        b'strip') used for hook arguments, the journal and the
        transaction name. ``report`` is an optional callable used to
        report transaction output; it defaults to ``self.ui.warn``.

        Returns a ``transaction.transaction`` object. Raises
        ``error.ProgrammingError`` when devel lock checks are enabled and
        no lock is held, and ``error.RepoError`` when an abandoned
        journal file is found.
        """
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # build a (practically) unique transaction id for hooks
        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movement from a code perspective. So we fallback to a
        # tracking at the repository level. One could envision to track changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with case where transaction expose new heads without changegroup
        # being involved (eg: phase movement).
        #
        # For now, We gate the feature behind a flag since this likely comes
        # with performance impacts. The current code run more often than needed
        # and do not use caches as much as it could. The current focus is on
        # the behavior of the feature so we disable it by default. The flag
        # will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tag were touched
        # in this one. Changes are recorded in a line base format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follow:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                # compare tag file nodes reachable from old vs. new heads
                # and record any differences for the hooks
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once building set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hooks since the 'tr.hookargs'
            # dict is copied before these run. In addition we needs the data
            # available to in memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                # one hook invocation per moved bookmark, sorted by name
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                # one hook invocation per revision whose phase moved
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            # run the hooks only once the lock is released
            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
2428
2428
2429 def _journalfiles(self):
2429 def _journalfiles(self):
2430 return (
2430 return (
2431 (self.svfs, b'journal'),
2431 (self.svfs, b'journal'),
2432 (self.svfs, b'journal.narrowspec'),
2432 (self.svfs, b'journal.narrowspec'),
2433 (self.vfs, b'journal.narrowspec.dirstate'),
2433 (self.vfs, b'journal.narrowspec.dirstate'),
2434 (self.vfs, b'journal.dirstate'),
2434 (self.vfs, b'journal.dirstate'),
2435 (self.vfs, b'journal.branch'),
2435 (self.vfs, b'journal.branch'),
2436 (self.vfs, b'journal.desc'),
2436 (self.vfs, b'journal.desc'),
2437 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2437 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2438 (self.svfs, b'journal.phaseroots'),
2438 (self.svfs, b'journal.phaseroots'),
2439 )
2439 )
2440
2440
2441 def undofiles(self):
2441 def undofiles(self):
2442 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2442 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2443
2443
2444 @unfilteredmethod
2444 @unfilteredmethod
2445 def _writejournal(self, desc):
2445 def _writejournal(self, desc):
2446 self.dirstate.savebackup(None, b'journal.dirstate')
2446 self.dirstate.savebackup(None, b'journal.dirstate')
2447 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2447 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2448 narrowspec.savebackup(self, b'journal.narrowspec')
2448 narrowspec.savebackup(self, b'journal.narrowspec')
2449 self.vfs.write(
2449 self.vfs.write(
2450 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2450 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2451 )
2451 )
2452 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2452 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2453 bookmarksvfs = bookmarks.bookmarksvfs(self)
2453 bookmarksvfs = bookmarks.bookmarksvfs(self)
2454 bookmarksvfs.write(
2454 bookmarksvfs.write(
2455 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2455 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2456 )
2456 )
2457 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2457 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2458
2458
2459 def recover(self):
2459 def recover(self):
2460 with self.lock():
2460 with self.lock():
2461 if self.svfs.exists(b"journal"):
2461 if self.svfs.exists(b"journal"):
2462 self.ui.status(_(b"rolling back interrupted transaction\n"))
2462 self.ui.status(_(b"rolling back interrupted transaction\n"))
2463 vfsmap = {
2463 vfsmap = {
2464 b'': self.svfs,
2464 b'': self.svfs,
2465 b'plain': self.vfs,
2465 b'plain': self.vfs,
2466 }
2466 }
2467 transaction.rollback(
2467 transaction.rollback(
2468 self.svfs,
2468 self.svfs,
2469 vfsmap,
2469 vfsmap,
2470 b"journal",
2470 b"journal",
2471 self.ui.warn,
2471 self.ui.warn,
2472 checkambigfiles=_cachedfiles,
2472 checkambigfiles=_cachedfiles,
2473 )
2473 )
2474 self.invalidate()
2474 self.invalidate()
2475 return True
2475 return True
2476 else:
2476 else:
2477 self.ui.warn(_(b"no interrupted transaction available\n"))
2477 self.ui.warn(_(b"no interrupted transaction available\n"))
2478 return False
2478 return False
2479
2479
2480 def rollback(self, dryrun=False, force=False):
2480 def rollback(self, dryrun=False, force=False):
2481 wlock = lock = dsguard = None
2481 wlock = lock = dsguard = None
2482 try:
2482 try:
2483 wlock = self.wlock()
2483 wlock = self.wlock()
2484 lock = self.lock()
2484 lock = self.lock()
2485 if self.svfs.exists(b"undo"):
2485 if self.svfs.exists(b"undo"):
2486 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2486 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2487
2487
2488 return self._rollback(dryrun, force, dsguard)
2488 return self._rollback(dryrun, force, dsguard)
2489 else:
2489 else:
2490 self.ui.warn(_(b"no rollback information available\n"))
2490 self.ui.warn(_(b"no rollback information available\n"))
2491 return 1
2491 return 1
2492 finally:
2492 finally:
2493 release(dsguard, lock, wlock)
2493 release(dsguard, lock, wlock)
2494
2494
2495 @unfilteredmethod # Until we get smarter cache management
2495 @unfilteredmethod # Until we get smarter cache management
2496 def _rollback(self, dryrun, force, dsguard):
2496 def _rollback(self, dryrun, force, dsguard):
2497 ui = self.ui
2497 ui = self.ui
2498 try:
2498 try:
2499 args = self.vfs.read(b'undo.desc').splitlines()
2499 args = self.vfs.read(b'undo.desc').splitlines()
2500 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2500 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2501 if len(args) >= 3:
2501 if len(args) >= 3:
2502 detail = args[2]
2502 detail = args[2]
2503 oldtip = oldlen - 1
2503 oldtip = oldlen - 1
2504
2504
2505 if detail and ui.verbose:
2505 if detail and ui.verbose:
2506 msg = _(
2506 msg = _(
2507 b'repository tip rolled back to revision %d'
2507 b'repository tip rolled back to revision %d'
2508 b' (undo %s: %s)\n'
2508 b' (undo %s: %s)\n'
2509 ) % (oldtip, desc, detail)
2509 ) % (oldtip, desc, detail)
2510 else:
2510 else:
2511 msg = _(
2511 msg = _(
2512 b'repository tip rolled back to revision %d (undo %s)\n'
2512 b'repository tip rolled back to revision %d (undo %s)\n'
2513 ) % (oldtip, desc)
2513 ) % (oldtip, desc)
2514 except IOError:
2514 except IOError:
2515 msg = _(b'rolling back unknown transaction\n')
2515 msg = _(b'rolling back unknown transaction\n')
2516 desc = None
2516 desc = None
2517
2517
2518 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2518 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2519 raise error.Abort(
2519 raise error.Abort(
2520 _(
2520 _(
2521 b'rollback of last commit while not checked out '
2521 b'rollback of last commit while not checked out '
2522 b'may lose data'
2522 b'may lose data'
2523 ),
2523 ),
2524 hint=_(b'use -f to force'),
2524 hint=_(b'use -f to force'),
2525 )
2525 )
2526
2526
2527 ui.status(msg)
2527 ui.status(msg)
2528 if dryrun:
2528 if dryrun:
2529 return 0
2529 return 0
2530
2530
2531 parents = self.dirstate.parents()
2531 parents = self.dirstate.parents()
2532 self.destroying()
2532 self.destroying()
2533 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2533 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2534 transaction.rollback(
2534 transaction.rollback(
2535 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2535 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2536 )
2536 )
2537 bookmarksvfs = bookmarks.bookmarksvfs(self)
2537 bookmarksvfs = bookmarks.bookmarksvfs(self)
2538 if bookmarksvfs.exists(b'undo.bookmarks'):
2538 if bookmarksvfs.exists(b'undo.bookmarks'):
2539 bookmarksvfs.rename(
2539 bookmarksvfs.rename(
2540 b'undo.bookmarks', b'bookmarks', checkambig=True
2540 b'undo.bookmarks', b'bookmarks', checkambig=True
2541 )
2541 )
2542 if self.svfs.exists(b'undo.phaseroots'):
2542 if self.svfs.exists(b'undo.phaseroots'):
2543 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2543 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2544 self.invalidate()
2544 self.invalidate()
2545
2545
2546 has_node = self.changelog.index.has_node
2546 has_node = self.changelog.index.has_node
2547 parentgone = any(not has_node(p) for p in parents)
2547 parentgone = any(not has_node(p) for p in parents)
2548 if parentgone:
2548 if parentgone:
2549 # prevent dirstateguard from overwriting already restored one
2549 # prevent dirstateguard from overwriting already restored one
2550 dsguard.close()
2550 dsguard.close()
2551
2551
2552 narrowspec.restorebackup(self, b'undo.narrowspec')
2552 narrowspec.restorebackup(self, b'undo.narrowspec')
2553 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2553 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2554 self.dirstate.restorebackup(None, b'undo.dirstate')
2554 self.dirstate.restorebackup(None, b'undo.dirstate')
2555 try:
2555 try:
2556 branch = self.vfs.read(b'undo.branch')
2556 branch = self.vfs.read(b'undo.branch')
2557 self.dirstate.setbranch(encoding.tolocal(branch))
2557 self.dirstate.setbranch(encoding.tolocal(branch))
2558 except IOError:
2558 except IOError:
2559 ui.warn(
2559 ui.warn(
2560 _(
2560 _(
2561 b'named branch could not be reset: '
2561 b'named branch could not be reset: '
2562 b'current branch is still \'%s\'\n'
2562 b'current branch is still \'%s\'\n'
2563 )
2563 )
2564 % self.dirstate.branch()
2564 % self.dirstate.branch()
2565 )
2565 )
2566
2566
2567 parents = tuple([p.rev() for p in self[None].parents()])
2567 parents = tuple([p.rev() for p in self[None].parents()])
2568 if len(parents) > 1:
2568 if len(parents) > 1:
2569 ui.status(
2569 ui.status(
2570 _(
2570 _(
2571 b'working directory now based on '
2571 b'working directory now based on '
2572 b'revisions %d and %d\n'
2572 b'revisions %d and %d\n'
2573 )
2573 )
2574 % parents
2574 % parents
2575 )
2575 )
2576 else:
2576 else:
2577 ui.status(
2577 ui.status(
2578 _(b'working directory now based on revision %d\n') % parents
2578 _(b'working directory now based on revision %d\n') % parents
2579 )
2579 )
2580 mergestatemod.mergestate.clean(self)
2580 mergestatemod.mergestate.clean(self)
2581
2581
2582 # TODO: if we know which new heads may result from this rollback, pass
2582 # TODO: if we know which new heads may result from this rollback, pass
2583 # them to destroy(), which will prevent the branchhead cache from being
2583 # them to destroy(), which will prevent the branchhead cache from being
2584 # invalidated.
2584 # invalidated.
2585 self.destroyed()
2585 self.destroyed()
2586 return 0
2586 return 0
2587
2587
2588 def _buildcacheupdater(self, newtransaction):
2588 def _buildcacheupdater(self, newtransaction):
2589 """called during transaction to build the callback updating cache
2589 """called during transaction to build the callback updating cache
2590
2590
2591 Lives on the repository to help extension who might want to augment
2591 Lives on the repository to help extension who might want to augment
2592 this logic. For this purpose, the created transaction is passed to the
2592 this logic. For this purpose, the created transaction is passed to the
2593 method.
2593 method.
2594 """
2594 """
2595 # we must avoid cyclic reference between repo and transaction.
2595 # we must avoid cyclic reference between repo and transaction.
2596 reporef = weakref.ref(self)
2596 reporef = weakref.ref(self)
2597
2597
2598 def updater(tr):
2598 def updater(tr):
2599 repo = reporef()
2599 repo = reporef()
2600 repo.updatecaches(tr)
2600 repo.updatecaches(tr)
2601
2601
2602 return updater
2602 return updater
2603
2603
2604 @unfilteredmethod
2604 @unfilteredmethod
2605 def updatecaches(self, tr=None, full=False):
2605 def updatecaches(self, tr=None, full=False):
2606 """warm appropriate caches
2606 """warm appropriate caches
2607
2607
2608 If this function is called after a transaction closed. The transaction
2608 If this function is called after a transaction closed. The transaction
2609 will be available in the 'tr' argument. This can be used to selectively
2609 will be available in the 'tr' argument. This can be used to selectively
2610 update caches relevant to the changes in that transaction.
2610 update caches relevant to the changes in that transaction.
2611
2611
2612 If 'full' is set, make sure all caches the function knows about have
2612 If 'full' is set, make sure all caches the function knows about have
2613 up-to-date data. Even the ones usually loaded more lazily.
2613 up-to-date data. Even the ones usually loaded more lazily.
2614 """
2614 """
2615 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2615 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2616 # During strip, many caches are invalid but
2616 # During strip, many caches are invalid but
2617 # later call to `destroyed` will refresh them.
2617 # later call to `destroyed` will refresh them.
2618 return
2618 return
2619
2619
2620 if tr is None or tr.changes[b'origrepolen'] < len(self):
2620 if tr is None or tr.changes[b'origrepolen'] < len(self):
2621 # accessing the 'served' branchmap should refresh all the others,
2621 # accessing the 'served' branchmap should refresh all the others,
2622 self.ui.debug(b'updating the branch cache\n')
2622 self.ui.debug(b'updating the branch cache\n')
2623 self.filtered(b'served').branchmap()
2623 self.filtered(b'served').branchmap()
2624 self.filtered(b'served.hidden').branchmap()
2624 self.filtered(b'served.hidden').branchmap()
2625
2625
2626 if full:
2626 if full:
2627 unfi = self.unfiltered()
2627 unfi = self.unfiltered()
2628
2628
2629 self.changelog.update_caches(transaction=tr)
2629 self.changelog.update_caches(transaction=tr)
2630 self.manifestlog.update_caches(transaction=tr)
2630 self.manifestlog.update_caches(transaction=tr)
2631
2631
2632 rbc = unfi.revbranchcache()
2632 rbc = unfi.revbranchcache()
2633 for r in unfi.changelog:
2633 for r in unfi.changelog:
2634 rbc.branchinfo(r)
2634 rbc.branchinfo(r)
2635 rbc.write()
2635 rbc.write()
2636
2636
2637 # ensure the working copy parents are in the manifestfulltextcache
2637 # ensure the working copy parents are in the manifestfulltextcache
2638 for ctx in self[b'.'].parents():
2638 for ctx in self[b'.'].parents():
2639 ctx.manifest() # accessing the manifest is enough
2639 ctx.manifest() # accessing the manifest is enough
2640
2640
2641 # accessing fnode cache warms the cache
2641 # accessing fnode cache warms the cache
2642 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2642 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2643 # accessing tags warm the cache
2643 # accessing tags warm the cache
2644 self.tags()
2644 self.tags()
2645 self.filtered(b'served').tags()
2645 self.filtered(b'served').tags()
2646
2646
2647 # The `full` arg is documented as updating even the lazily-loaded
2647 # The `full` arg is documented as updating even the lazily-loaded
2648 # caches immediately, so we're forcing a write to cause these caches
2648 # caches immediately, so we're forcing a write to cause these caches
2649 # to be warmed up even if they haven't explicitly been requested
2649 # to be warmed up even if they haven't explicitly been requested
2650 # yet (if they've never been used by hg, they won't ever have been
2650 # yet (if they've never been used by hg, they won't ever have been
2651 # written, even if they're a subset of another kind of cache that
2651 # written, even if they're a subset of another kind of cache that
2652 # *has* been used).
2652 # *has* been used).
2653 for filt in repoview.filtertable.keys():
2653 for filt in repoview.filtertable.keys():
2654 filtered = self.filtered(filt)
2654 filtered = self.filtered(filt)
2655 filtered.branchmap().write(filtered)
2655 filtered.branchmap().write(filtered)
2656
2656
2657 def invalidatecaches(self):
2657 def invalidatecaches(self):
2658
2658
2659 if '_tagscache' in vars(self):
2659 if '_tagscache' in vars(self):
2660 # can't use delattr on proxy
2660 # can't use delattr on proxy
2661 del self.__dict__['_tagscache']
2661 del self.__dict__['_tagscache']
2662
2662
2663 self._branchcaches.clear()
2663 self._branchcaches.clear()
2664 self.invalidatevolatilesets()
2664 self.invalidatevolatilesets()
2665 self._sparsesignaturecache.clear()
2665 self._sparsesignaturecache.clear()
2666
2666
2667 def invalidatevolatilesets(self):
2667 def invalidatevolatilesets(self):
2668 self.filteredrevcache.clear()
2668 self.filteredrevcache.clear()
2669 obsolete.clearobscaches(self)
2669 obsolete.clearobscaches(self)
2670 self._quick_access_changeid_invalidate()
2670 self._quick_access_changeid_invalidate()
2671
2671
2672 def invalidatedirstate(self):
2672 def invalidatedirstate(self):
2673 """Invalidates the dirstate, causing the next call to dirstate
2673 """Invalidates the dirstate, causing the next call to dirstate
2674 to check if it was modified since the last time it was read,
2674 to check if it was modified since the last time it was read,
2675 rereading it if it has.
2675 rereading it if it has.
2676
2676
2677 This is different to dirstate.invalidate() that it doesn't always
2677 This is different to dirstate.invalidate() that it doesn't always
2678 rereads the dirstate. Use dirstate.invalidate() if you want to
2678 rereads the dirstate. Use dirstate.invalidate() if you want to
2679 explicitly read the dirstate again (i.e. restoring it to a previous
2679 explicitly read the dirstate again (i.e. restoring it to a previous
2680 known good state)."""
2680 known good state)."""
2681 if hasunfilteredcache(self, 'dirstate'):
2681 if hasunfilteredcache(self, 'dirstate'):
2682 for k in self.dirstate._filecache:
2682 for k in self.dirstate._filecache:
2683 try:
2683 try:
2684 delattr(self.dirstate, k)
2684 delattr(self.dirstate, k)
2685 except AttributeError:
2685 except AttributeError:
2686 pass
2686 pass
2687 delattr(self.unfiltered(), 'dirstate')
2687 delattr(self.unfiltered(), 'dirstate')
2688
2688
2689 def invalidate(self, clearfilecache=False):
2689 def invalidate(self, clearfilecache=False):
2690 """Invalidates both store and non-store parts other than dirstate
2690 """Invalidates both store and non-store parts other than dirstate
2691
2691
2692 If a transaction is running, invalidation of store is omitted,
2692 If a transaction is running, invalidation of store is omitted,
2693 because discarding in-memory changes might cause inconsistency
2693 because discarding in-memory changes might cause inconsistency
2694 (e.g. incomplete fncache causes unintentional failure, but
2694 (e.g. incomplete fncache causes unintentional failure, but
2695 redundant one doesn't).
2695 redundant one doesn't).
2696 """
2696 """
2697 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2697 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2698 for k in list(self._filecache.keys()):
2698 for k in list(self._filecache.keys()):
2699 # dirstate is invalidated separately in invalidatedirstate()
2699 # dirstate is invalidated separately in invalidatedirstate()
2700 if k == b'dirstate':
2700 if k == b'dirstate':
2701 continue
2701 continue
2702 if (
2702 if (
2703 k == b'changelog'
2703 k == b'changelog'
2704 and self.currenttransaction()
2704 and self.currenttransaction()
2705 and self.changelog._delayed
2705 and self.changelog._delayed
2706 ):
2706 ):
2707 # The changelog object may store unwritten revisions. We don't
2707 # The changelog object may store unwritten revisions. We don't
2708 # want to lose them.
2708 # want to lose them.
2709 # TODO: Solve the problem instead of working around it.
2709 # TODO: Solve the problem instead of working around it.
2710 continue
2710 continue
2711
2711
2712 if clearfilecache:
2712 if clearfilecache:
2713 del self._filecache[k]
2713 del self._filecache[k]
2714 try:
2714 try:
2715 delattr(unfiltered, k)
2715 delattr(unfiltered, k)
2716 except AttributeError:
2716 except AttributeError:
2717 pass
2717 pass
2718 self.invalidatecaches()
2718 self.invalidatecaches()
2719 if not self.currenttransaction():
2719 if not self.currenttransaction():
2720 # TODO: Changing contents of store outside transaction
2720 # TODO: Changing contents of store outside transaction
2721 # causes inconsistency. We should make in-memory store
2721 # causes inconsistency. We should make in-memory store
2722 # changes detectable, and abort if changed.
2722 # changes detectable, and abort if changed.
2723 self.store.invalidatecaches()
2723 self.store.invalidatecaches()
2724
2724
2725 def invalidateall(self):
2725 def invalidateall(self):
2726 """Fully invalidates both store and non-store parts, causing the
2726 """Fully invalidates both store and non-store parts, causing the
2727 subsequent operation to reread any outside changes."""
2727 subsequent operation to reread any outside changes."""
2728 # extension should hook this to invalidate its caches
2728 # extension should hook this to invalidate its caches
2729 self.invalidate()
2729 self.invalidate()
2730 self.invalidatedirstate()
2730 self.invalidatedirstate()
2731
2731
2732 @unfilteredmethod
2732 @unfilteredmethod
2733 def _refreshfilecachestats(self, tr):
2733 def _refreshfilecachestats(self, tr):
2734 """Reload stats of cached files so that they are flagged as valid"""
2734 """Reload stats of cached files so that they are flagged as valid"""
2735 for k, ce in self._filecache.items():
2735 for k, ce in self._filecache.items():
2736 k = pycompat.sysstr(k)
2736 k = pycompat.sysstr(k)
2737 if k == 'dirstate' or k not in self.__dict__:
2737 if k == 'dirstate' or k not in self.__dict__:
2738 continue
2738 continue
2739 ce.refresh()
2739 ce.refresh()
2740
2740
2741 def _lock(
2741 def _lock(
2742 self,
2742 self,
2743 vfs,
2743 vfs,
2744 lockname,
2744 lockname,
2745 wait,
2745 wait,
2746 releasefn,
2746 releasefn,
2747 acquirefn,
2747 acquirefn,
2748 desc,
2748 desc,
2749 ):
2749 ):
2750 timeout = 0
2750 timeout = 0
2751 warntimeout = 0
2751 warntimeout = 0
2752 if wait:
2752 if wait:
2753 timeout = self.ui.configint(b"ui", b"timeout")
2753 timeout = self.ui.configint(b"ui", b"timeout")
2754 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2754 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2755 # internal config: ui.signal-safe-lock
2755 # internal config: ui.signal-safe-lock
2756 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2756 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2757
2757
2758 l = lockmod.trylock(
2758 l = lockmod.trylock(
2759 self.ui,
2759 self.ui,
2760 vfs,
2760 vfs,
2761 lockname,
2761 lockname,
2762 timeout,
2762 timeout,
2763 warntimeout,
2763 warntimeout,
2764 releasefn=releasefn,
2764 releasefn=releasefn,
2765 acquirefn=acquirefn,
2765 acquirefn=acquirefn,
2766 desc=desc,
2766 desc=desc,
2767 signalsafe=signalsafe,
2767 signalsafe=signalsafe,
2768 )
2768 )
2769 return l
2769 return l
2770
2770
2771 def _afterlock(self, callback):
2771 def _afterlock(self, callback):
2772 """add a callback to be run when the repository is fully unlocked
2772 """add a callback to be run when the repository is fully unlocked
2773
2773
2774 The callback will be executed when the outermost lock is released
2774 The callback will be executed when the outermost lock is released
2775 (with wlock being higher level than 'lock')."""
2775 (with wlock being higher level than 'lock')."""
2776 for ref in (self._wlockref, self._lockref):
2776 for ref in (self._wlockref, self._lockref):
2777 l = ref and ref()
2777 l = ref and ref()
2778 if l and l.held:
2778 if l and l.held:
2779 l.postrelease.append(callback)
2779 l.postrelease.append(callback)
2780 break
2780 break
2781 else: # no lock have been found.
2781 else: # no lock have been found.
2782 callback(True)
2782 callback(True)
2783
2783
2784 def lock(self, wait=True):
2784 def lock(self, wait=True):
2785 """Lock the repository store (.hg/store) and return a weak reference
2785 """Lock the repository store (.hg/store) and return a weak reference
2786 to the lock. Use this before modifying the store (e.g. committing or
2786 to the lock. Use this before modifying the store (e.g. committing or
2787 stripping). If you are opening a transaction, get a lock as well.)
2787 stripping). If you are opening a transaction, get a lock as well.)
2788
2788
2789 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2789 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2790 'wlock' first to avoid a dead-lock hazard."""
2790 'wlock' first to avoid a dead-lock hazard."""
2791 l = self._currentlock(self._lockref)
2791 l = self._currentlock(self._lockref)
2792 if l is not None:
2792 if l is not None:
2793 l.lock()
2793 l.lock()
2794 return l
2794 return l
2795
2795
2796 l = self._lock(
2796 l = self._lock(
2797 vfs=self.svfs,
2797 vfs=self.svfs,
2798 lockname=b"lock",
2798 lockname=b"lock",
2799 wait=wait,
2799 wait=wait,
2800 releasefn=None,
2800 releasefn=None,
2801 acquirefn=self.invalidate,
2801 acquirefn=self.invalidate,
2802 desc=_(b'repository %s') % self.origroot,
2802 desc=_(b'repository %s') % self.origroot,
2803 )
2803 )
2804 self._lockref = weakref.ref(l)
2804 self._lockref = weakref.ref(l)
2805 return l
2805 return l
2806
2806
2807 def wlock(self, wait=True):
2807 def wlock(self, wait=True):
2808 """Lock the non-store parts of the repository (everything under
2808 """Lock the non-store parts of the repository (everything under
2809 .hg except .hg/store) and return a weak reference to the lock.
2809 .hg except .hg/store) and return a weak reference to the lock.
2810
2810
2811 Use this before modifying files in .hg.
2811 Use this before modifying files in .hg.
2812
2812
2813 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2813 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2814 'wlock' first to avoid a dead-lock hazard."""
2814 'wlock' first to avoid a dead-lock hazard."""
2815 l = self._wlockref and self._wlockref()
2815 l = self._wlockref and self._wlockref()
2816 if l is not None and l.held:
2816 if l is not None and l.held:
2817 l.lock()
2817 l.lock()
2818 return l
2818 return l
2819
2819
2820 # We do not need to check for non-waiting lock acquisition. Such
2820 # We do not need to check for non-waiting lock acquisition. Such
2821 # acquisition would not cause dead-lock as they would just fail.
2821 # acquisition would not cause dead-lock as they would just fail.
2822 if wait and (
2822 if wait and (
2823 self.ui.configbool(b'devel', b'all-warnings')
2823 self.ui.configbool(b'devel', b'all-warnings')
2824 or self.ui.configbool(b'devel', b'check-locks')
2824 or self.ui.configbool(b'devel', b'check-locks')
2825 ):
2825 ):
2826 if self._currentlock(self._lockref) is not None:
2826 if self._currentlock(self._lockref) is not None:
2827 self.ui.develwarn(b'"wlock" acquired after "lock"')
2827 self.ui.develwarn(b'"wlock" acquired after "lock"')
2828
2828
2829 def unlock():
2829 def unlock():
2830 if self.dirstate.pendingparentchange():
2830 if self.dirstate.pendingparentchange():
2831 self.dirstate.invalidate()
2831 self.dirstate.invalidate()
2832 else:
2832 else:
2833 self.dirstate.write(None)
2833 self.dirstate.write(None)
2834
2834
2835 self._filecache[b'dirstate'].refresh()
2835 self._filecache[b'dirstate'].refresh()
2836
2836
2837 l = self._lock(
2837 l = self._lock(
2838 self.vfs,
2838 self.vfs,
2839 b"wlock",
2839 b"wlock",
2840 wait,
2840 wait,
2841 unlock,
2841 unlock,
2842 self.invalidatedirstate,
2842 self.invalidatedirstate,
2843 _(b'working directory of %s') % self.origroot,
2843 _(b'working directory of %s') % self.origroot,
2844 )
2844 )
2845 self._wlockref = weakref.ref(l)
2845 self._wlockref = weakref.ref(l)
2846 return l
2846 return l
2847
2847
2848 def _currentlock(self, lockref):
2848 def _currentlock(self, lockref):
2849 """Returns the lock if it's held, or None if it's not."""
2849 """Returns the lock if it's held, or None if it's not."""
2850 if lockref is None:
2850 if lockref is None:
2851 return None
2851 return None
2852 l = lockref()
2852 l = lockref()
2853 if l is None or not l.held:
2853 if l is None or not l.held:
2854 return None
2854 return None
2855 return l
2855 return l
2856
2856
2857 def currentwlock(self):
2857 def currentwlock(self):
2858 """Returns the wlock if it's held, or None if it's not."""
2858 """Returns the wlock if it's held, or None if it's not."""
2859 return self._currentlock(self._wlockref)
2859 return self._currentlock(self._wlockref)
2860
2860
2861 def checkcommitpatterns(self, wctx, match, status, fail):
2861 def checkcommitpatterns(self, wctx, match, status, fail):
2862 """check for commit arguments that aren't committable"""
2862 """check for commit arguments that aren't committable"""
2863 if match.isexact() or match.prefix():
2863 if match.isexact() or match.prefix():
2864 matched = set(status.modified + status.added + status.removed)
2864 matched = set(status.modified + status.added + status.removed)
2865
2865
2866 for f in match.files():
2866 for f in match.files():
2867 f = self.dirstate.normalize(f)
2867 f = self.dirstate.normalize(f)
2868 if f == b'.' or f in matched or f in wctx.substate:
2868 if f == b'.' or f in matched or f in wctx.substate:
2869 continue
2869 continue
2870 if f in status.deleted:
2870 if f in status.deleted:
2871 fail(f, _(b'file not found!'))
2871 fail(f, _(b'file not found!'))
2872 # Is it a directory that exists or used to exist?
2872 # Is it a directory that exists or used to exist?
2873 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2873 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2874 d = f + b'/'
2874 d = f + b'/'
2875 for mf in matched:
2875 for mf in matched:
2876 if mf.startswith(d):
2876 if mf.startswith(d):
2877 break
2877 break
2878 else:
2878 else:
2879 fail(f, _(b"no match under directory!"))
2879 fail(f, _(b"no match under directory!"))
2880 elif f not in self.dirstate:
2880 elif f not in self.dirstate:
2881 fail(f, _(b"file not tracked!"))
2881 fail(f, _(b"file not tracked!"))
2882
2882
2883 @unfilteredmethod
2883 @unfilteredmethod
2884 def commit(
2884 def commit(
2885 self,
2885 self,
2886 text=b"",
2886 text=b"",
2887 user=None,
2887 user=None,
2888 date=None,
2888 date=None,
2889 match=None,
2889 match=None,
2890 force=False,
2890 force=False,
2891 editor=None,
2891 editor=None,
2892 extra=None,
2892 extra=None,
2893 ):
2893 ):
2894 """Add a new revision to current repository.
2894 """Add a new revision to current repository.
2895
2895
2896 Revision information is gathered from the working directory,
2896 Revision information is gathered from the working directory,
2897 match can be used to filter the committed files. If editor is
2897 match can be used to filter the committed files. If editor is
2898 supplied, it is called to get a commit message.
2898 supplied, it is called to get a commit message.
2899 """
2899 """
2900 if extra is None:
2900 if extra is None:
2901 extra = {}
2901 extra = {}
2902
2902
2903 def fail(f, msg):
2903 def fail(f, msg):
2904 raise error.InputError(b'%s: %s' % (f, msg))
2904 raise error.InputError(b'%s: %s' % (f, msg))
2905
2905
2906 if not match:
2906 if not match:
2907 match = matchmod.always()
2907 match = matchmod.always()
2908
2908
2909 if not force:
2909 if not force:
2910 match.bad = fail
2910 match.bad = fail
2911
2911
2912 # lock() for recent changelog (see issue4368)
2912 # lock() for recent changelog (see issue4368)
2913 with self.wlock(), self.lock():
2913 with self.wlock(), self.lock():
2914 wctx = self[None]
2914 wctx = self[None]
2915 merge = len(wctx.parents()) > 1
2915 merge = len(wctx.parents()) > 1
2916
2916
2917 if not force and merge and not match.always():
2917 if not force and merge and not match.always():
2918 raise error.Abort(
2918 raise error.Abort(
2919 _(
2919 _(
2920 b'cannot partially commit a merge '
2920 b'cannot partially commit a merge '
2921 b'(do not specify files or patterns)'
2921 b'(do not specify files or patterns)'
2922 )
2922 )
2923 )
2923 )
2924
2924
2925 status = self.status(match=match, clean=force)
2925 status = self.status(match=match, clean=force)
2926 if force:
2926 if force:
2927 status.modified.extend(
2927 status.modified.extend(
2928 status.clean
2928 status.clean
2929 ) # mq may commit clean files
2929 ) # mq may commit clean files
2930
2930
2931 # check subrepos
2931 # check subrepos
2932 subs, commitsubs, newstate = subrepoutil.precommit(
2932 subs, commitsubs, newstate = subrepoutil.precommit(
2933 self.ui, wctx, status, match, force=force
2933 self.ui, wctx, status, match, force=force
2934 )
2934 )
2935
2935
2936 # make sure all explicit patterns are matched
2936 # make sure all explicit patterns are matched
2937 if not force:
2937 if not force:
2938 self.checkcommitpatterns(wctx, match, status, fail)
2938 self.checkcommitpatterns(wctx, match, status, fail)
2939
2939
2940 cctx = context.workingcommitctx(
2940 cctx = context.workingcommitctx(
2941 self, status, text, user, date, extra
2941 self, status, text, user, date, extra
2942 )
2942 )
2943
2943
2944 ms = mergestatemod.mergestate.read(self)
2944 ms = mergestatemod.mergestate.read(self)
2945 mergeutil.checkunresolved(ms)
2945 mergeutil.checkunresolved(ms)
2946
2946
2947 # internal config: ui.allowemptycommit
2947 # internal config: ui.allowemptycommit
2948 if cctx.isempty() and not self.ui.configbool(
2948 if cctx.isempty() and not self.ui.configbool(
2949 b'ui', b'allowemptycommit'
2949 b'ui', b'allowemptycommit'
2950 ):
2950 ):
2951 self.ui.debug(b'nothing to commit, clearing merge state\n')
2951 self.ui.debug(b'nothing to commit, clearing merge state\n')
2952 ms.reset()
2952 ms.reset()
2953 return None
2953 return None
2954
2954
2955 if merge and cctx.deleted():
2955 if merge and cctx.deleted():
2956 raise error.Abort(_(b"cannot commit merge with missing files"))
2956 raise error.Abort(_(b"cannot commit merge with missing files"))
2957
2957
2958 if editor:
2958 if editor:
2959 cctx._text = editor(self, cctx, subs)
2959 cctx._text = editor(self, cctx, subs)
2960 edited = text != cctx._text
2960 edited = text != cctx._text
2961
2961
2962 # Save commit message in case this transaction gets rolled back
2962 # Save commit message in case this transaction gets rolled back
2963 # (e.g. by a pretxncommit hook). Leave the content alone on
2963 # (e.g. by a pretxncommit hook). Leave the content alone on
2964 # the assumption that the user will use the same editor again.
2964 # the assumption that the user will use the same editor again.
2965 msgfn = self.savecommitmessage(cctx._text)
2965 msgfn = self.savecommitmessage(cctx._text)
2966
2966
2967 # commit subs and write new state
2967 # commit subs and write new state
2968 if subs:
2968 if subs:
2969 uipathfn = scmutil.getuipathfn(self)
2969 uipathfn = scmutil.getuipathfn(self)
2970 for s in sorted(commitsubs):
2970 for s in sorted(commitsubs):
2971 sub = wctx.sub(s)
2971 sub = wctx.sub(s)
2972 self.ui.status(
2972 self.ui.status(
2973 _(b'committing subrepository %s\n')
2973 _(b'committing subrepository %s\n')
2974 % uipathfn(subrepoutil.subrelpath(sub))
2974 % uipathfn(subrepoutil.subrelpath(sub))
2975 )
2975 )
2976 sr = sub.commit(cctx._text, user, date)
2976 sr = sub.commit(cctx._text, user, date)
2977 newstate[s] = (newstate[s][0], sr)
2977 newstate[s] = (newstate[s][0], sr)
2978 subrepoutil.writestate(self, newstate)
2978 subrepoutil.writestate(self, newstate)
2979
2979
2980 p1, p2 = self.dirstate.parents()
2980 p1, p2 = self.dirstate.parents()
2981 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2981 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2982 try:
2982 try:
2983 self.hook(
2983 self.hook(
2984 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2984 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2985 )
2985 )
2986 with self.transaction(b'commit'):
2986 with self.transaction(b'commit'):
2987 ret = self.commitctx(cctx, True)
2987 ret = self.commitctx(cctx, True)
2988 # update bookmarks, dirstate and mergestate
2988 # update bookmarks, dirstate and mergestate
2989 bookmarks.update(self, [p1, p2], ret)
2989 bookmarks.update(self, [p1, p2], ret)
2990 cctx.markcommitted(ret)
2990 cctx.markcommitted(ret)
2991 ms.reset()
2991 ms.reset()
2992 except: # re-raises
2992 except: # re-raises
2993 if edited:
2993 if edited:
2994 self.ui.write(
2994 self.ui.write(
2995 _(b'note: commit message saved in %s\n') % msgfn
2995 _(b'note: commit message saved in %s\n') % msgfn
2996 )
2996 )
2997 self.ui.write(
2997 self.ui.write(
2998 _(
2998 _(
2999 b"note: use 'hg commit --logfile "
2999 b"note: use 'hg commit --logfile "
3000 b".hg/last-message.txt --edit' to reuse it\n"
3000 b".hg/last-message.txt --edit' to reuse it\n"
3001 )
3001 )
3002 )
3002 )
3003 raise
3003 raise
3004
3004
3005 def commithook(unused_success):
3005 def commithook(unused_success):
3006 # hack for command that use a temporary commit (eg: histedit)
3006 # hack for command that use a temporary commit (eg: histedit)
3007 # temporary commit got stripped before hook release
3007 # temporary commit got stripped before hook release
3008 if self.changelog.hasnode(ret):
3008 if self.changelog.hasnode(ret):
3009 self.hook(
3009 self.hook(
3010 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3010 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3011 )
3011 )
3012
3012
3013 self._afterlock(commithook)
3013 self._afterlock(commithook)
3014 return ret
3014 return ret
3015
3015
3016 @unfilteredmethod
3016 @unfilteredmethod
3017 def commitctx(self, ctx, error=False, origctx=None):
3017 def commitctx(self, ctx, error=False, origctx=None):
3018 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3018 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3019
3019
3020 @unfilteredmethod
3020 @unfilteredmethod
3021 def destroying(self):
3021 def destroying(self):
3022 """Inform the repository that nodes are about to be destroyed.
3022 """Inform the repository that nodes are about to be destroyed.
3023 Intended for use by strip and rollback, so there's a common
3023 Intended for use by strip and rollback, so there's a common
3024 place for anything that has to be done before destroying history.
3024 place for anything that has to be done before destroying history.
3025
3025
3026 This is mostly useful for saving state that is in memory and waiting
3026 This is mostly useful for saving state that is in memory and waiting
3027 to be flushed when the current lock is released. Because a call to
3027 to be flushed when the current lock is released. Because a call to
3028 destroyed is imminent, the repo will be invalidated causing those
3028 destroyed is imminent, the repo will be invalidated causing those
3029 changes to stay in memory (waiting for the next unlock), or vanish
3029 changes to stay in memory (waiting for the next unlock), or vanish
3030 completely.
3030 completely.
3031 """
3031 """
3032 # When using the same lock to commit and strip, the phasecache is left
3032 # When using the same lock to commit and strip, the phasecache is left
3033 # dirty after committing. Then when we strip, the repo is invalidated,
3033 # dirty after committing. Then when we strip, the repo is invalidated,
3034 # causing those changes to disappear.
3034 # causing those changes to disappear.
3035 if '_phasecache' in vars(self):
3035 if '_phasecache' in vars(self):
3036 self._phasecache.write()
3036 self._phasecache.write()
3037
3037
3038 @unfilteredmethod
3038 @unfilteredmethod
3039 def destroyed(self):
3039 def destroyed(self):
3040 """Inform the repository that nodes have been destroyed.
3040 """Inform the repository that nodes have been destroyed.
3041 Intended for use by strip and rollback, so there's a common
3041 Intended for use by strip and rollback, so there's a common
3042 place for anything that has to be done after destroying history.
3042 place for anything that has to be done after destroying history.
3043 """
3043 """
3044 # When one tries to:
3044 # When one tries to:
3045 # 1) destroy nodes thus calling this method (e.g. strip)
3045 # 1) destroy nodes thus calling this method (e.g. strip)
3046 # 2) use phasecache somewhere (e.g. commit)
3046 # 2) use phasecache somewhere (e.g. commit)
3047 #
3047 #
3048 # then 2) will fail because the phasecache contains nodes that were
3048 # then 2) will fail because the phasecache contains nodes that were
3049 # removed. We can either remove phasecache from the filecache,
3049 # removed. We can either remove phasecache from the filecache,
3050 # causing it to reload next time it is accessed, or simply filter
3050 # causing it to reload next time it is accessed, or simply filter
3051 # the removed nodes now and write the updated cache.
3051 # the removed nodes now and write the updated cache.
3052 self._phasecache.filterunknown(self)
3052 self._phasecache.filterunknown(self)
3053 self._phasecache.write()
3053 self._phasecache.write()
3054
3054
3055 # refresh all repository caches
3055 # refresh all repository caches
3056 self.updatecaches()
3056 self.updatecaches()
3057
3057
3058 # Ensure the persistent tag cache is updated. Doing it now
3058 # Ensure the persistent tag cache is updated. Doing it now
3059 # means that the tag cache only has to worry about destroyed
3059 # means that the tag cache only has to worry about destroyed
3060 # heads immediately after a strip/rollback. That in turn
3060 # heads immediately after a strip/rollback. That in turn
3061 # guarantees that "cachetip == currenttip" (comparing both rev
3061 # guarantees that "cachetip == currenttip" (comparing both rev
3062 # and node) always means no nodes have been added or destroyed.
3062 # and node) always means no nodes have been added or destroyed.
3063
3063
3064 # XXX this is suboptimal when qrefresh'ing: we strip the current
3064 # XXX this is suboptimal when qrefresh'ing: we strip the current
3065 # head, refresh the tag cache, then immediately add a new head.
3065 # head, refresh the tag cache, then immediately add a new head.
3066 # But I think doing it this way is necessary for the "instant
3066 # But I think doing it this way is necessary for the "instant
3067 # tag cache retrieval" case to work.
3067 # tag cache retrieval" case to work.
3068 self.invalidate()
3068 self.invalidate()
3069
3069
3070 def status(
3070 def status(
3071 self,
3071 self,
3072 node1=b'.',
3072 node1=b'.',
3073 node2=None,
3073 node2=None,
3074 match=None,
3074 match=None,
3075 ignored=False,
3075 ignored=False,
3076 clean=False,
3076 clean=False,
3077 unknown=False,
3077 unknown=False,
3078 listsubrepos=False,
3078 listsubrepos=False,
3079 ):
3079 ):
3080 '''a convenience method that calls node1.status(node2)'''
3080 '''a convenience method that calls node1.status(node2)'''
3081 return self[node1].status(
3081 return self[node1].status(
3082 node2, match, ignored, clean, unknown, listsubrepos
3082 node2, match, ignored, clean, unknown, listsubrepos
3083 )
3083 )
3084
3084
3085 def addpostdsstatus(self, ps):
3085 def addpostdsstatus(self, ps):
3086 """Add a callback to run within the wlock, at the point at which status
3086 """Add a callback to run within the wlock, at the point at which status
3087 fixups happen.
3087 fixups happen.
3088
3088
3089 On status completion, callback(wctx, status) will be called with the
3089 On status completion, callback(wctx, status) will be called with the
3090 wlock held, unless the dirstate has changed from underneath or the wlock
3090 wlock held, unless the dirstate has changed from underneath or the wlock
3091 couldn't be grabbed.
3091 couldn't be grabbed.
3092
3092
3093 Callbacks should not capture and use a cached copy of the dirstate --
3093 Callbacks should not capture and use a cached copy of the dirstate --
3094 it might change in the meanwhile. Instead, they should access the
3094 it might change in the meanwhile. Instead, they should access the
3095 dirstate via wctx.repo().dirstate.
3095 dirstate via wctx.repo().dirstate.
3096
3096
3097 This list is emptied out after each status run -- extensions should
3097 This list is emptied out after each status run -- extensions should
3098 make sure it adds to this list each time dirstate.status is called.
3098 make sure it adds to this list each time dirstate.status is called.
3099 Extensions should also make sure they don't call this for statuses
3099 Extensions should also make sure they don't call this for statuses
3100 that don't involve the dirstate.
3100 that don't involve the dirstate.
3101 """
3101 """
3102
3102
3103 # The list is located here for uniqueness reasons -- it is actually
3103 # The list is located here for uniqueness reasons -- it is actually
3104 # managed by the workingctx, but that isn't unique per-repo.
3104 # managed by the workingctx, but that isn't unique per-repo.
3105 self._postdsstatus.append(ps)
3105 self._postdsstatus.append(ps)
3106
3106
3107 def postdsstatus(self):
3107 def postdsstatus(self):
3108 """Used by workingctx to get the list of post-dirstate-status hooks."""
3108 """Used by workingctx to get the list of post-dirstate-status hooks."""
3109 return self._postdsstatus
3109 return self._postdsstatus
3110
3110
3111 def clearpostdsstatus(self):
3111 def clearpostdsstatus(self):
3112 """Used by workingctx to clear post-dirstate-status hooks."""
3112 """Used by workingctx to clear post-dirstate-status hooks."""
3113 del self._postdsstatus[:]
3113 del self._postdsstatus[:]
3114
3114
3115 def heads(self, start=None):
3115 def heads(self, start=None):
3116 if start is None:
3116 if start is None:
3117 cl = self.changelog
3117 cl = self.changelog
3118 headrevs = reversed(cl.headrevs())
3118 headrevs = reversed(cl.headrevs())
3119 return [cl.node(rev) for rev in headrevs]
3119 return [cl.node(rev) for rev in headrevs]
3120
3120
3121 heads = self.changelog.heads(start)
3121 heads = self.changelog.heads(start)
3122 # sort the output in rev descending order
3122 # sort the output in rev descending order
3123 return sorted(heads, key=self.changelog.rev, reverse=True)
3123 return sorted(heads, key=self.changelog.rev, reverse=True)
3124
3124
3125 def branchheads(self, branch=None, start=None, closed=False):
3125 def branchheads(self, branch=None, start=None, closed=False):
3126 """return a (possibly filtered) list of heads for the given branch
3126 """return a (possibly filtered) list of heads for the given branch
3127
3127
3128 Heads are returned in topological order, from newest to oldest.
3128 Heads are returned in topological order, from newest to oldest.
3129 If branch is None, use the dirstate branch.
3129 If branch is None, use the dirstate branch.
3130 If start is not None, return only heads reachable from start.
3130 If start is not None, return only heads reachable from start.
3131 If closed is True, return heads that are marked as closed as well.
3131 If closed is True, return heads that are marked as closed as well.
3132 """
3132 """
3133 if branch is None:
3133 if branch is None:
3134 branch = self[None].branch()
3134 branch = self[None].branch()
3135 branches = self.branchmap()
3135 branches = self.branchmap()
3136 if not branches.hasbranch(branch):
3136 if not branches.hasbranch(branch):
3137 return []
3137 return []
3138 # the cache returns heads ordered lowest to highest
3138 # the cache returns heads ordered lowest to highest
3139 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3139 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3140 if start is not None:
3140 if start is not None:
3141 # filter out the heads that cannot be reached from startrev
3141 # filter out the heads that cannot be reached from startrev
3142 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3142 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3143 bheads = [h for h in bheads if h in fbheads]
3143 bheads = [h for h in bheads if h in fbheads]
3144 return bheads
3144 return bheads
3145
3145
3146 def branches(self, nodes):
3146 def branches(self, nodes):
3147 if not nodes:
3147 if not nodes:
3148 nodes = [self.changelog.tip()]
3148 nodes = [self.changelog.tip()]
3149 b = []
3149 b = []
3150 for n in nodes:
3150 for n in nodes:
3151 t = n
3151 t = n
3152 while True:
3152 while True:
3153 p = self.changelog.parents(n)
3153 p = self.changelog.parents(n)
3154 if p[1] != nullid or p[0] == nullid:
3154 if p[1] != nullid or p[0] == nullid:
3155 b.append((t, n, p[0], p[1]))
3155 b.append((t, n, p[0], p[1]))
3156 break
3156 break
3157 n = p[0]
3157 n = p[0]
3158 return b
3158 return b
3159
3159
3160 def between(self, pairs):
3160 def between(self, pairs):
3161 r = []
3161 r = []
3162
3162
3163 for top, bottom in pairs:
3163 for top, bottom in pairs:
3164 n, l, i = top, [], 0
3164 n, l, i = top, [], 0
3165 f = 1
3165 f = 1
3166
3166
3167 while n != bottom and n != nullid:
3167 while n != bottom and n != nullid:
3168 p = self.changelog.parents(n)[0]
3168 p = self.changelog.parents(n)[0]
3169 if i == f:
3169 if i == f:
3170 l.append(n)
3170 l.append(n)
3171 f = f * 2
3171 f = f * 2
3172 n = p
3172 n = p
3173 i += 1
3173 i += 1
3174
3174
3175 r.append(l)
3175 r.append(l)
3176
3176
3177 return r
3177 return r
3178
3178
3179 def checkpush(self, pushop):
3179 def checkpush(self, pushop):
3180 """Extensions can override this function if additional checks have
3180 """Extensions can override this function if additional checks have
3181 to be performed before pushing, or call it if they override push
3181 to be performed before pushing, or call it if they override push
3182 command.
3182 command.
3183 """
3183 """
3184
3184
3185 @unfilteredpropertycache
3185 @unfilteredpropertycache
3186 def prepushoutgoinghooks(self):
3186 def prepushoutgoinghooks(self):
3187 """Return util.hooks consists of a pushop with repo, remote, outgoing
3187 """Return util.hooks consists of a pushop with repo, remote, outgoing
3188 methods, which are called before pushing changesets.
3188 methods, which are called before pushing changesets.
3189 """
3189 """
3190 return util.hooks()
3190 return util.hooks()
3191
3191
3192 def pushkey(self, namespace, key, old, new):
3192 def pushkey(self, namespace, key, old, new):
3193 try:
3193 try:
3194 tr = self.currenttransaction()
3194 tr = self.currenttransaction()
3195 hookargs = {}
3195 hookargs = {}
3196 if tr is not None:
3196 if tr is not None:
3197 hookargs.update(tr.hookargs)
3197 hookargs.update(tr.hookargs)
3198 hookargs = pycompat.strkwargs(hookargs)
3198 hookargs = pycompat.strkwargs(hookargs)
3199 hookargs['namespace'] = namespace
3199 hookargs['namespace'] = namespace
3200 hookargs['key'] = key
3200 hookargs['key'] = key
3201 hookargs['old'] = old
3201 hookargs['old'] = old
3202 hookargs['new'] = new
3202 hookargs['new'] = new
3203 self.hook(b'prepushkey', throw=True, **hookargs)
3203 self.hook(b'prepushkey', throw=True, **hookargs)
3204 except error.HookAbort as exc:
3204 except error.HookAbort as exc:
3205 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3205 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3206 if exc.hint:
3206 if exc.hint:
3207 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3207 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3208 return False
3208 return False
3209 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3209 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3210 ret = pushkey.push(self, namespace, key, old, new)
3210 ret = pushkey.push(self, namespace, key, old, new)
3211
3211
3212 def runhook(unused_success):
3212 def runhook(unused_success):
3213 self.hook(
3213 self.hook(
3214 b'pushkey',
3214 b'pushkey',
3215 namespace=namespace,
3215 namespace=namespace,
3216 key=key,
3216 key=key,
3217 old=old,
3217 old=old,
3218 new=new,
3218 new=new,
3219 ret=ret,
3219 ret=ret,
3220 )
3220 )
3221
3221
3222 self._afterlock(runhook)
3222 self._afterlock(runhook)
3223 return ret
3223 return ret
3224
3224
3225 def listkeys(self, namespace):
3225 def listkeys(self, namespace):
3226 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3226 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3227 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3227 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3228 values = pushkey.list(self, namespace)
3228 values = pushkey.list(self, namespace)
3229 self.hook(b'listkeys', namespace=namespace, values=values)
3229 self.hook(b'listkeys', namespace=namespace, values=values)
3230 return values
3230 return values
3231
3231
3232 def debugwireargs(self, one, two, three=None, four=None, five=None):
3232 def debugwireargs(self, one, two, three=None, four=None, five=None):
3233 '''used to test argument passing over the wire'''
3233 '''used to test argument passing over the wire'''
3234 return b"%s %s %s %s %s" % (
3234 return b"%s %s %s %s %s" % (
3235 one,
3235 one,
3236 two,
3236 two,
3237 pycompat.bytestr(three),
3237 pycompat.bytestr(three),
3238 pycompat.bytestr(four),
3238 pycompat.bytestr(four),
3239 pycompat.bytestr(five),
3239 pycompat.bytestr(five),
3240 )
3240 )
3241
3241
3242 def savecommitmessage(self, text):
3242 def savecommitmessage(self, text):
3243 fp = self.vfs(b'last-message.txt', b'wb')
3243 fp = self.vfs(b'last-message.txt', b'wb')
3244 try:
3244 try:
3245 fp.write(text)
3245 fp.write(text)
3246 finally:
3246 finally:
3247 fp.close()
3247 fp.close()
3248 return self.pathto(fp.name[len(self.root) + 1 :])
3248 return self.pathto(fp.name[len(self.root) + 1 :])
3249
3249
3250
3250
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (vfs, src, dest) in *files*.

    The tuples are snapshotted immediately so later mutation of *files*
    does not affect the callback.
    """
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a
3267
3267
3268
3268
def undoname(fn):
    """Map a journal file path to the corresponding undo file path.

    E.g. ``.hg/store/journal.phaseroots`` -> ``.hg/store/undo.phaseroots``.
    """
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    # replace only the leading 'journal' prefix
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
3273
3273
3274
3274
def instance(ui, path, create, intents=None, createopts=None):
    """Open (and optionally create) the local repository at *path*."""
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)
3281
3281
3282
3282
def islocal(path):
    """This repository backend is always local."""
    return True
3285
3285
3286
3286
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    # copy so the caller's dict is never mutated
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts
3300
3300
3301
3301
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    # pick the first configured engine that is actually available
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'exp-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements
3408
3408
3409
3409
3410 def checkrequirementscompat(ui, requirements):
3410 def checkrequirementscompat(ui, requirements):
3411 """Checks compatibility of repository requirements enabled and disabled.
3411 """Checks compatibility of repository requirements enabled and disabled.
3412
3412
3413 Returns a set of requirements which needs to be dropped because dependend
3413 Returns a set of requirements which needs to be dropped because dependend
3414 requirements are not enabled. Also warns users about it"""
3414 requirements are not enabled. Also warns users about it"""
3415
3415
3416 dropped = set()
3416 dropped = set()
3417
3417
3418 if b'store' not in requirements:
3418 if b'store' not in requirements:
3419 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3419 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3420 ui.warn(
3420 ui.warn(
3421 _(
3421 _(
3422 b'ignoring enabled \'format.bookmarks-in-store\' config '
3422 b'ignoring enabled \'format.bookmarks-in-store\' config '
3423 b'beacuse it is incompatible with disabled '
3423 b'beacuse it is incompatible with disabled '
3424 b'\'format.usestore\' config\n'
3424 b'\'format.usestore\' config\n'
3425 )
3425 )
3426 )
3426 )
3427 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3427 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3428
3428
3429 if (
3429 if (
3430 requirementsmod.SHARED_REQUIREMENT in requirements
3430 requirementsmod.SHARED_REQUIREMENT in requirements
3431 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3431 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3432 ):
3432 ):
3433 raise error.Abort(
3433 raise error.Abort(
3434 _(
3434 _(
3435 b"cannot create shared repository as source was created"
3435 b"cannot create shared repository as source was created"
3436 b" with 'format.usestore' config disabled"
3436 b" with 'format.usestore' config disabled"
3437 )
3437 )
3438 )
3438 )
3439
3439
3440 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3440 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3441 ui.warn(
3441 ui.warn(
3442 _(
3442 _(
3443 b"ignoring enabled 'format.exp-share-safe' config because "
3443 b"ignoring enabled 'format.exp-share-safe' config because "
3444 b"it is incompatible with disabled 'format.usestore'"
3444 b"it is incompatible with disabled 'format.usestore'"
3445 b" config\n"
3445 b" config\n"
3446 )
3446 )
3447 )
3447 )
3448 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3448 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3449
3449
3450 return dropped
3450 return dropped
3451
3451
3452
3452
3453 def filterknowncreateopts(ui, createopts):
3453 def filterknowncreateopts(ui, createopts):
3454 """Filters a dict of repo creation options against options that are known.
3454 """Filters a dict of repo creation options against options that are known.
3455
3455
3456 Receives a dict of repo creation options and returns a dict of those
3456 Receives a dict of repo creation options and returns a dict of those
3457 options that we don't know how to handle.
3457 options that we don't know how to handle.
3458
3458
3459 This function is called as part of repository creation. If the
3459 This function is called as part of repository creation. If the
3460 returned dict contains any items, repository creation will not
3460 returned dict contains any items, repository creation will not
3461 be allowed, as it means there was a request to create a repository
3461 be allowed, as it means there was a request to create a repository
3462 with options not recognized by loaded code.
3462 with options not recognized by loaded code.
3463
3463
3464 Extensions can wrap this function to filter out creation options
3464 Extensions can wrap this function to filter out creation options
3465 they know how to handle.
3465 they know how to handle.
3466 """
3466 """
3467 known = {
3467 known = {
3468 b'backend',
3468 b'backend',
3469 b'lfs',
3469 b'lfs',
3470 b'narrowfiles',
3470 b'narrowfiles',
3471 b'sharedrepo',
3471 b'sharedrepo',
3472 b'sharedrelative',
3472 b'sharedrelative',
3473 b'shareditems',
3473 b'shareditems',
3474 b'shallowfilestore',
3474 b'shallowfilestore',
3475 }
3475 }
3476
3476
3477 return {k: v for k, v in createopts.items() if k not in known}
3477 return {k: v for k, v in createopts.items() if k not in known}
3478
3478
3479
3479
3480 def createrepository(ui, path, createopts=None):
3480 def createrepository(ui, path, createopts=None):
3481 """Create a new repository in a vfs.
3481 """Create a new repository in a vfs.
3482
3482
3483 ``path`` path to the new repo's working directory.
3483 ``path`` path to the new repo's working directory.
3484 ``createopts`` options for the new repository.
3484 ``createopts`` options for the new repository.
3485
3485
3486 The following keys for ``createopts`` are recognized:
3486 The following keys for ``createopts`` are recognized:
3487
3487
3488 backend
3488 backend
3489 The storage backend to use.
3489 The storage backend to use.
3490 lfs
3490 lfs
3491 Repository will be created with ``lfs`` requirement. The lfs extension
3491 Repository will be created with ``lfs`` requirement. The lfs extension
3492 will automatically be loaded when the repository is accessed.
3492 will automatically be loaded when the repository is accessed.
3493 narrowfiles
3493 narrowfiles
3494 Set up repository to support narrow file storage.
3494 Set up repository to support narrow file storage.
3495 sharedrepo
3495 sharedrepo
3496 Repository object from which storage should be shared.
3496 Repository object from which storage should be shared.
3497 sharedrelative
3497 sharedrelative
3498 Boolean indicating if the path to the shared repo should be
3498 Boolean indicating if the path to the shared repo should be
3499 stored as relative. By default, the pointer to the "parent" repo
3499 stored as relative. By default, the pointer to the "parent" repo
3500 is stored as an absolute path.
3500 is stored as an absolute path.
3501 shareditems
3501 shareditems
3502 Set of items to share to the new repository (in addition to storage).
3502 Set of items to share to the new repository (in addition to storage).
3503 shallowfilestore
3503 shallowfilestore
3504 Indicates that storage for files should be shallow (not all ancestor
3504 Indicates that storage for files should be shallow (not all ancestor
3505 revisions are known).
3505 revisions are known).
3506 """
3506 """
3507 createopts = defaultcreateopts(ui, createopts=createopts)
3507 createopts = defaultcreateopts(ui, createopts=createopts)
3508
3508
3509 unknownopts = filterknowncreateopts(ui, createopts)
3509 unknownopts = filterknowncreateopts(ui, createopts)
3510
3510
3511 if not isinstance(unknownopts, dict):
3511 if not isinstance(unknownopts, dict):
3512 raise error.ProgrammingError(
3512 raise error.ProgrammingError(
3513 b'filterknowncreateopts() did not return a dict'
3513 b'filterknowncreateopts() did not return a dict'
3514 )
3514 )
3515
3515
3516 if unknownopts:
3516 if unknownopts:
3517 raise error.Abort(
3517 raise error.Abort(
3518 _(
3518 _(
3519 b'unable to create repository because of unknown '
3519 b'unable to create repository because of unknown '
3520 b'creation option: %s'
3520 b'creation option: %s'
3521 )
3521 )
3522 % b', '.join(sorted(unknownopts)),
3522 % b', '.join(sorted(unknownopts)),
3523 hint=_(b'is a required extension not loaded?'),
3523 hint=_(b'is a required extension not loaded?'),
3524 )
3524 )
3525
3525
3526 requirements = newreporequirements(ui, createopts=createopts)
3526 requirements = newreporequirements(ui, createopts=createopts)
3527 requirements -= checkrequirementscompat(ui, requirements)
3527 requirements -= checkrequirementscompat(ui, requirements)
3528
3528
3529 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3529 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3530
3530
3531 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3531 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3532 if hgvfs.exists():
3532 if hgvfs.exists():
3533 raise error.RepoError(_(b'repository %s already exists') % path)
3533 raise error.RepoError(_(b'repository %s already exists') % path)
3534
3534
3535 if b'sharedrepo' in createopts:
3535 if b'sharedrepo' in createopts:
3536 sharedpath = createopts[b'sharedrepo'].sharedpath
3536 sharedpath = createopts[b'sharedrepo'].sharedpath
3537
3537
3538 if createopts.get(b'sharedrelative'):
3538 if createopts.get(b'sharedrelative'):
3539 try:
3539 try:
3540 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3540 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3541 except (IOError, ValueError) as e:
3541 except (IOError, ValueError) as e:
3542 # ValueError is raised on Windows if the drive letters differ
3542 # ValueError is raised on Windows if the drive letters differ
3543 # on each path.
3543 # on each path.
3544 raise error.Abort(
3544 raise error.Abort(
3545 _(b'cannot calculate relative path'),
3545 _(b'cannot calculate relative path'),
3546 hint=stringutil.forcebytestr(e),
3546 hint=stringutil.forcebytestr(e),
3547 )
3547 )
3548
3548
3549 if not wdirvfs.exists():
3549 if not wdirvfs.exists():
3550 wdirvfs.makedirs()
3550 wdirvfs.makedirs()
3551
3551
3552 hgvfs.makedir(notindexed=True)
3552 hgvfs.makedir(notindexed=True)
3553 if b'sharedrepo' not in createopts:
3553 if b'sharedrepo' not in createopts:
3554 hgvfs.mkdir(b'cache')
3554 hgvfs.mkdir(b'cache')
3555 hgvfs.mkdir(b'wcache')
3555 hgvfs.mkdir(b'wcache')
3556
3556
3557 if b'store' in requirements and b'sharedrepo' not in createopts:
3557 if b'store' in requirements and b'sharedrepo' not in createopts:
3558 hgvfs.mkdir(b'store')
3558 hgvfs.mkdir(b'store')
3559
3559
3560 # We create an invalid changelog outside the store so very old
3560 # We create an invalid changelog outside the store so very old
3561 # Mercurial versions (which didn't know about the requirements
3561 # Mercurial versions (which didn't know about the requirements
3562 # file) encounter an error on reading the changelog. This
3562 # file) encounter an error on reading the changelog. This
3563 # effectively locks out old clients and prevents them from
3563 # effectively locks out old clients and prevents them from
3564 # mucking with a repo in an unknown format.
3564 # mucking with a repo in an unknown format.
3565 #
3565 #
3566 # The revlog header has version 2, which won't be recognized by
3566 # The revlog header has version 2, which won't be recognized by
3567 # such old clients.
3567 # such old clients.
3568 hgvfs.append(
3568 hgvfs.append(
3569 b'00changelog.i',
3569 b'00changelog.i',
3570 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3570 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3571 b'layout',
3571 b'layout',
3572 )
3572 )
3573
3573
3574 # Filter the requirements into working copy and store ones
3574 # Filter the requirements into working copy and store ones
3575 wcreq, storereq = scmutil.filterrequirements(requirements)
3575 wcreq, storereq = scmutil.filterrequirements(requirements)
3576 # write working copy ones
3576 # write working copy ones
3577 scmutil.writerequires(hgvfs, wcreq)
3577 scmutil.writerequires(hgvfs, wcreq)
3578 # If there are store requirements and the current repository
3578 # If there are store requirements and the current repository
3579 # is not a shared one, write stored requirements
3579 # is not a shared one, write stored requirements
3580 # For new shared repository, we don't need to write the store
3580 # For new shared repository, we don't need to write the store
3581 # requirements as they are already present in store requires
3581 # requirements as they are already present in store requires
3582 if storereq and b'sharedrepo' not in createopts:
3582 if storereq and b'sharedrepo' not in createopts:
3583 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3583 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3584 scmutil.writerequires(storevfs, storereq)
3584 scmutil.writerequires(storevfs, storereq)
3585
3585
3586 # Write out file telling readers where to find the shared store.
3586 # Write out file telling readers where to find the shared store.
3587 if b'sharedrepo' in createopts:
3587 if b'sharedrepo' in createopts:
3588 hgvfs.write(b'sharedpath', sharedpath)
3588 hgvfs.write(b'sharedpath', sharedpath)
3589
3589
3590 if createopts.get(b'shareditems'):
3590 if createopts.get(b'shareditems'):
3591 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3591 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3592 hgvfs.write(b'shared', shared)
3592 hgvfs.write(b'shared', shared)
3593
3593
3594
3594
3595 def poisonrepository(repo):
3595 def poisonrepository(repo):
3596 """Poison a repository instance so it can no longer be used."""
3596 """Poison a repository instance so it can no longer be used."""
3597 # Perform any cleanup on the instance.
3597 # Perform any cleanup on the instance.
3598 repo.close()
3598 repo.close()
3599
3599
3600 # Our strategy is to replace the type of the object with one that
3600 # Our strategy is to replace the type of the object with one that
3601 # has all attribute lookups result in error.
3601 # has all attribute lookups result in error.
3602 #
3602 #
3603 # But we have to allow the close() method because some constructors
3603 # But we have to allow the close() method because some constructors
3604 # of repos call close() on repo references.
3604 # of repos call close() on repo references.
3605 class poisonedrepository(object):
3605 class poisonedrepository(object):
3606 def __getattribute__(self, item):
3606 def __getattribute__(self, item):
3607 if item == 'close':
3607 if item == 'close':
3608 return object.__getattribute__(self, item)
3608 return object.__getattribute__(self, item)
3609
3609
3610 raise error.ProgrammingError(
3610 raise error.ProgrammingError(
3611 b'repo instances should not be used after unshare'
3611 b'repo instances should not be used after unshare'
3612 )
3612 )
3613
3613
3614 def close(self):
3614 def close(self):
3615 pass
3615 pass
3616
3616
3617 # We may have a repoview, which intercepts __setattr__. So be sure
3617 # We may have a repoview, which intercepts __setattr__. So be sure
3618 # we operate at the lowest level possible.
3618 # we operate at the lowest level possible.
3619 object.__setattr__(repo, '__class__', poisonedrepository)
3619 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,628 +1,628 b''
1 ===================================
1 ===================================
2 Test the persistent on-disk nodemap
2 Test the persistent on-disk nodemap
3 ===================================
3 ===================================
4
4
5 $ cat << EOF >> $HGRCPATH
5 $ cat << EOF >> $HGRCPATH
6 > [format]
6 > [format]
7 > use-persistent-nodemap=yes
7 > use-persistent-nodemap=yes
8 > [devel]
8 > [devel]
9 > persistent-nodemap=yes
9 > persistent-nodemap=yes
10 > EOF
10 > EOF
11 $ hg init test-repo
11 $ hg init test-repo
12 $ cd test-repo
12 $ cd test-repo
13 $ hg debugformat
13 $ hg debugformat
14 format-variant repo
14 format-variant repo
15 fncache: yes
15 fncache: yes
16 dotencode: yes
16 dotencode: yes
17 generaldelta: yes
17 generaldelta: yes
18 exp-sharesafe: no
18 exp-sharesafe: no
19 sparserevlog: yes
19 sparserevlog: yes
20 sidedata: no
20 sidedata: no
21 persistent-nodemap: yes
21 persistent-nodemap: yes
22 copies-sdc: no
22 copies-sdc: no
23 plain-cl-delta: yes
23 plain-cl-delta: yes
24 compression: zlib
24 compression: zlib
25 compression-level: default
25 compression-level: default
26 $ hg debugbuilddag .+5000 --new-file --config "storage.revlog.nodemap.mode=warn"
26 $ hg debugbuilddag .+5000 --new-file --config "storage.revlog.nodemap.mode=warn"
27 persistent nodemap in strict mode without efficient method (no-rust no-pure !)
27 persistent nodemap in strict mode without efficient method (no-rust no-pure !)
28 persistent nodemap in strict mode without efficient method (no-rust no-pure !)
28 persistent nodemap in strict mode without efficient method (no-rust no-pure !)
29 $ hg debugnodemap --metadata
29 $ hg debugnodemap --metadata
30 uid: ???????????????? (glob)
30 uid: ???????????????? (glob)
31 tip-rev: 5000
31 tip-rev: 5000
32 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
32 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
33 data-length: 121088
33 data-length: 121088
34 data-unused: 0
34 data-unused: 0
35 data-unused: 0.000%
35 data-unused: 0.000%
36 $ f --size .hg/store/00changelog.n
36 $ f --size .hg/store/00changelog.n
37 .hg/store/00changelog.n: size=70
37 .hg/store/00changelog.n: size=70
38
38
39 Simple lookup works
39 Simple lookup works
40
40
41 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
41 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
42 $ hg log -r "$ANYNODE" --template '{rev}\n'
42 $ hg log -r "$ANYNODE" --template '{rev}\n'
43 5000
43 5000
44
44
45
45
46 #if rust
46 #if rust
47
47
48 $ f --sha256 .hg/store/00changelog-*.nd
48 $ f --sha256 .hg/store/00changelog-*.nd
49 .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
49 .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
50
50
51 $ f --sha256 .hg/store/00manifest-*.nd
51 $ f --sha256 .hg/store/00manifest-*.nd
52 .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
52 .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
53 $ hg debugnodemap --dump-new | f --sha256 --size
53 $ hg debugnodemap --dump-new | f --sha256 --size
54 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
54 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
55 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
55 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
56 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
56 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
57 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
57 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
58 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
58 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
59 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
59 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
60 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
60 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
61 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
61 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
62 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
62 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
63 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
63 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
64 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
64 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
65 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
65 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
66 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
66 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
67 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
67 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
68 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
68 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
69 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
69 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
70 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
70 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
71 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
71 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
72 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
72 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
73
73
74
74
75 #else
75 #else
76
76
77 $ f --sha256 .hg/store/00changelog-*.nd
77 $ f --sha256 .hg/store/00changelog-*.nd
78 .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
78 .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
79 $ hg debugnodemap --dump-new | f --sha256 --size
79 $ hg debugnodemap --dump-new | f --sha256 --size
80 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
80 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
81 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
81 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
82 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
82 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
83 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
83 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
84 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
84 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
85 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
85 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
86 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
86 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
87 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
87 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
88 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
88 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
89 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
89 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
90 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
90 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
91 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
91 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
92 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
92 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
93 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
93 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
94 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
94 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
95 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
95 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
96 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
96 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
97 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
97 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
98 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
98 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
99
99
100 #endif
100 #endif
101
101
102 $ hg debugnodemap --check
102 $ hg debugnodemap --check
103 revision in index: 5001
103 revision in index: 5001
104 revision in nodemap: 5001
104 revision in nodemap: 5001
105
105
106 add a new commit
106 add a new commit
107
107
108 $ hg up
108 $ hg up
109 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
110 $ echo foo > foo
110 $ echo foo > foo
111 $ hg add foo
111 $ hg add foo
112
112
113 #if no-pure no-rust
113 #if no-pure no-rust
114
114
115 $ hg ci -m 'foo' --config "storage.revlog.nodemap.mode=strict"
115 $ hg ci -m 'foo' --config "storage.revlog.nodemap.mode=strict"
116 transaction abort!
116 transaction abort!
117 rollback completed
117 rollback completed
118 abort: persistent nodemap in strict mode without efficient method
118 abort: persistent nodemap in strict mode without efficient method
119 [255]
119 [255]
120
120
121 #endif
121 #endif
122
122
123 $ hg ci -m 'foo'
123 $ hg ci -m 'foo'
124
124
125 #if no-pure no-rust
125 #if no-pure no-rust
126 $ hg debugnodemap --metadata
126 $ hg debugnodemap --metadata
127 uid: ???????????????? (glob)
127 uid: ???????????????? (glob)
128 tip-rev: 5001
128 tip-rev: 5001
129 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
129 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
130 data-length: 121088
130 data-length: 121088
131 data-unused: 0
131 data-unused: 0
132 data-unused: 0.000%
132 data-unused: 0.000%
133 #else
133 #else
134 $ hg debugnodemap --metadata
134 $ hg debugnodemap --metadata
135 uid: ???????????????? (glob)
135 uid: ???????????????? (glob)
136 tip-rev: 5001
136 tip-rev: 5001
137 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
137 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
138 data-length: 121344
138 data-length: 121344
139 data-unused: 256
139 data-unused: 256
140 data-unused: 0.211%
140 data-unused: 0.211%
141 #endif
141 #endif
142
142
143 $ f --size .hg/store/00changelog.n
143 $ f --size .hg/store/00changelog.n
144 .hg/store/00changelog.n: size=70
144 .hg/store/00changelog.n: size=70
145
145
146 (The pure code use the debug code that perform incremental update, the C code reencode from scratch)
146 (The pure code use the debug code that perform incremental update, the C code reencode from scratch)
147
147
148 #if pure
148 #if pure
149 $ f --sha256 .hg/store/00changelog-*.nd --size
149 $ f --sha256 .hg/store/00changelog-*.nd --size
150 .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
150 .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
151 #endif
151 #endif
152
152
153 #if rust
153 #if rust
154 $ f --sha256 .hg/store/00changelog-*.nd --size
154 $ f --sha256 .hg/store/00changelog-*.nd --size
155 .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
155 .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
156 #endif
156 #endif
157
157
158 #if no-pure no-rust
158 #if no-pure no-rust
159 $ f --sha256 .hg/store/00changelog-*.nd --size
159 $ f --sha256 .hg/store/00changelog-*.nd --size
160 .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
160 .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
161 #endif
161 #endif
162
162
163 $ hg debugnodemap --check
163 $ hg debugnodemap --check
164 revision in index: 5002
164 revision in index: 5002
165 revision in nodemap: 5002
165 revision in nodemap: 5002
166
166
167 Test code path without mmap
167 Test code path without mmap
168 ---------------------------
168 ---------------------------
169
169
170 $ echo bar > bar
170 $ echo bar > bar
171 $ hg add bar
171 $ hg add bar
172 $ hg ci -m 'bar' --config storage.revlog.nodemap.mmap=no
172 $ hg ci -m 'bar' --config storage.revlog.persistent-nodemap.mmap=no
173
173
174 $ hg debugnodemap --check --config storage.revlog.nodemap.mmap=yes
174 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=yes
175 revision in index: 5003
175 revision in index: 5003
176 revision in nodemap: 5003
176 revision in nodemap: 5003
177 $ hg debugnodemap --check --config storage.revlog.nodemap.mmap=no
177 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=no
178 revision in index: 5003
178 revision in index: 5003
179 revision in nodemap: 5003
179 revision in nodemap: 5003
180
180
181
181
182 #if pure
182 #if pure
183 $ hg debugnodemap --metadata
183 $ hg debugnodemap --metadata
184 uid: ???????????????? (glob)
184 uid: ???????????????? (glob)
185 tip-rev: 5002
185 tip-rev: 5002
186 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
186 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
187 data-length: 121600
187 data-length: 121600
188 data-unused: 512
188 data-unused: 512
189 data-unused: 0.421%
189 data-unused: 0.421%
190 $ f --sha256 .hg/store/00changelog-*.nd --size
190 $ f --sha256 .hg/store/00changelog-*.nd --size
191 .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
191 .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
192 #endif
192 #endif
193 #if rust
193 #if rust
194 $ hg debugnodemap --metadata
194 $ hg debugnodemap --metadata
195 uid: ???????????????? (glob)
195 uid: ???????????????? (glob)
196 tip-rev: 5002
196 tip-rev: 5002
197 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
197 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
198 data-length: 121600
198 data-length: 121600
199 data-unused: 512
199 data-unused: 512
200 data-unused: 0.421%
200 data-unused: 0.421%
201 $ f --sha256 .hg/store/00changelog-*.nd --size
201 $ f --sha256 .hg/store/00changelog-*.nd --size
202 .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
202 .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
203 #endif
203 #endif
204 #if no-pure no-rust
204 #if no-pure no-rust
205 $ hg debugnodemap --metadata
205 $ hg debugnodemap --metadata
206 uid: ???????????????? (glob)
206 uid: ???????????????? (glob)
207 tip-rev: 5002
207 tip-rev: 5002
208 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
208 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
209 data-length: 121088
209 data-length: 121088
210 data-unused: 0
210 data-unused: 0
211 data-unused: 0.000%
211 data-unused: 0.000%
212 $ f --sha256 .hg/store/00changelog-*.nd --size
212 $ f --sha256 .hg/store/00changelog-*.nd --size
213 .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
213 .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
214 #endif
214 #endif
215
215
216 Test force warming the cache
216 Test force warming the cache
217
217
218 $ rm .hg/store/00changelog.n
218 $ rm .hg/store/00changelog.n
219 $ hg debugnodemap --metadata
219 $ hg debugnodemap --metadata
220 $ hg debugupdatecache
220 $ hg debugupdatecache
221 #if pure
221 #if pure
222 $ hg debugnodemap --metadata
222 $ hg debugnodemap --metadata
223 uid: ???????????????? (glob)
223 uid: ???????????????? (glob)
224 tip-rev: 5002
224 tip-rev: 5002
225 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
225 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
226 data-length: 121088
226 data-length: 121088
227 data-unused: 0
227 data-unused: 0
228 data-unused: 0.000%
228 data-unused: 0.000%
229 #else
229 #else
230 $ hg debugnodemap --metadata
230 $ hg debugnodemap --metadata
231 uid: ???????????????? (glob)
231 uid: ???????????????? (glob)
232 tip-rev: 5002
232 tip-rev: 5002
233 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
233 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
234 data-length: 121088
234 data-length: 121088
235 data-unused: 0
235 data-unused: 0
236 data-unused: 0.000%
236 data-unused: 0.000%
237 #endif
237 #endif
238
238
239 Check out of sync nodemap
239 Check out of sync nodemap
240 =========================
240 =========================
241
241
242 First copy old data on the side.
242 First copy old data on the side.
243
243
244 $ mkdir ../tmp-copies
244 $ mkdir ../tmp-copies
245 $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies
245 $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies
246
246
247 Nodemap lagging behind
247 Nodemap lagging behind
248 ----------------------
248 ----------------------
249
249
250 make a new commit
250 make a new commit
251
251
252 $ echo bar2 > bar
252 $ echo bar2 > bar
253 $ hg ci -m 'bar2'
253 $ hg ci -m 'bar2'
254 $ NODE=`hg log -r tip -T '{node}\n'`
254 $ NODE=`hg log -r tip -T '{node}\n'`
255 $ hg log -r "$NODE" -T '{rev}\n'
255 $ hg log -r "$NODE" -T '{rev}\n'
256 5003
256 5003
257
257
258 If the nodemap is lagging behind, it can catch up fine
258 If the nodemap is lagging behind, it can catch up fine
259
259
260 $ hg debugnodemap --metadata
260 $ hg debugnodemap --metadata
261 uid: ???????????????? (glob)
261 uid: ???????????????? (glob)
262 tip-rev: 5003
262 tip-rev: 5003
263 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
263 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
264 data-length: 121344 (pure !)
264 data-length: 121344 (pure !)
265 data-length: 121344 (rust !)
265 data-length: 121344 (rust !)
266 data-length: 121152 (no-rust no-pure !)
266 data-length: 121152 (no-rust no-pure !)
267 data-unused: 192 (pure !)
267 data-unused: 192 (pure !)
268 data-unused: 192 (rust !)
268 data-unused: 192 (rust !)
269 data-unused: 0 (no-rust no-pure !)
269 data-unused: 0 (no-rust no-pure !)
270 data-unused: 0.158% (pure !)
270 data-unused: 0.158% (pure !)
271 data-unused: 0.158% (rust !)
271 data-unused: 0.158% (rust !)
272 data-unused: 0.000% (no-rust no-pure !)
272 data-unused: 0.000% (no-rust no-pure !)
273 $ cp -f ../tmp-copies/* .hg/store/
273 $ cp -f ../tmp-copies/* .hg/store/
274 $ hg debugnodemap --metadata
274 $ hg debugnodemap --metadata
275 uid: ???????????????? (glob)
275 uid: ???????????????? (glob)
276 tip-rev: 5002
276 tip-rev: 5002
277 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
277 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
278 data-length: 121088
278 data-length: 121088
279 data-unused: 0
279 data-unused: 0
280 data-unused: 0.000%
280 data-unused: 0.000%
281 $ hg log -r "$NODE" -T '{rev}\n'
281 $ hg log -r "$NODE" -T '{rev}\n'
282 5003
282 5003
283
283
284 changelog altered
284 changelog altered
285 -----------------
285 -----------------
286
286
287 If the nodemap is not gated behind a requirements, an unaware client can alter
287 If the nodemap is not gated behind a requirements, an unaware client can alter
288 the repository so the revlog used to generate the nodemap is not longer
288 the repository so the revlog used to generate the nodemap is not longer
289 compatible with the persistent nodemap. We need to detect that.
289 compatible with the persistent nodemap. We need to detect that.
290
290
291 $ hg up "$NODE~5"
291 $ hg up "$NODE~5"
292 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
292 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
293 $ echo bar > babar
293 $ echo bar > babar
294 $ hg add babar
294 $ hg add babar
295 $ hg ci -m 'babar'
295 $ hg ci -m 'babar'
296 created new head
296 created new head
297 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
297 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
298 $ hg log -r "$OTHERNODE" -T '{rev}\n'
298 $ hg log -r "$OTHERNODE" -T '{rev}\n'
299 5004
299 5004
300
300
301 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
301 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
302
302
303 the nodemap should detect the changelog have been tampered with and recover.
303 the nodemap should detect the changelog have been tampered with and recover.
304
304
305 $ hg debugnodemap --metadata
305 $ hg debugnodemap --metadata
306 uid: ???????????????? (glob)
306 uid: ???????????????? (glob)
307 tip-rev: 5002
307 tip-rev: 5002
308 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
308 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
309 data-length: 121536 (pure !)
309 data-length: 121536 (pure !)
310 data-length: 121088 (rust !)
310 data-length: 121088 (rust !)
311 data-length: 121088 (no-pure no-rust !)
311 data-length: 121088 (no-pure no-rust !)
312 data-unused: 448 (pure !)
312 data-unused: 448 (pure !)
313 data-unused: 0 (rust !)
313 data-unused: 0 (rust !)
314 data-unused: 0 (no-pure no-rust !)
314 data-unused: 0 (no-pure no-rust !)
315 data-unused: 0.000% (rust !)
315 data-unused: 0.000% (rust !)
316 data-unused: 0.369% (pure !)
316 data-unused: 0.369% (pure !)
317 data-unused: 0.000% (no-pure no-rust !)
317 data-unused: 0.000% (no-pure no-rust !)
318
318
319 $ cp -f ../tmp-copies/* .hg/store/
319 $ cp -f ../tmp-copies/* .hg/store/
320 $ hg debugnodemap --metadata
320 $ hg debugnodemap --metadata
321 uid: ???????????????? (glob)
321 uid: ???????????????? (glob)
322 tip-rev: 5002
322 tip-rev: 5002
323 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
323 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
324 data-length: 121088
324 data-length: 121088
325 data-unused: 0
325 data-unused: 0
326 data-unused: 0.000%
326 data-unused: 0.000%
327 $ hg log -r "$OTHERNODE" -T '{rev}\n'
327 $ hg log -r "$OTHERNODE" -T '{rev}\n'
328 5002
328 5002
329
329
330 Check transaction related property
330 Check transaction related property
331 ==================================
331 ==================================
332
332
333 An up to date nodemap should be available to shell hooks,
333 An up to date nodemap should be available to shell hooks,
334
334
335 $ echo dsljfl > a
335 $ echo dsljfl > a
336 $ hg add a
336 $ hg add a
337 $ hg ci -m a
337 $ hg ci -m a
338 $ hg debugnodemap --metadata
338 $ hg debugnodemap --metadata
339 uid: ???????????????? (glob)
339 uid: ???????????????? (glob)
340 tip-rev: 5003
340 tip-rev: 5003
341 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
341 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
342 data-length: 121088
342 data-length: 121088
343 data-unused: 0
343 data-unused: 0
344 data-unused: 0.000%
344 data-unused: 0.000%
345 $ echo babar2 > babar
345 $ echo babar2 > babar
346 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
346 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
347 uid: ???????????????? (glob)
347 uid: ???????????????? (glob)
348 tip-rev: 5004
348 tip-rev: 5004
349 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
349 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
350 data-length: 121280 (pure !)
350 data-length: 121280 (pure !)
351 data-length: 121280 (rust !)
351 data-length: 121280 (rust !)
352 data-length: 121088 (no-pure no-rust !)
352 data-length: 121088 (no-pure no-rust !)
353 data-unused: 192 (pure !)
353 data-unused: 192 (pure !)
354 data-unused: 192 (rust !)
354 data-unused: 192 (rust !)
355 data-unused: 0 (no-pure no-rust !)
355 data-unused: 0 (no-pure no-rust !)
356 data-unused: 0.158% (pure !)
356 data-unused: 0.158% (pure !)
357 data-unused: 0.158% (rust !)
357 data-unused: 0.158% (rust !)
358 data-unused: 0.000% (no-pure no-rust !)
358 data-unused: 0.000% (no-pure no-rust !)
359 $ hg debugnodemap --metadata
359 $ hg debugnodemap --metadata
360 uid: ???????????????? (glob)
360 uid: ???????????????? (glob)
361 tip-rev: 5004
361 tip-rev: 5004
362 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
362 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
363 data-length: 121280 (pure !)
363 data-length: 121280 (pure !)
364 data-length: 121280 (rust !)
364 data-length: 121280 (rust !)
365 data-length: 121088 (no-pure no-rust !)
365 data-length: 121088 (no-pure no-rust !)
366 data-unused: 192 (pure !)
366 data-unused: 192 (pure !)
367 data-unused: 192 (rust !)
367 data-unused: 192 (rust !)
368 data-unused: 0 (no-pure no-rust !)
368 data-unused: 0 (no-pure no-rust !)
369 data-unused: 0.158% (pure !)
369 data-unused: 0.158% (pure !)
370 data-unused: 0.158% (rust !)
370 data-unused: 0.158% (rust !)
371 data-unused: 0.000% (no-pure no-rust !)
371 data-unused: 0.000% (no-pure no-rust !)
372
372
373 Another process does not see the pending nodemap content during run.
373 Another process does not see the pending nodemap content during run.
374
374
375 $ PATH=$RUNTESTDIR/testlib/:$PATH
375 $ PATH=$RUNTESTDIR/testlib/:$PATH
376 $ echo qpoasp > a
376 $ echo qpoasp > a
377 $ hg ci -m a2 \
377 $ hg ci -m a2 \
378 > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \
378 > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \
379 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
379 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
380
380
381 (read the repository while the commit transaction is pending)
381 (read the repository while the commit transaction is pending)
382
382
383 $ wait-on-file 20 sync-txn-pending && \
383 $ wait-on-file 20 sync-txn-pending && \
384 > hg debugnodemap --metadata && \
384 > hg debugnodemap --metadata && \
385 > wait-on-file 20 sync-txn-close sync-repo-read
385 > wait-on-file 20 sync-txn-close sync-repo-read
386 uid: ???????????????? (glob)
386 uid: ???????????????? (glob)
387 tip-rev: 5004
387 tip-rev: 5004
388 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
388 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
389 data-length: 121280 (pure !)
389 data-length: 121280 (pure !)
390 data-length: 121280 (rust !)
390 data-length: 121280 (rust !)
391 data-length: 121088 (no-pure no-rust !)
391 data-length: 121088 (no-pure no-rust !)
392 data-unused: 192 (pure !)
392 data-unused: 192 (pure !)
393 data-unused: 192 (rust !)
393 data-unused: 192 (rust !)
394 data-unused: 0 (no-pure no-rust !)
394 data-unused: 0 (no-pure no-rust !)
395 data-unused: 0.158% (pure !)
395 data-unused: 0.158% (pure !)
396 data-unused: 0.158% (rust !)
396 data-unused: 0.158% (rust !)
397 data-unused: 0.000% (no-pure no-rust !)
397 data-unused: 0.000% (no-pure no-rust !)
398 $ hg debugnodemap --metadata
398 $ hg debugnodemap --metadata
399 uid: ???????????????? (glob)
399 uid: ???????????????? (glob)
400 tip-rev: 5005
400 tip-rev: 5005
401 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
401 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
402 data-length: 121536 (pure !)
402 data-length: 121536 (pure !)
403 data-length: 121536 (rust !)
403 data-length: 121536 (rust !)
404 data-length: 121088 (no-pure no-rust !)
404 data-length: 121088 (no-pure no-rust !)
405 data-unused: 448 (pure !)
405 data-unused: 448 (pure !)
406 data-unused: 448 (rust !)
406 data-unused: 448 (rust !)
407 data-unused: 0 (no-pure no-rust !)
407 data-unused: 0 (no-pure no-rust !)
408 data-unused: 0.369% (pure !)
408 data-unused: 0.369% (pure !)
409 data-unused: 0.369% (rust !)
409 data-unused: 0.369% (rust !)
410 data-unused: 0.000% (no-pure no-rust !)
410 data-unused: 0.000% (no-pure no-rust !)
411
411
412 $ cat output.txt
412 $ cat output.txt
413
413
414 Check that a failing transaction will properly revert the data
414 Check that a failing transaction will properly revert the data
415
415
416 $ echo plakfe > a
416 $ echo plakfe > a
417 $ f --size --sha256 .hg/store/00changelog-*.nd
417 $ f --size --sha256 .hg/store/00changelog-*.nd
418 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
418 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
419 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
419 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
420 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
420 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
421 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
421 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
422 transaction abort!
422 transaction abort!
423 rollback completed
423 rollback completed
424 abort: This is a late abort
424 abort: This is a late abort
425 [255]
425 [255]
426 $ hg debugnodemap --metadata
426 $ hg debugnodemap --metadata
427 uid: ???????????????? (glob)
427 uid: ???????????????? (glob)
428 tip-rev: 5005
428 tip-rev: 5005
429 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
429 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
430 data-length: 121536 (pure !)
430 data-length: 121536 (pure !)
431 data-length: 121536 (rust !)
431 data-length: 121536 (rust !)
432 data-length: 121088 (no-pure no-rust !)
432 data-length: 121088 (no-pure no-rust !)
433 data-unused: 448 (pure !)
433 data-unused: 448 (pure !)
434 data-unused: 448 (rust !)
434 data-unused: 448 (rust !)
435 data-unused: 0 (no-pure no-rust !)
435 data-unused: 0 (no-pure no-rust !)
436 data-unused: 0.369% (pure !)
436 data-unused: 0.369% (pure !)
437 data-unused: 0.369% (rust !)
437 data-unused: 0.369% (rust !)
438 data-unused: 0.000% (no-pure no-rust !)
438 data-unused: 0.000% (no-pure no-rust !)
439 $ f --size --sha256 .hg/store/00changelog-*.nd
439 $ f --size --sha256 .hg/store/00changelog-*.nd
440 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
440 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
441 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
441 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
442 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
442 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
443
443
444 Check that removing content does not confuse the nodemap
444 Check that removing content does not confuse the nodemap
445 --------------------------------------------------------
445 --------------------------------------------------------
446
446
447 removing data with rollback
447 removing data with rollback
448
448
449 $ echo aso > a
449 $ echo aso > a
450 $ hg ci -m a4
450 $ hg ci -m a4
451 $ hg rollback
451 $ hg rollback
452 repository tip rolled back to revision 5005 (undo commit)
452 repository tip rolled back to revision 5005 (undo commit)
453 working directory now based on revision 5005
453 working directory now based on revision 5005
454 $ hg id -r .
454 $ hg id -r .
455 90d5d3ba2fc4 tip
455 90d5d3ba2fc4 tip
456
456
457 roming data with strip
457 roming data with strip
458
458
459 $ echo aso > a
459 $ echo aso > a
460 $ hg ci -m a4
460 $ hg ci -m a4
461 $ hg --config extensions.strip= strip -r . --no-backup
461 $ hg --config extensions.strip= strip -r . --no-backup
462 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
462 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
463 $ hg id -r . --traceback
463 $ hg id -r . --traceback
464 90d5d3ba2fc4 tip
464 90d5d3ba2fc4 tip
465
465
466 Test upgrade / downgrade
466 Test upgrade / downgrade
467 ========================
467 ========================
468
468
469 downgrading
469 downgrading
470
470
471 $ cat << EOF >> .hg/hgrc
471 $ cat << EOF >> .hg/hgrc
472 > [format]
472 > [format]
473 > use-persistent-nodemap=no
473 > use-persistent-nodemap=no
474 > EOF
474 > EOF
475 $ hg debugformat -v
475 $ hg debugformat -v
476 format-variant repo config default
476 format-variant repo config default
477 fncache: yes yes yes
477 fncache: yes yes yes
478 dotencode: yes yes yes
478 dotencode: yes yes yes
479 generaldelta: yes yes yes
479 generaldelta: yes yes yes
480 exp-sharesafe: no no no
480 exp-sharesafe: no no no
481 sparserevlog: yes yes yes
481 sparserevlog: yes yes yes
482 sidedata: no no no
482 sidedata: no no no
483 persistent-nodemap: yes no no
483 persistent-nodemap: yes no no
484 copies-sdc: no no no
484 copies-sdc: no no no
485 plain-cl-delta: yes yes yes
485 plain-cl-delta: yes yes yes
486 compression: zlib zlib zlib
486 compression: zlib zlib zlib
487 compression-level: default default default
487 compression-level: default default default
488 $ hg debugupgraderepo --run --no-backup --quiet
488 $ hg debugupgraderepo --run --no-backup --quiet
489 upgrade will perform the following actions:
489 upgrade will perform the following actions:
490
490
491 requirements
491 requirements
492 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
492 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
493 removed: persistent-nodemap
493 removed: persistent-nodemap
494
494
495 processed revlogs:
495 processed revlogs:
496 - all-filelogs
496 - all-filelogs
497 - changelog
497 - changelog
498 - manifest
498 - manifest
499
499
500 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
500 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
501 [1]
501 [1]
502 $ hg debugnodemap --metadata
502 $ hg debugnodemap --metadata
503
503
504
504
505 upgrading
505 upgrading
506
506
507 $ cat << EOF >> .hg/hgrc
507 $ cat << EOF >> .hg/hgrc
508 > [format]
508 > [format]
509 > use-persistent-nodemap=yes
509 > use-persistent-nodemap=yes
510 > EOF
510 > EOF
511 $ hg debugformat -v
511 $ hg debugformat -v
512 format-variant repo config default
512 format-variant repo config default
513 fncache: yes yes yes
513 fncache: yes yes yes
514 dotencode: yes yes yes
514 dotencode: yes yes yes
515 generaldelta: yes yes yes
515 generaldelta: yes yes yes
516 exp-sharesafe: no no no
516 exp-sharesafe: no no no
517 sparserevlog: yes yes yes
517 sparserevlog: yes yes yes
518 sidedata: no no no
518 sidedata: no no no
519 persistent-nodemap: no yes no
519 persistent-nodemap: no yes no
520 copies-sdc: no no no
520 copies-sdc: no no no
521 plain-cl-delta: yes yes yes
521 plain-cl-delta: yes yes yes
522 compression: zlib zlib zlib
522 compression: zlib zlib zlib
523 compression-level: default default default
523 compression-level: default default default
524 $ hg debugupgraderepo --run --no-backup --quiet
524 $ hg debugupgraderepo --run --no-backup --quiet
525 upgrade will perform the following actions:
525 upgrade will perform the following actions:
526
526
527 requirements
527 requirements
528 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
528 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
529 added: persistent-nodemap
529 added: persistent-nodemap
530
530
531 processed revlogs:
531 processed revlogs:
532 - all-filelogs
532 - all-filelogs
533 - changelog
533 - changelog
534 - manifest
534 - manifest
535
535
536 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
536 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
537 00changelog-*.nd (glob)
537 00changelog-*.nd (glob)
538 00changelog.n
538 00changelog.n
539 00manifest-*.nd (glob)
539 00manifest-*.nd (glob)
540 00manifest.n
540 00manifest.n
541
541
542 $ hg debugnodemap --metadata
542 $ hg debugnodemap --metadata
543 uid: * (glob)
543 uid: * (glob)
544 tip-rev: 5005
544 tip-rev: 5005
545 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
545 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
546 data-length: 121088
546 data-length: 121088
547 data-unused: 0
547 data-unused: 0
548 data-unused: 0.000%
548 data-unused: 0.000%
549
549
550 Running unrelated upgrade
550 Running unrelated upgrade
551
551
552 $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
552 $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
553 upgrade will perform the following actions:
553 upgrade will perform the following actions:
554
554
555 requirements
555 requirements
556 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store
556 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store
557
557
558 optimisations: re-delta-all
558 optimisations: re-delta-all
559
559
560 processed revlogs:
560 processed revlogs:
561 - all-filelogs
561 - all-filelogs
562 - changelog
562 - changelog
563 - manifest
563 - manifest
564
564
565 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
565 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
566 00changelog-*.nd (glob)
566 00changelog-*.nd (glob)
567 00changelog.n
567 00changelog.n
568 00manifest-*.nd (glob)
568 00manifest-*.nd (glob)
569 00manifest.n
569 00manifest.n
570
570
571 $ hg debugnodemap --metadata
571 $ hg debugnodemap --metadata
572 uid: * (glob)
572 uid: * (glob)
573 tip-rev: 5005
573 tip-rev: 5005
574 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
574 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
575 data-length: 121088
575 data-length: 121088
576 data-unused: 0
576 data-unused: 0
577 data-unused: 0.000%
577 data-unused: 0.000%
578
578
579 Persistent nodemap and local/streaming clone
579 Persistent nodemap and local/streaming clone
580 ============================================
580 ============================================
581
581
582 $ cd ..
582 $ cd ..
583
583
584 standard clone
584 standard clone
585 --------------
585 --------------
586
586
587 The persistent nodemap should exist after a streaming clone
587 The persistent nodemap should exist after a streaming clone
588
588
589 $ hg clone --pull --quiet -U test-repo standard-clone
589 $ hg clone --pull --quiet -U test-repo standard-clone
590 $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
590 $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
591 00changelog-*.nd (glob)
591 00changelog-*.nd (glob)
592 00changelog.n
592 00changelog.n
593 00manifest-*.nd (glob)
593 00manifest-*.nd (glob)
594 00manifest.n
594 00manifest.n
595 $ hg -R standard-clone debugnodemap --metadata
595 $ hg -R standard-clone debugnodemap --metadata
596 uid: * (glob)
596 uid: * (glob)
597 tip-rev: 5005
597 tip-rev: 5005
598 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
598 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
599 data-length: 121088
599 data-length: 121088
600 data-unused: 0
600 data-unused: 0
601 data-unused: 0.000%
601 data-unused: 0.000%
602
602
603
603
604 local clone
604 local clone
605 ------------
605 ------------
606
606
607 The persistent nodemap should exist after a streaming clone
607 The persistent nodemap should exist after a streaming clone
608
608
609 $ hg clone -U test-repo local-clone
609 $ hg clone -U test-repo local-clone
610 $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
610 $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
611 [1]
611 [1]
612 $ hg -R local-clone debugnodemap --metadata
612 $ hg -R local-clone debugnodemap --metadata
613
613
614 stream clone
614 stream clone
615 ------------
615 ------------
616
616
617 The persistent nodemap should exist after a streaming clone
617 The persistent nodemap should exist after a streaming clone
618
618
619 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
619 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
620 adding [s] 00manifest.n (70 bytes)
620 adding [s] 00manifest.n (70 bytes)
621 adding [s] 00manifest.i (313 KB)
621 adding [s] 00manifest.i (313 KB)
622 adding [s] 00manifest.d (452 KB)
622 adding [s] 00manifest.d (452 KB)
623 adding [s] 00changelog.n (70 bytes)
623 adding [s] 00changelog.n (70 bytes)
624 adding [s] 00changelog.i (313 KB)
624 adding [s] 00changelog.i (313 KB)
625 adding [s] 00changelog.d (360 KB)
625 adding [s] 00changelog.d (360 KB)
626 $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
626 $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
627 [1]
627 [1]
628 $ hg -R stream-clone debugnodemap --metadata
628 $ hg -R stream-clone debugnodemap --metadata
General Comments 0
You need to be logged in to leave comments. Login now